author    Stephen Hines <srhines@google.com>    2014-04-23 16:57:46 -0700
committer Stephen Hines <srhines@google.com>    2014-04-24 15:53:16 -0700
commit    36b56886974eae4f9c5ebc96befd3e7bfe5de338 (patch)
tree      e6cfb69fbbd937f450eeb83bfb83b9da3b01275a /test/CodeGen/R600
parent    69a8640022b04415ae9fac62f8ab090601d8f889 (diff)
Update to LLVM 3.5a.
Change-Id: Ifadecab779f128e62e430c2b4f6ddd84953ed617
Diffstat (limited to 'test/CodeGen/R600')
-rw-r--r-- test/CodeGen/R600/32-bit-local-address-space.ll | 64
-rw-r--r-- test/CodeGen/R600/add.ll | 102
-rw-r--r-- test/CodeGen/R600/add_i64.ll | 39
-rw-r--r-- test/CodeGen/R600/address-space.ll | 9
-rw-r--r-- test/CodeGen/R600/anyext.ll | 14
-rw-r--r-- test/CodeGen/R600/array-ptr-calc-i32.ll | 31
-rw-r--r-- test/CodeGen/R600/atomic_load_add.ll | 4
-rw-r--r-- test/CodeGen/R600/atomic_load_sub.ll | 4
-rw-r--r-- test/CodeGen/R600/basic-branch.ll | 15
-rw-r--r-- test/CodeGen/R600/basic-loop.ll | 18
-rw-r--r-- test/CodeGen/R600/bfe_uint.ll | 2
-rw-r--r-- test/CodeGen/R600/bitcast.ll | 9
-rw-r--r-- test/CodeGen/R600/cayman-loop-bug.ll | 32
-rw-r--r-- test/CodeGen/R600/cf-stack-bug.ll | 227
-rw-r--r-- test/CodeGen/R600/codegen-prepare-addrmode-sext.ll | 19
-rw-r--r-- test/CodeGen/R600/elf.r600.ll | 2
-rw-r--r-- test/CodeGen/R600/extload.ll | 91
-rw-r--r-- test/CodeGen/R600/fabs.ll | 14
-rw-r--r-- test/CodeGen/R600/fadd.ll | 37
-rw-r--r-- test/CodeGen/R600/fceil.ll | 84
-rw-r--r-- test/CodeGen/R600/ffloor.ll | 84
-rw-r--r-- test/CodeGen/R600/fneg-fabs.ll | 55
-rw-r--r-- test/CodeGen/R600/fneg.ll | 14
-rw-r--r-- test/CodeGen/R600/ftrunc.ll | 84
-rw-r--r-- test/CodeGen/R600/gep-address-space.ll | 14
-rw-r--r-- test/CodeGen/R600/gv-const-addrspace.ll | 41
-rw-r--r-- test/CodeGen/R600/icmp64.ll | 92
-rw-r--r-- test/CodeGen/R600/indirect-private-64.ll | 75
-rw-r--r-- test/CodeGen/R600/infinite-loop-evergreen.ll | 10
-rw-r--r-- test/CodeGen/R600/infinite-loop.ll | 17
-rw-r--r-- test/CodeGen/R600/insert_vector_elt.ll | 179
-rw-r--r-- test/CodeGen/R600/insert_vector_elt_f64.ll | 36
-rw-r--r-- test/CodeGen/R600/jump-address.ll | 2
-rw-r--r-- test/CodeGen/R600/lds-oqap-crash.ll | 28
-rw-r--r-- test/CodeGen/R600/lds-output-queue.ll | 2
-rw-r--r-- test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll | 40
-rw-r--r-- test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll | 40
-rw-r--r-- test/CodeGen/R600/llvm.AMDGPU.bfi.ll | 41
-rw-r--r-- test/CodeGen/R600/llvm.AMDGPU.bfm.ll | 40
-rw-r--r-- test/CodeGen/R600/llvm.AMDGPU.imax.ll | 29
-rw-r--r-- test/CodeGen/R600/llvm.AMDGPU.imin.ll | 29
-rw-r--r-- test/CodeGen/R600/llvm.AMDGPU.kill.ll | 22
-rw-r--r-- test/CodeGen/R600/llvm.AMDGPU.umax.ll | 44
-rw-r--r-- test/CodeGen/R600/llvm.AMDGPU.umin.ll | 44
-rw-r--r-- test/CodeGen/R600/llvm.SI.load.dword.ll | 40
-rw-r--r-- test/CodeGen/R600/llvm.SI.sample-masked.ll | 16
-rw-r--r-- test/CodeGen/R600/llvm.SI.sample.ll | 6
-rw-r--r-- test/CodeGen/R600/llvm.SI.sampled.ll | 4
-rw-r--r-- test/CodeGen/R600/llvm.SI.sendmsg.ll | 21
-rw-r--r-- test/CodeGen/R600/llvm.SI.tbuffer.store.ll | 18
-rw-r--r-- test/CodeGen/R600/llvm.exp2.ll | 26
-rw-r--r-- test/CodeGen/R600/llvm.pow.ll | 29
-rw-r--r-- test/CodeGen/R600/llvm.trunc.ll | 13
-rw-r--r-- test/CodeGen/R600/load.ll | 197
-rw-r--r-- test/CodeGen/R600/load64.ll | 20
-rw-r--r-- test/CodeGen/R600/local-64.ll | 158
-rw-r--r-- test/CodeGen/R600/local-memory-two-objects.ll | 11
-rw-r--r-- test/CodeGen/R600/local-memory.ll | 6
-rw-r--r-- test/CodeGen/R600/loop-idiom.ll | 54
-rw-r--r-- test/CodeGen/R600/mad_uint24.ll | 16
-rw-r--r-- test/CodeGen/R600/mubuf.ll | 98
-rw-r--r-- test/CodeGen/R600/mul.ll | 12
-rw-r--r-- test/CodeGen/R600/mul_uint24.ll | 16
-rw-r--r-- test/CodeGen/R600/or.ll | 134
-rw-r--r-- test/CodeGen/R600/private-memory.ll | 121
-rw-r--r-- test/CodeGen/R600/r600-infinite-loop-bug-while-reorganizing-vector.ll | 59
-rw-r--r-- test/CodeGen/R600/register-count-comments.ll | 20
-rw-r--r-- test/CodeGen/R600/salu-to-valu.ll | 48
-rw-r--r-- test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll | 162
-rw-r--r-- test/CodeGen/R600/select-vectors.ll | 155
-rw-r--r-- test/CodeGen/R600/select64.ll | 15
-rw-r--r-- test/CodeGen/R600/setcc-equivalent.ll | 30
-rw-r--r-- test/CodeGen/R600/sext-in-reg.ll | 271
-rw-r--r-- test/CodeGen/R600/si-annotate-cf-assertion.ll | 1
-rw-r--r-- test/CodeGen/R600/si-sgpr-spill.ll | 880
-rw-r--r-- test/CodeGen/R600/smrd.ll | 80
-rw-r--r-- test/CodeGen/R600/store-v3i32.ll | 12
-rw-r--r-- test/CodeGen/R600/store-v3i64.ll | 28
-rw-r--r-- test/CodeGen/R600/store-vector-ptrs.ll | 1
-rw-r--r-- test/CodeGen/R600/store.ll | 23
-rw-r--r-- test/CodeGen/R600/trunc-store-i1.ll | 32
-rw-r--r-- test/CodeGen/R600/trunc.ll | 36
-rw-r--r-- test/CodeGen/R600/unhandled-loop-condition-assertion.ll | 114
-rw-r--r-- test/CodeGen/R600/unroll.ll | 37
-rw-r--r-- test/CodeGen/R600/v1i64-kernel-arg.ll | 17
-rw-r--r-- test/CodeGen/R600/v_cndmask.ll | 13
-rw-r--r-- test/CodeGen/R600/vtx-fetch-branch.ll | 29
-rw-r--r-- test/CodeGen/R600/vtx-schedule.ll | 4
-rw-r--r-- test/CodeGen/R600/xor.ll | 18
-rw-r--r-- test/CodeGen/R600/zero_extend.ll | 10
90 files changed, 4679 insertions, 311 deletions
diff --git a/test/CodeGen/R600/32-bit-local-address-space.ll b/test/CodeGen/R600/32-bit-local-address-space.ll
index 7a12687..fffaefe 100644
--- a/test/CodeGen/R600/32-bit-local-address-space.ll
+++ b/test/CodeGen/R600/32-bit-local-address-space.ll
@@ -11,7 +11,7 @@
; CHECK-LABEL: @local_address_load
; CHECK: V_MOV_B32_e{{32|64}} [[PTR:v[0-9]]]
-; CHECK: DS_READ_B32 [[PTR]]
+; CHECK: DS_READ_B32 v{{[0-9]+}}, [[PTR]]
define void @local_address_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
%0 = load i32 addrspace(3)* %in
@@ -32,9 +32,8 @@ entry:
}
; CHECK-LABEL: @local_address_gep_const_offset
-; CHECK: S_ADD_I32 [[SPTR:s[0-9]]]
-; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
-; CHECK: DS_READ_B32 [[VPTR]]
+; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], s{{[0-9]+}}
+; CHECK: DS_READ_B32 v{{[0-9]+}}, [[VPTR]], 4,
define void @local_address_gep_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
%0 = getelementptr i32 addrspace(3)* %in, i32 1
@@ -43,6 +42,19 @@ entry:
ret void
}
+; Offset too large, can't fold into 16-bit immediate offset.
+; CHECK-LABEL: @local_address_gep_large_const_offset
+; CHECK: S_ADD_I32 [[SPTR:s[0-9]]], s{{[0-9]+}}, 65540
+; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; CHECK: DS_READ_B32 [[VPTR]]
+define void @local_address_gep_large_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
+entry:
+ %0 = getelementptr i32 addrspace(3)* %in, i32 16385
+ %1 = load i32 addrspace(3)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
; CHECK-LABEL: @null_32bit_lds_ptr:
; CHECK: V_CMP_NE_I32
; CHECK-NOT: V_CMP_NE_I32
@@ -69,7 +81,7 @@ define void @mul_32bit_ptr(float addrspace(1)* %out, [3 x float] addrspace(3)* %
; CHECK-LABEL: @infer_ptr_alignment_global_offset:
; CHECK: V_MOV_B32_e32 [[REG:v[0-9]+]], 0
-; CHECK: DS_READ_B32 v{{[0-9]+}}, 0, [[REG]]
+; CHECK: DS_READ_B32 v{{[0-9]+}}, [[REG]]
define void @infer_ptr_alignment_global_offset(float addrspace(1)* %out, i32 %tid) {
%val = load float addrspace(3)* @g_lds
store float %val, float addrspace(1)* %out
@@ -80,9 +92,47 @@ define void @infer_ptr_alignment_global_offset(float addrspace(1)* %out, i32 %ti
@ptr = addrspace(3) global i32 addrspace(3)* null
@dst = addrspace(3) global [16384 x i32] zeroinitializer
-; SI-LABEL: @global_ptr:
-; SI-CHECK: DS_WRITE_B32
+; CHECK-LABEL: @global_ptr:
+; CHECK: DS_WRITE_B32
define void @global_ptr() nounwind {
store i32 addrspace(3)* getelementptr ([16384 x i32] addrspace(3)* @dst, i32 0, i32 16), i32 addrspace(3)* addrspace(3)* @ptr
ret void
}
+
+; CHECK-LABEL: @local_address_store
+; CHECK: DS_WRITE_B32
+define void @local_address_store(i32 addrspace(3)* %out, i32 %val) {
+ store i32 %val, i32 addrspace(3)* %out
+ ret void
+}
+
+; CHECK-LABEL: @local_address_gep_store
+; CHECK: S_ADD_I32 [[SADDR:s[0-9]+]],
+; CHECK: V_MOV_B32_e32 [[ADDR:v[0-9]+]], [[SADDR]]
+; CHECK: DS_WRITE_B32 [[ADDR]], v{{[0-9]+}},
+define void @local_address_gep_store(i32 addrspace(3)* %out, i32, i32 %val, i32 %offset) {
+ %gep = getelementptr i32 addrspace(3)* %out, i32 %offset
+ store i32 %val, i32 addrspace(3)* %gep, align 4
+ ret void
+}
+
+; CHECK-LABEL: @local_address_gep_const_offset_store
+; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], s{{[0-9]+}}
+; CHECK: V_MOV_B32_e32 [[VAL:v[0-9]+]], s{{[0-9]+}}
+; CHECK: DS_WRITE_B32 [[VPTR]], [[VAL]], 4
+define void @local_address_gep_const_offset_store(i32 addrspace(3)* %out, i32 %val) {
+ %gep = getelementptr i32 addrspace(3)* %out, i32 1
+ store i32 %val, i32 addrspace(3)* %gep, align 4
+ ret void
+}
+
+; Offset too large, can't fold into 16-bit immediate offset.
+; CHECK-LABEL: @local_address_gep_large_const_offset_store
+; CHECK: S_ADD_I32 [[SPTR:s[0-9]]], s{{[0-9]+}}, 65540
+; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; CHECK: DS_WRITE_B32 [[VPTR]], v{{[0-9]+}}, 0
+define void @local_address_gep_large_const_offset_store(i32 addrspace(3)* %out, i32 %val) {
+ %gep = getelementptr i32 addrspace(3)* %out, i32 16385
+ store i32 %val, i32 addrspace(3)* %gep, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/add.ll b/test/CodeGen/R600/add.ll
index 3d5506b..e9db52a 100644
--- a/test/CodeGen/R600/add.ll
+++ b/test/CodeGen/R600/add.ll
@@ -1,10 +1,9 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK --check-prefix=FUNC %s
-;EG-CHECK-LABEL: @test1:
+;FUNC-LABEL: @test1:
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK-LABEL: @test1:
;SI-CHECK: V_ADD_I32_e32 [[REG:v[0-9]+]], {{v[0-9]+, v[0-9]+}}
;SI-CHECK-NOT: [[REG]]
;SI-CHECK: BUFFER_STORE_DWORD [[REG]],
@@ -17,11 +16,10 @@ define void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-;EG-CHECK-LABEL: @test2:
+;FUNC-LABEL: @test2:
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK-LABEL: @test2:
;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -34,13 +32,12 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
ret void
}
-;EG-CHECK-LABEL: @test4:
+;FUNC-LABEL: @test4:
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK-LABEL: @test4:
;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -54,3 +51,92 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @test8
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+define void @test8(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) {
+entry:
+ %0 = add <8 x i32> %a, %b
+ store <8 x i32> %0, <8 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @test16
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+define void @test16(<16 x i32> addrspace(1)* %out, <16 x i32> %a, <16 x i32> %b) {
+entry:
+ %0 = add <16 x i32> %a, %b
+ store <16 x i32> %0, <16 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @add64
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADDC_U32
+define void @add64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+entry:
+ %0 = add i64 %a, %b
+ store i64 %0, i64 addrspace(1)* %out
+ ret void
+}
+
+; The V_ADDC_U32 and V_ADD_I32 instructions can't read SGPRs, because they
+; use VCC. The test is designed so that %a will be stored in an SGPR and
+; %0 will be stored in a VGPR, so the compiler will be forced to copy %a
+; to a VGPR before doing the add.
+
+; FUNC-LABEL: @add64_sgpr_vgpr
+; SI-CHECK-NOT: V_ADDC_U32_e32 s
+define void @add64_sgpr_vgpr(i64 addrspace(1)* %out, i64 %a, i64 addrspace(1)* %in) {
+entry:
+ %0 = load i64 addrspace(1)* %in
+ %1 = add i64 %a, %0
+ store i64 %1, i64 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/add_i64.ll b/test/CodeGen/R600/add_i64.ll
index 303a1cb..7081b07 100644
--- a/test/CodeGen/R600/add_i64.ll
+++ b/test/CodeGen/R600/add_i64.ll
@@ -1,14 +1,13 @@
-; XFAIL: *
-; This will fail until i64 add is enabled
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
-; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck --check-prefix=SI %s
-
-declare i32 @llvm.SI.tid() readnone
+declare i32 @llvm.r600.read.tidig.x() readnone
; SI-LABEL: @test_i64_vreg:
+; SI: V_ADD_I32
+; SI: V_ADDC_U32
define void @test_i64_vreg(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) {
- %tid = call i32 @llvm.SI.tid() readnone
+ %tid = call i32 @llvm.r600.read.tidig.x() readnone
%a_ptr = getelementptr i64 addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr i64 addrspace(1)* %inB, i32 %tid
%a = load i64 addrspace(1)* %a_ptr
@@ -20,6 +19,8 @@ define void @test_i64_vreg(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noa
; Check that the SGPR add operand is correctly moved to a VGPR.
; SI-LABEL: @sgpr_operand:
+; SI: V_ADD_I32
+; SI: V_ADDC_U32
define void @sgpr_operand(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 addrspace(1)* noalias %in_bar, i64 %a) {
%foo = load i64 addrspace(1)* %in, align 8
%result = add i64 %foo, %a
@@ -31,6 +32,8 @@ define void @sgpr_operand(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noal
; SGPR as other operand.
;
; SI-LABEL: @sgpr_operand_reversed:
+; SI: V_ADD_I32
+; SI: V_ADDC_U32
define void @sgpr_operand_reversed(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 %a) {
%foo = load i64 addrspace(1)* %in, align 8
%result = add i64 %a, %foo
@@ -40,6 +43,10 @@ define void @sgpr_operand_reversed(i64 addrspace(1)* noalias %out, i64 addrspace
; SI-LABEL: @test_v2i64_sreg:
+; SI: S_ADD_I32
+; SI: S_ADDC_U32
+; SI: S_ADD_I32
+; SI: S_ADDC_U32
define void @test_v2i64_sreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> %a, <2 x i64> %b) {
%result = add <2 x i64> %a, %b
store <2 x i64> %result, <2 x i64> addrspace(1)* %out
@@ -47,8 +54,12 @@ define void @test_v2i64_sreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> %a,
}
; SI-LABEL: @test_v2i64_vreg:
+; SI: V_ADD_I32
+; SI: V_ADDC_U32
+; SI: V_ADD_I32
+; SI: V_ADDC_U32
define void @test_v2i64_vreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %inA, <2 x i64> addrspace(1)* noalias %inB) {
- %tid = call i32 @llvm.SI.tid() readnone
+ %tid = call i32 @llvm.r600.read.tidig.x() readnone
%a_ptr = getelementptr <2 x i64> addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr <2 x i64> addrspace(1)* %inB, i32 %tid
%a = load <2 x i64> addrspace(1)* %a_ptr
@@ -57,3 +68,17 @@ define void @test_v2i64_vreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> add
store <2 x i64> %result, <2 x i64> addrspace(1)* %out
ret void
}
+
+; SI-LABEL: @trunc_i64_add_to_i32
+; SI: S_LOAD_DWORD [[SREG0:s[0-9]+]],
+; SI: S_LOAD_DWORD [[SREG1:s[0-9]+]],
+; SI: S_ADD_I32 [[SRESULT:s[0-9]+]], [[SREG1]], [[SREG0]]
+; SI-NOT: ADDC
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
+define void @trunc_i64_add_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+ %add = add i64 %b, %a
+ %trunc = trunc i64 %add to i32
+ store i32 %trunc, i32 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/address-space.ll b/test/CodeGen/R600/address-space.ll
index 1fc616a..15d2ed2 100644
--- a/test/CodeGen/R600/address-space.ll
+++ b/test/CodeGen/R600/address-space.ll
@@ -4,11 +4,14 @@
%struct.foo = type { [3 x float], [3 x float] }
+; FIXME: Extra V_MOV from SGPR to VGPR for second read. The address is
+; already in a VGPR after the first read.
+
; CHECK-LABEL: @do_as_ptr_calcs:
-; CHECK: S_ADD_I32 {{s[0-9]+}},
-; CHECK: S_ADD_I32 [[SREG1:s[0-9]+]],
+; CHECK: S_LOAD_DWORD [[SREG1:s[0-9]+]],
; CHECK: V_MOV_B32_e32 [[VREG1:v[0-9]+]], [[SREG1]]
-; CHECK: DS_READ_B32 [[VREG1]],
+; CHECK: DS_READ_B32 v{{[0-9]+}}, [[VREG1]], 20
+; CHECK: DS_READ_B32 v{{[0-9]+}}, v{{[0-9]+}}, 12
define void @do_as_ptr_calcs(%struct.foo addrspace(3)* nocapture %ptr) nounwind {
entry:
%x = getelementptr inbounds %struct.foo addrspace(3)* %ptr, i32 0, i32 1, i32 0
diff --git a/test/CodeGen/R600/anyext.ll b/test/CodeGen/R600/anyext.ll
new file mode 100644
index 0000000..bbe5d0a
--- /dev/null
+++ b/test/CodeGen/R600/anyext.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+
+; CHECK-LABEL: @anyext_i1_i32
+; CHECK: V_CNDMASK_B32_e64
+define void @anyext_i1_i32(i32 addrspace(1)* %out, i32 %cond) {
+entry:
+ %0 = icmp eq i32 %cond, 0
+ %1 = zext i1 %0 to i8
+ %2 = xor i8 %1, -1
+ %3 = and i8 %2, 1
+ %4 = zext i8 %3 to i32
+ store i32 %4, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/array-ptr-calc-i32.ll b/test/CodeGen/R600/array-ptr-calc-i32.ll
new file mode 100644
index 0000000..cb2a1c8
--- /dev/null
+++ b/test/CodeGen/R600/array-ptr-calc-i32.ll
@@ -0,0 +1,31 @@
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+
+declare i32 @llvm.SI.tid() nounwind readnone
+declare void @llvm.AMDGPU.barrier.local() nounwind noduplicate
+
+; The pointer calculation required for the alloca actually requires an
+; add and won't be folded into the addressing, which fails with a
+; 64-bit pointer add. This should work since private pointers should
+; be 32 bits.
+
+; SI-LABEL: @test_private_array_ptr_calc:
+; SI: V_ADD_I32_e32 [[PTRREG:v[0-9]+]]
+; SI: V_MOVRELD_B32_e32 {{v[0-9]+}}, [[PTRREG]]
+define void @test_private_array_ptr_calc(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) {
+ %alloca = alloca [4 x i32], i32 4, align 16
+ %tid = call i32 @llvm.SI.tid() readnone
+ %a_ptr = getelementptr i32 addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr i32 addrspace(1)* %inB, i32 %tid
+ %a = load i32 addrspace(1)* %a_ptr
+ %b = load i32 addrspace(1)* %b_ptr
+ %result = add i32 %a, %b
+ %alloca_ptr = getelementptr [4 x i32]* %alloca, i32 1, i32 %b
+ store i32 %result, i32* %alloca_ptr, align 4
+ ; Dummy call
+ call void @llvm.AMDGPU.barrier.local() nounwind noduplicate
+ %reload = load i32* %alloca_ptr, align 4
+ %out_ptr = getelementptr i32 addrspace(1)* %out, i32 %tid
+ store i32 %reload, i32 addrspace(1)* %out_ptr, align 4
+ ret void
+}
+
diff --git a/test/CodeGen/R600/atomic_load_add.ll b/test/CodeGen/R600/atomic_load_add.ll
index 0bc48a3..cb0242c 100644
--- a/test/CodeGen/R600/atomic_load_add.ll
+++ b/test/CodeGen/R600/atomic_load_add.ll
@@ -4,7 +4,7 @@
; R600-CHECK-LABEL: @atomic_add_local
; R600-CHECK: LDS_ADD *
; SI-CHECK-LABEL: @atomic_add_local
-; SI-CHECK: DS_ADD_U32_RTN 0
+; SI-CHECK: DS_ADD_U32_RTN
define void @atomic_add_local(i32 addrspace(3)* %local) {
entry:
%0 = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
@@ -14,7 +14,7 @@ entry:
; R600-CHECK-LABEL: @atomic_add_ret_local
; R600-CHECK: LDS_ADD_RET *
; SI-CHECK-LABEL: @atomic_add_ret_local
-; SI-CHECK: DS_ADD_U32_RTN 0
+; SI-CHECK: DS_ADD_U32_RTN
define void @atomic_add_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
entry:
%0 = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
diff --git a/test/CodeGen/R600/atomic_load_sub.ll b/test/CodeGen/R600/atomic_load_sub.ll
index e4a6829..7c26e52 100644
--- a/test/CodeGen/R600/atomic_load_sub.ll
+++ b/test/CodeGen/R600/atomic_load_sub.ll
@@ -4,7 +4,7 @@
; R600-CHECK-LABEL: @atomic_sub_local
; R600-CHECK: LDS_SUB *
; SI-CHECK-LABEL: @atomic_sub_local
-; SI-CHECK: DS_SUB_U32_RTN 0
+; SI-CHECK: DS_SUB_U32_RTN
define void @atomic_sub_local(i32 addrspace(3)* %local) {
entry:
%0 = atomicrmw volatile sub i32 addrspace(3)* %local, i32 5 seq_cst
@@ -14,7 +14,7 @@ entry:
; R600-CHECK-LABEL: @atomic_sub_ret_local
; R600-CHECK: LDS_SUB_RET *
; SI-CHECK-LABEL: @atomic_sub_ret_local
-; SI-CHECK: DS_SUB_U32_RTN 0
+; SI-CHECK: DS_SUB_U32_RTN
define void @atomic_sub_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
entry:
%0 = atomicrmw volatile sub i32 addrspace(3)* %local, i32 5 seq_cst
diff --git a/test/CodeGen/R600/basic-branch.ll b/test/CodeGen/R600/basic-branch.ll
new file mode 100644
index 0000000..d084132
--- /dev/null
+++ b/test/CodeGen/R600/basic-branch.ll
@@ -0,0 +1,15 @@
+; XFAIL: *
+; RUN: llc -O0 -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s
+
+; CHECK-LABEL: @test_branch(
+define void @test_branch(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %val) nounwind {
+ %cmp = icmp ne i32 %val, 0
+ br i1 %cmp, label %store, label %end
+
+store:
+ store i32 222, i32 addrspace(1)* %out
+ ret void
+
+end:
+ ret void
+}
diff --git a/test/CodeGen/R600/basic-loop.ll b/test/CodeGen/R600/basic-loop.ll
new file mode 100644
index 0000000..6d0ff07
--- /dev/null
+++ b/test/CodeGen/R600/basic-loop.ll
@@ -0,0 +1,18 @@
+; XFAIL: *
+; RUN: llc -O0 -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck %s
+
+; CHECK-LABEL: @test_loop:
+define void @test_loop(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %val) nounwind {
+entry:
+ br label %loop.body
+
+loop.body:
+ %i = phi i32 [0, %entry], [%i.inc, %loop.body]
+ store i32 222, i32 addrspace(1)* %out
+ %cmp = icmp ne i32 %i, %val
+ %i.inc = add i32 %i, 1
+ br i1 %cmp, label %loop.body, label %end
+
+end:
+ ret void
+}
diff --git a/test/CodeGen/R600/bfe_uint.ll b/test/CodeGen/R600/bfe_uint.ll
index 92570c3..fe466e6 100644
--- a/test/CodeGen/R600/bfe_uint.ll
+++ b/test/CodeGen/R600/bfe_uint.ll
@@ -1,5 +1,7 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; XFAIL: *
+
; CHECK: @bfe_def
; CHECK: BFE_UINT
define void @bfe_def(i32 addrspace(1)* %out, i32 %x) {
diff --git a/test/CodeGen/R600/bitcast.ll b/test/CodeGen/R600/bitcast.ll
index bccc416..5bfc008 100644
--- a/test/CodeGen/R600/bitcast.ll
+++ b/test/CodeGen/R600/bitcast.ll
@@ -19,3 +19,12 @@ declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float
attributes #0 = { "ShaderType"="0" }
+; CHECK-LABEL: @i8ptr_v16i8ptr
+; CHECK: S_ENDPGM
+define void @i8ptr_v16i8ptr(<16 x i8> addrspace(1)* %out, i8 addrspace(1)* %in) {
+entry:
+ %0 = bitcast i8 addrspace(1)* %in to <16 x i8> addrspace(1)*
+ %1 = load <16 x i8> addrspace(1)* %0
+ store <16 x i8> %1, <16 x i8> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/cayman-loop-bug.ll b/test/CodeGen/R600/cayman-loop-bug.ll
new file mode 100644
index 0000000..a873528
--- /dev/null
+++ b/test/CodeGen/R600/cayman-loop-bug.ll
@@ -0,0 +1,32 @@
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s
+
+; CHECK-LABEL: @main
+; CHECK: LOOP_START_DX10
+; CHECK: ALU_PUSH_BEFORE
+; CHECK: LOOP_START_DX10
+; CHECK: PUSH
+; CHECK-NOT: ALU_PUSH_BEFORE
+; CHECK: END_LOOP
+; CHECK: END_LOOP
+define void @main (<4 x float> inreg %reg0) #0 {
+entry:
+ br label %outer_loop
+outer_loop:
+ %cnt = phi i32 [0, %entry], [%cnt_incr, %inner_loop]
+ %cond = icmp eq i32 %cnt, 16
+ br i1 %cond, label %outer_loop_body, label %exit
+outer_loop_body:
+ %cnt_incr = add i32 %cnt, 1
+ br label %inner_loop
+inner_loop:
+ %cnt2 = phi i32 [0, %outer_loop_body], [%cnt2_incr, %inner_loop_body]
+ %cond2 = icmp eq i32 %cnt2, 16
+ br i1 %cond2, label %inner_loop_body, label %outer_loop
+inner_loop_body:
+ %cnt2_incr = add i32 %cnt2, 1
+ br label %inner_loop
+exit:
+ ret void
+}
+
+attributes #0 = { "ShaderType"="0" }
\ No newline at end of file
diff --git a/test/CodeGen/R600/cf-stack-bug.ll b/test/CodeGen/R600/cf-stack-bug.ll
new file mode 100644
index 0000000..c3a4612
--- /dev/null
+++ b/test/CodeGen/R600/cf-stack-bug.ll
@@ -0,0 +1,227 @@
+; RUN: llc -march=r600 -mcpu=redwood -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=BUG64 --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=sumo -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=BUG64 --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=barts -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=BUG64 --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=turks -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=BUG64 --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=caicos -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=BUG64 --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=cedar -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=BUG32 --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=juniper -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=NOBUG --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=cypress -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=NOBUG --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=cayman -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=NOBUG --check-prefix=FUNC
+
+; REQUIRES: asserts
+
+; We are currently allocating 2 extra sub-entries on Evergreen / NI for
+; non-WQM push instructions. If we change this to 1, then we will need to
+; add one level of depth to each of these tests.
+
+; BUG64-NOT: Applying bug work-around
+; BUG32-NOT: Applying bug work-around
+; NOBUG-NOT: Applying bug work-around
+; FUNC-LABEL: @nested3
+define void @nested3(i32 addrspace(1)* %out, i32 %cond) {
+entry:
+ %0 = icmp sgt i32 %cond, 0
+ br i1 %0, label %if.1, label %end
+
+if.1:
+ %1 = icmp sgt i32 %cond, 10
+ br i1 %1, label %if.2, label %if.store.1
+
+if.store.1:
+ store i32 1, i32 addrspace(1)* %out
+ br label %end
+
+if.2:
+ %2 = icmp sgt i32 %cond, 20
+ br i1 %2, label %if.3, label %if.2.store
+
+if.2.store:
+ store i32 2, i32 addrspace(1)* %out
+ br label %end
+
+if.3:
+ store i32 3, i32 addrspace(1)* %out
+ br label %end
+
+end:
+ ret void
+}
+
+; BUG64: Applying bug work-around
+; BUG32-NOT: Applying bug work-around
+; NOBUG-NOT: Applying bug work-around
+; FUNC-LABEL: @nested4
+define void @nested4(i32 addrspace(1)* %out, i32 %cond) {
+entry:
+ %0 = icmp sgt i32 %cond, 0
+ br i1 %0, label %if.1, label %end
+
+if.1:
+ %1 = icmp sgt i32 %cond, 10
+ br i1 %1, label %if.2, label %if.1.store
+
+if.1.store:
+ store i32 1, i32 addrspace(1)* %out
+ br label %end
+
+if.2:
+ %2 = icmp sgt i32 %cond, 20
+ br i1 %2, label %if.3, label %if.2.store
+
+if.2.store:
+ store i32 2, i32 addrspace(1)* %out
+ br label %end
+
+if.3:
+ %3 = icmp sgt i32 %cond, 30
+ br i1 %3, label %if.4, label %if.3.store
+
+if.3.store:
+ store i32 3, i32 addrspace(1)* %out
+ br label %end
+
+if.4:
+ store i32 4, i32 addrspace(1)* %out
+ br label %end
+
+end:
+ ret void
+}
+
+; BUG64: Applying bug work-around
+; BUG32-NOT: Applying bug work-around
+; NOBUG-NOT: Applying bug work-around
+; FUNC-LABEL: @nested7
+define void @nested7(i32 addrspace(1)* %out, i32 %cond) {
+entry:
+ %0 = icmp sgt i32 %cond, 0
+ br i1 %0, label %if.1, label %end
+
+if.1:
+ %1 = icmp sgt i32 %cond, 10
+ br i1 %1, label %if.2, label %if.1.store
+
+if.1.store:
+ store i32 1, i32 addrspace(1)* %out
+ br label %end
+
+if.2:
+ %2 = icmp sgt i32 %cond, 20
+ br i1 %2, label %if.3, label %if.2.store
+
+if.2.store:
+ store i32 2, i32 addrspace(1)* %out
+ br label %end
+
+if.3:
+ %3 = icmp sgt i32 %cond, 30
+ br i1 %3, label %if.4, label %if.3.store
+
+if.3.store:
+ store i32 3, i32 addrspace(1)* %out
+ br label %end
+
+if.4:
+ %4 = icmp sgt i32 %cond, 40
+ br i1 %4, label %if.5, label %if.4.store
+
+if.4.store:
+ store i32 4, i32 addrspace(1)* %out
+ br label %end
+
+if.5:
+ %5 = icmp sgt i32 %cond, 50
+ br i1 %5, label %if.6, label %if.5.store
+
+if.5.store:
+ store i32 5, i32 addrspace(1)* %out
+ br label %end
+
+if.6:
+ %6 = icmp sgt i32 %cond, 60
+ br i1 %6, label %if.7, label %if.6.store
+
+if.6.store:
+ store i32 6, i32 addrspace(1)* %out
+ br label %end
+
+if.7:
+ store i32 7, i32 addrspace(1)* %out
+ br label %end
+
+end:
+ ret void
+}
+
+; BUG64: Applying bug work-around
+; BUG32: Applying bug work-around
+; NOBUG-NOT: Applying bug work-around
+; FUNC-LABEL: @nested8
+define void @nested8(i32 addrspace(1)* %out, i32 %cond) {
+entry:
+ %0 = icmp sgt i32 %cond, 0
+ br i1 %0, label %if.1, label %end
+
+if.1:
+ %1 = icmp sgt i32 %cond, 10
+ br i1 %1, label %if.2, label %if.1.store
+
+if.1.store:
+ store i32 1, i32 addrspace(1)* %out
+ br label %end
+
+if.2:
+ %2 = icmp sgt i32 %cond, 20
+ br i1 %2, label %if.3, label %if.2.store
+
+if.2.store:
+ store i32 2, i32 addrspace(1)* %out
+ br label %end
+
+if.3:
+ %3 = icmp sgt i32 %cond, 30
+ br i1 %3, label %if.4, label %if.3.store
+
+if.3.store:
+ store i32 3, i32 addrspace(1)* %out
+ br label %end
+
+if.4:
+ %4 = icmp sgt i32 %cond, 40
+ br i1 %4, label %if.5, label %if.4.store
+
+if.4.store:
+ store i32 4, i32 addrspace(1)* %out
+ br label %end
+
+if.5:
+ %5 = icmp sgt i32 %cond, 50
+ br i1 %5, label %if.6, label %if.5.store
+
+if.5.store:
+ store i32 5, i32 addrspace(1)* %out
+ br label %end
+
+if.6:
+ %6 = icmp sgt i32 %cond, 60
+ br i1 %6, label %if.7, label %if.6.store
+
+if.6.store:
+ store i32 6, i32 addrspace(1)* %out
+ br label %end
+
+if.7:
+ %7 = icmp sgt i32 %cond, 70
+ br i1 %7, label %if.8, label %if.7.store
+
+if.7.store:
+ store i32 7, i32 addrspace(1)* %out
+ br label %end
+
+if.8:
+ store i32 8, i32 addrspace(1)* %out
+ br label %end
+
+end:
+ ret void
+}
diff --git a/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll b/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
new file mode 100644
index 0000000..f8b4a61
--- /dev/null
+++ b/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
@@ -0,0 +1,19 @@
+; RUN: opt -codegenprepare -S -o - %s | FileCheck --check-prefix=OPT --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-LLC --check-prefix=FUNC %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+target triple = "r600--"
+
+; FUNC-LABEL: @test
+; OPT: mul nsw i32
+; OPT-NEXT: sext
+; SI-LLC: V_MUL_LO_I32
+; SI-LLC-NOT: V_MUL_HI
+define void @test(i8 addrspace(1)* nocapture readonly %in, i32 %a, i8 %b) {
+entry:
+ %0 = mul nsw i32 %a, 3
+ %1 = sext i32 %0 to i64
+ %2 = getelementptr i8 addrspace(1)* %in, i64 %1
+ store i8 %b, i8 addrspace(1)* %2
+ ret void
+}
diff --git a/test/CodeGen/R600/elf.r600.ll b/test/CodeGen/R600/elf.r600.ll
index 0590efb..4436c07 100644
--- a/test/CodeGen/R600/elf.r600.ll
+++ b/test/CodeGen/R600/elf.r600.ll
@@ -6,7 +6,7 @@
; CONFIG-CHECK: .section .AMDGPU.config
; CONFIG-CHECK-NEXT: .long 166100
-; CONFIG-CHECK-NEXT: .long 258
+; CONFIG-CHECK-NEXT: .long 2
; CONFIG-CHECK-NEXT: .long 165900
; CONFIG-CHECK-NEXT: .long 0
define void @test(float addrspace(1)* %out, i32 %p) {
diff --git a/test/CodeGen/R600/extload.ll b/test/CodeGen/R600/extload.ll
index aa660b3..2e70d47 100644
--- a/test/CodeGen/R600/extload.ll
+++ b/test/CodeGen/R600/extload.ll
@@ -1,8 +1,9 @@
-; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG %s
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; EG-LABEL: @anyext_load_i8:
+; FUNC-LABEL: @anyext_load_i8:
; EG: AND_INT
-; EG-NEXT: 255
+; EG: 255
define void @anyext_load_i8(i8 addrspace(1)* nocapture noalias %out, i8 addrspace(1)* nocapture noalias %src) nounwind {
%cast = bitcast i8 addrspace(1)* %src to i32 addrspace(1)*
%load = load i32 addrspace(1)* %cast, align 1
@@ -12,10 +13,11 @@ define void @anyext_load_i8(i8 addrspace(1)* nocapture noalias %out, i8 addrspac
ret void
}
-; EG-LABEL: @anyext_load_i16:
+; FUNC-LABEL: @anyext_load_i16:
; EG: AND_INT
-; EG: LSHL
-; EG: 65535
+; EG: AND_INT
+; EG-DAG: 65535
+; EG-DAG: -65536
define void @anyext_load_i16(i16 addrspace(1)* nocapture noalias %out, i16 addrspace(1)* nocapture noalias %src) nounwind {
%cast = bitcast i16 addrspace(1)* %src to i32 addrspace(1)*
%load = load i32 addrspace(1)* %cast, align 1
@@ -25,9 +27,9 @@ define void @anyext_load_i16(i16 addrspace(1)* nocapture noalias %out, i16 addrs
ret void
}
-; EG-LABEL: @anyext_load_lds_i8:
+; FUNC-LABEL: @anyext_load_lds_i8:
; EG: AND_INT
-; EG-NEXT: 255
+; EG: 255
define void @anyext_load_lds_i8(i8 addrspace(3)* nocapture noalias %out, i8 addrspace(3)* nocapture noalias %src) nounwind {
%cast = bitcast i8 addrspace(3)* %src to i32 addrspace(3)*
%load = load i32 addrspace(3)* %cast, align 1
@@ -37,10 +39,11 @@ define void @anyext_load_lds_i8(i8 addrspace(3)* nocapture noalias %out, i8 addr
ret void
}
-; EG-LABEL: @anyext_load_lds_i16:
+; FUNC-LABEL: @anyext_load_lds_i16:
+; EG: AND_INT
; EG: AND_INT
-; EG: LSHL
-; EG: 65535
+; EG-DAG: 65535
+; EG-DAG: -65536
define void @anyext_load_lds_i16(i16 addrspace(3)* nocapture noalias %out, i16 addrspace(3)* nocapture noalias %src) nounwind {
%cast = bitcast i16 addrspace(3)* %src to i32 addrspace(3)*
%load = load i32 addrspace(3)* %cast, align 1
@@ -49,3 +52,69 @@ define void @anyext_load_lds_i16(i16 addrspace(3)* nocapture noalias %out, i16 a
store <2 x i16> %x, <2 x i16> addrspace(3)* %castOut, align 1
ret void
}
+
+; FUNC-LABEL: @sextload_global_i8_to_i64
+; SI: BUFFER_LOAD_SBYTE [[LOAD:v[0-9]+]],
+; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 31, [[LOAD]]
+; SI: BUFFER_STORE_DWORDX2
+define void @sextload_global_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
+ %a = load i8 addrspace(1)* %in, align 8
+ %ext = sext i8 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sextload_global_i16_to_i64
+; SI: BUFFER_LOAD_SSHORT [[LOAD:v[0-9]+]],
+; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 31, [[LOAD]]
+; SI: BUFFER_STORE_DWORDX2
+define void @sextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
+ %a = load i16 addrspace(1)* %in, align 8
+ %ext = sext i16 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sextload_global_i32_to_i64
+; SI: BUFFER_LOAD_DWORD [[LOAD:v[0-9]+]],
+; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 31, [[LOAD]]
+; SI: BUFFER_STORE_DWORDX2
+define void @sextload_global_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %a = load i32 addrspace(1)* %in, align 8
+ %ext = sext i32 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @zextload_global_i8_to_i64
+; SI: BUFFER_LOAD_UBYTE [[LOAD:v[0-9]+]],
+; SI: V_MOV_B32_e32 {{v[0-9]+}}, 0
+; SI: BUFFER_STORE_DWORDX2
+define void @zextload_global_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
+ %a = load i8 addrspace(1)* %in, align 8
+ %ext = zext i8 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @zextload_global_i16_to_i64
+; SI: BUFFER_LOAD_USHORT [[LOAD:v[0-9]+]],
+; SI: V_MOV_B32_e32 {{v[0-9]+}}, 0
+; SI: BUFFER_STORE_DWORDX2
+define void @zextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
+ %a = load i16 addrspace(1)* %in, align 8
+ %ext = zext i16 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @zextload_global_i32_to_i64
+; SI: BUFFER_LOAD_DWORD [[LOAD:v[0-9]+]],
+; SI: V_MOV_B32_e32 {{v[0-9]+}}, 0
+; SI: BUFFER_STORE_DWORDX2
+define void @zextload_global_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %a = load i32 addrspace(1)* %in, align 8
+ %ext = zext i32 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/fabs.ll b/test/CodeGen/R600/fabs.ll
index a5f5df9..2cd3a4f 100644
--- a/test/CodeGen/R600/fabs.ll
+++ b/test/CodeGen/R600/fabs.ll
@@ -9,7 +9,7 @@
; R600-CHECK-NOT: AND
; R600-CHECK: |PV.{{[XYZW]}}|
; SI-CHECK-LABEL: @fabs_free
-; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 1, 0, 0, 0
+; SI-CHECK: V_AND_B32
define void @fabs_free(float addrspace(1)* %out, i32 %in) {
entry:
@@ -23,8 +23,8 @@ entry:
; R600-CHECK: |{{(PV|T[0-9])\.[XYZW]}}|
; R600-CHECK: |{{(PV|T[0-9])\.[XYZW]}}|
; SI-CHECK-LABEL: @fabs_v2
-; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 1, 0, 0, 0
-; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 1, 0, 0, 0
+; SI-CHECK: V_AND_B32
+; SI-CHECK: V_AND_B32
define void @fabs_v2(<2 x float> addrspace(1)* %out, <2 x float> %in) {
entry:
%0 = call <2 x float> @llvm.fabs.v2f32(<2 x float> %in)
@@ -38,10 +38,10 @@ entry:
; R600-CHECK: |{{(PV|T[0-9])\.[XYZW]}}|
; R600-CHECK: |{{(PV|T[0-9])\.[XYZW]}}|
; SI-CHECK-LABEL: @fabs_v4
-; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 1, 0, 0, 0
-; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 1, 0, 0, 0
-; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 1, 0, 0, 0
-; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 1, 0, 0, 0
+; SI-CHECK: V_AND_B32
+; SI-CHECK: V_AND_B32
+; SI-CHECK: V_AND_B32
+; SI-CHECK: V_AND_B32
define void @fabs_v4(<4 x float> addrspace(1)* %out, <4 x float> %in) {
entry:
%0 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %in)
diff --git a/test/CodeGen/R600/fadd.ll b/test/CodeGen/R600/fadd.ll
index f467bb7..5d2b806 100644
--- a/test/CodeGen/R600/fadd.ll
+++ b/test/CodeGen/R600/fadd.ll
@@ -1,9 +1,8 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK --check-prefix=FUNC
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK --check-prefix=FUNC
-; R600-CHECK: @fadd_f32
+; FUNC-LABEL: @fadd_f32
; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].W
-; SI-CHECK: @fadd_f32
; SI-CHECK: V_ADD_F32
define void @fadd_f32(float addrspace(1)* %out, float %a, float %b) {
entry:
@@ -12,10 +11,9 @@ entry:
ret void
}
-; R600-CHECK: @fadd_v2f32
+; FUNC-LABEL: @fadd_v2f32
; R600-CHECK-DAG: ADD {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
; R600-CHECK-DAG: ADD {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
-; SI-CHECK: @fadd_v2f32
; SI-CHECK: V_ADD_F32
; SI-CHECK: V_ADD_F32
define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
@@ -25,12 +23,11 @@ entry:
ret void
}
-; R600-CHECK: @fadd_v4f32
+; FUNC-LABEL: @fadd_v4f32
; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; SI-CHECK: @fadd_v4f32
; SI-CHECK: V_ADD_F32
; SI-CHECK: V_ADD_F32
; SI-CHECK: V_ADD_F32
@@ -43,3 +40,27 @@ define void @fadd_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)
store <4 x float> %result, <4 x float> addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @fadd_v8f32
+; R600-CHECK: ADD
+; R600-CHECK: ADD
+; R600-CHECK: ADD
+; R600-CHECK: ADD
+; R600-CHECK: ADD
+; R600-CHECK: ADD
+; R600-CHECK: ADD
+; R600-CHECK: ADD
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+define void @fadd_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b) {
+entry:
+ %0 = fadd <8 x float> %a, %b
+ store <8 x float> %0, <8 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fceil.ll b/test/CodeGen/R600/fceil.ll
new file mode 100644
index 0000000..b8b945f
--- /dev/null
+++ b/test/CodeGen/R600/fceil.ll
@@ -0,0 +1,84 @@
+; RUN: llc -march=r600 -mcpu=bonaire < %s | FileCheck -check-prefix=CI %s
+
+declare double @llvm.ceil.f64(double) nounwind readnone
+declare <2 x double> @llvm.ceil.v2f64(<2 x double>) nounwind readnone
+declare <3 x double> @llvm.ceil.v3f64(<3 x double>) nounwind readnone
+declare <4 x double> @llvm.ceil.v4f64(<4 x double>) nounwind readnone
+declare <8 x double> @llvm.ceil.v8f64(<8 x double>) nounwind readnone
+declare <16 x double> @llvm.ceil.v16f64(<16 x double>) nounwind readnone
+
+; CI-LABEL: @fceil_f64:
+; CI: V_CEIL_F64_e32
+define void @fceil_f64(double addrspace(1)* %out, double %x) {
+ %y = call double @llvm.ceil.f64(double %x) nounwind readnone
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CI-LABEL: @fceil_v2f64:
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+define void @fceil_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
+ %y = call <2 x double> @llvm.ceil.v2f64(<2 x double> %x) nounwind readnone
+ store <2 x double> %y, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; FIXME-CI-LABEL: @fceil_v3f64:
+; FIXME-CI: V_CEIL_F64_e32
+; FIXME-CI: V_CEIL_F64_e32
+; FIXME-CI: V_CEIL_F64_e32
+; define void @fceil_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
+; %y = call <3 x double> @llvm.ceil.v3f64(<3 x double> %x) nounwind readnone
+; store <3 x double> %y, <3 x double> addrspace(1)* %out
+; ret void
+; }
+
+; CI-LABEL: @fceil_v4f64:
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+define void @fceil_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
+ %y = call <4 x double> @llvm.ceil.v4f64(<4 x double> %x) nounwind readnone
+ store <4 x double> %y, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+; CI-LABEL: @fceil_v8f64:
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+define void @fceil_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
+ %y = call <8 x double> @llvm.ceil.v8f64(<8 x double> %x) nounwind readnone
+ store <8 x double> %y, <8 x double> addrspace(1)* %out
+ ret void
+}
+
+; CI-LABEL: @fceil_v16f64:
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+define void @fceil_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
+ %y = call <16 x double> @llvm.ceil.v16f64(<16 x double> %x) nounwind readnone
+ store <16 x double> %y, <16 x double> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/ffloor.ll b/test/CodeGen/R600/ffloor.ll
new file mode 100644
index 0000000..51d2b89
--- /dev/null
+++ b/test/CodeGen/R600/ffloor.ll
@@ -0,0 +1,84 @@
+; RUN: llc -march=r600 -mcpu=bonaire < %s | FileCheck -check-prefix=CI %s
+
+declare double @llvm.floor.f64(double) nounwind readnone
+declare <2 x double> @llvm.floor.v2f64(<2 x double>) nounwind readnone
+declare <3 x double> @llvm.floor.v3f64(<3 x double>) nounwind readnone
+declare <4 x double> @llvm.floor.v4f64(<4 x double>) nounwind readnone
+declare <8 x double> @llvm.floor.v8f64(<8 x double>) nounwind readnone
+declare <16 x double> @llvm.floor.v16f64(<16 x double>) nounwind readnone
+
+; CI-LABEL: @ffloor_f64:
+; CI: V_FLOOR_F64_e32
+define void @ffloor_f64(double addrspace(1)* %out, double %x) {
+ %y = call double @llvm.floor.f64(double %x) nounwind readnone
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CI-LABEL: @ffloor_v2f64:
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+define void @ffloor_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
+ %y = call <2 x double> @llvm.floor.v2f64(<2 x double> %x) nounwind readnone
+ store <2 x double> %y, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; FIXME-CI-LABEL: @ffloor_v3f64:
+; FIXME-CI: V_FLOOR_F64_e32
+; FIXME-CI: V_FLOOR_F64_e32
+; FIXME-CI: V_FLOOR_F64_e32
+; define void @ffloor_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
+; %y = call <3 x double> @llvm.floor.v3f64(<3 x double> %x) nounwind readnone
+; store <3 x double> %y, <3 x double> addrspace(1)* %out
+; ret void
+; }
+
+; CI-LABEL: @ffloor_v4f64:
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+define void @ffloor_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
+ %y = call <4 x double> @llvm.floor.v4f64(<4 x double> %x) nounwind readnone
+ store <4 x double> %y, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+; CI-LABEL: @ffloor_v8f64:
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+define void @ffloor_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
+ %y = call <8 x double> @llvm.floor.v8f64(<8 x double> %x) nounwind readnone
+ store <8 x double> %y, <8 x double> addrspace(1)* %out
+ ret void
+}
+
+; CI-LABEL: @ffloor_v16f64:
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+define void @ffloor_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
+ %y = call <16 x double> @llvm.floor.v16f64(<16 x double> %x) nounwind readnone
+ store <16 x double> %y, <16 x double> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fneg-fabs.ll b/test/CodeGen/R600/fneg-fabs.ll
new file mode 100644
index 0000000..d95e131
--- /dev/null
+++ b/test/CodeGen/R600/fneg-fabs.ll
@@ -0,0 +1,55 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+
+; DAGCombiner will transform:
+; (fabs (f32 bitcast (i32 a))) => (f32 bitcast (and (i32 a), 0x7FFFFFFF))
+; unless isFabsFree returns true
+
+; R600-CHECK-LABEL: @fneg_fabs_free
+; R600-CHECK-NOT: AND
+; R600-CHECK: |PV.{{[XYZW]}}|
+; R600-CHECK: -PV
+; SI-CHECK-LABEL: @fneg_fabs_free
+; SI-CHECK: V_OR_B32
+
+define void @fneg_fabs_free(float addrspace(1)* %out, i32 %in) {
+entry:
+ %0 = bitcast i32 %in to float
+ %1 = call float @fabs(float %0)
+ %2 = fsub float -0.000000e+00, %1
+ store float %2, float addrspace(1)* %out
+ ret void
+}
+
+; R600-CHECK-LABEL: @fneg_fabs_v2
+; R600-CHECK: |{{(PV|T[0-9])\.[XYZW]}}|
+; R600-CHECK: -PV
+; R600-CHECK: |{{(PV|T[0-9])\.[XYZW]}}|
+; R600-CHECK: -PV
+; SI-CHECK-LABEL: @fneg_fabs_v2
+; SI-CHECK: V_OR_B32
+; SI-CHECK: V_OR_B32
+define void @fneg_fabs_v2(<2 x float> addrspace(1)* %out, <2 x float> %in) {
+entry:
+ %0 = call <2 x float> @llvm.fabs.v2f32(<2 x float> %in)
+ %1 = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %0
+ store <2 x float> %1, <2 x float> addrspace(1)* %out
+ ret void
+}
+
+; SI-CHECK-LABEL: @fneg_fabs_v4
+; SI-CHECK: V_OR_B32
+; SI-CHECK: V_OR_B32
+; SI-CHECK: V_OR_B32
+; SI-CHECK: V_OR_B32
+define void @fneg_fabs_v4(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+entry:
+ %0 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %in)
+ %1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %0
+ store <4 x float> %1, <4 x float> addrspace(1)* %out
+ ret void
+}
+
+declare float @fabs(float ) readnone
+declare <2 x float> @llvm.fabs.v2f32(<2 x float> ) readnone
+declare <4 x float> @llvm.fabs.v4f32(<4 x float> ) readnone
diff --git a/test/CodeGen/R600/fneg.ll b/test/CodeGen/R600/fneg.ll
index 9446aa8..f4e6be6 100644
--- a/test/CodeGen/R600/fneg.ll
+++ b/test/CodeGen/R600/fneg.ll
@@ -4,7 +4,7 @@
; R600-CHECK-LABEL: @fneg
; R600-CHECK: -PV
; SI-CHECK-LABEL: @fneg
-; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 0, 0, 0, 1
+; SI-CHECK: V_XOR_B32
define void @fneg(float addrspace(1)* %out, float %in) {
entry:
%0 = fsub float -0.000000e+00, %in
@@ -16,8 +16,8 @@ entry:
; R600-CHECK: -PV
; R600-CHECK: -PV
; SI-CHECK-LABEL: @fneg_v2
-; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 0, 0, 0, 1
-; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 0, 0, 0, 1
+; SI-CHECK: V_XOR_B32
+; SI-CHECK: V_XOR_B32
define void @fneg_v2(<2 x float> addrspace(1)* nocapture %out, <2 x float> %in) {
entry:
%0 = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %in
@@ -31,10 +31,10 @@ entry:
; R600-CHECK: -PV
; R600-CHECK: -PV
; SI-CHECK-LABEL: @fneg_v4
-; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 0, 0, 0, 1
-; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 0, 0, 0, 1
-; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 0, 0, 0, 1
-; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 0, 0, 0, 1
+; SI-CHECK: V_XOR_B32
+; SI-CHECK: V_XOR_B32
+; SI-CHECK: V_XOR_B32
+; SI-CHECK: V_XOR_B32
define void @fneg_v4(<4 x float> addrspace(1)* nocapture %out, <4 x float> %in) {
entry:
%0 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %in
diff --git a/test/CodeGen/R600/ftrunc.ll b/test/CodeGen/R600/ftrunc.ll
new file mode 100644
index 0000000..6b235ff
--- /dev/null
+++ b/test/CodeGen/R600/ftrunc.ll
@@ -0,0 +1,84 @@
+; RUN: llc -march=r600 -mcpu=bonaire < %s | FileCheck -check-prefix=CI %s
+
+declare double @llvm.trunc.f64(double) nounwind readnone
+declare <2 x double> @llvm.trunc.v2f64(<2 x double>) nounwind readnone
+declare <3 x double> @llvm.trunc.v3f64(<3 x double>) nounwind readnone
+declare <4 x double> @llvm.trunc.v4f64(<4 x double>) nounwind readnone
+declare <8 x double> @llvm.trunc.v8f64(<8 x double>) nounwind readnone
+declare <16 x double> @llvm.trunc.v16f64(<16 x double>) nounwind readnone
+
+; CI-LABEL: @ftrunc_f64:
+; CI: V_TRUNC_F64_e32
+define void @ftrunc_f64(double addrspace(1)* %out, double %x) {
+ %y = call double @llvm.trunc.f64(double %x) nounwind readnone
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CI-LABEL: @ftrunc_v2f64:
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+define void @ftrunc_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
+ %y = call <2 x double> @llvm.trunc.v2f64(<2 x double> %x) nounwind readnone
+ store <2 x double> %y, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; FIXME-CI-LABEL: @ftrunc_v3f64:
+; FIXME-CI: V_TRUNC_F64_e32
+; FIXME-CI: V_TRUNC_F64_e32
+; FIXME-CI: V_TRUNC_F64_e32
+; define void @ftrunc_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
+; %y = call <3 x double> @llvm.trunc.v3f64(<3 x double> %x) nounwind readnone
+; store <3 x double> %y, <3 x double> addrspace(1)* %out
+; ret void
+; }
+
+; CI-LABEL: @ftrunc_v4f64:
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+define void @ftrunc_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
+ %y = call <4 x double> @llvm.trunc.v4f64(<4 x double> %x) nounwind readnone
+ store <4 x double> %y, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+; CI-LABEL: @ftrunc_v8f64:
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+define void @ftrunc_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
+ %y = call <8 x double> @llvm.trunc.v8f64(<8 x double> %x) nounwind readnone
+ store <8 x double> %y, <8 x double> addrspace(1)* %out
+ ret void
+}
+
+; CI-LABEL: @ftrunc_v16f64:
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+; CI: V_TRUNC_F64_e32
+define void @ftrunc_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
+ %y = call <16 x double> @llvm.trunc.v16f64(<16 x double> %x) nounwind readnone
+ store <16 x double> %y, <16 x double> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/gep-address-space.ll b/test/CodeGen/R600/gep-address-space.ll
index 4ea21dd..ee914fa 100644
--- a/test/CodeGen/R600/gep-address-space.ll
+++ b/test/CodeGen/R600/gep-address-space.ll
@@ -1,13 +1,23 @@
; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck %s
define void @use_gep_address_space([1024 x i32] addrspace(3)* %array) nounwind {
-; CHECK-LABEL @use_gep_address_space:
-; CHECK: S_ADD_I32
+; CHECK-LABEL: @use_gep_address_space:
+; CHECK: V_MOV_B32_e32 [[PTR:v[0-9]+]], s{{[0-9]+}}
+; CHECK: DS_WRITE_B32 [[PTR]], v{{[0-9]+}}, 64
%p = getelementptr [1024 x i32] addrspace(3)* %array, i16 0, i16 16
store i32 99, i32 addrspace(3)* %p
ret void
}
+define void @use_gep_address_space_large_offset([1024 x i32] addrspace(3)* %array) nounwind {
+; CHECK-LABEL: @use_gep_address_space_large_offset:
+; CHECK: S_ADD_I32
+; CHECK: DS_WRITE_B32
+ %p = getelementptr [1024 x i32] addrspace(3)* %array, i16 0, i16 16384
+ store i32 99, i32 addrspace(3)* %p
+ ret void
+}
+
define void @gep_as_vector_v4(<4 x [1024 x i32] addrspace(3)*> %array) nounwind {
; CHECK-LABEL: @gep_as_vector_v4:
; CHECK: S_ADD_I32
diff --git a/test/CodeGen/R600/gv-const-addrspace.ll b/test/CodeGen/R600/gv-const-addrspace.ll
new file mode 100644
index 0000000..cda7ab1
--- /dev/null
+++ b/test/CodeGen/R600/gv-const-addrspace.ll
@@ -0,0 +1,41 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600 --check-prefix=FUNC
+
+; XXX: Test on SI once 64-bit adds are supported.
+
+@float_gv = internal addrspace(2) unnamed_addr constant [5 x float] [float 0.0, float 1.0, float 2.0, float 3.0, float 4.0], align 4
+
+; FUNC-LABEL: @float
+
+; R600-DAG: MOV {{\** *}}T2.X
+; R600-DAG: MOV {{\** *}}T3.X
+; R600-DAG: MOV {{\** *}}T4.X
+; R600-DAG: MOV {{\** *}}T5.X
+; R600-DAG: MOV {{\** *}}T6.X
+; R600: MOVA_INT
+
+define void @float(float addrspace(1)* %out, i32 %index) {
+entry:
+ %0 = getelementptr inbounds [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
+ %1 = load float addrspace(2)* %0
+ store float %1, float addrspace(1)* %out
+ ret void
+}
+
+@i32_gv = internal addrspace(2) unnamed_addr constant [5 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4], align 4
+
+; FUNC-LABEL: @i32
+
+; R600-DAG: MOV {{\** *}}T2.X
+; R600-DAG: MOV {{\** *}}T3.X
+; R600-DAG: MOV {{\** *}}T4.X
+; R600-DAG: MOV {{\** *}}T5.X
+; R600-DAG: MOV {{\** *}}T6.X
+; R600: MOVA_INT
+
+define void @i32(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %0 = getelementptr inbounds [5 x i32] addrspace(2)* @i32_gv, i32 0, i32 %index
+ %1 = load i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/icmp64.ll b/test/CodeGen/R600/icmp64.ll
new file mode 100644
index 0000000..c9e62ff
--- /dev/null
+++ b/test/CodeGen/R600/icmp64.ll
@@ -0,0 +1,92 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @test_i64_eq:
+; SI: V_CMP_EQ_I64
+define void @test_i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp eq i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_ne:
+; SI: V_CMP_NE_I64
+define void @test_i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp ne i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_slt:
+; SI: V_CMP_LT_I64
+define void @test_i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp slt i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_ult:
+; SI: V_CMP_LT_U64
+define void @test_i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp ult i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_sle:
+; SI: V_CMP_LE_I64
+define void @test_i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp sle i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_ule:
+; SI: V_CMP_LE_U64
+define void @test_i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp ule i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_sgt:
+; SI: V_CMP_GT_I64
+define void @test_i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp sgt i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_ugt:
+; SI: V_CMP_GT_U64
+define void @test_i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp ugt i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_sge:
+; SI: V_CMP_GE_I64
+define void @test_i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp sge i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_uge:
+; SI: V_CMP_GE_U64
+define void @test_i64_uge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp uge i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
diff --git a/test/CodeGen/R600/indirect-private-64.ll b/test/CodeGen/R600/indirect-private-64.ll
new file mode 100644
index 0000000..4d1f734
--- /dev/null
+++ b/test/CodeGen/R600/indirect-private-64.ll
@@ -0,0 +1,75 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare void @llvm.AMDGPU.barrier.local() noduplicate nounwind
+
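+; Private (scratch) accesses of 64-bit and wider types are split into
+; 32-bit words, so each indexed store/load below is expected to expand to
+; a V_MOVRELD/V_MOVRELS pair per 64-bit element.
+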
+; SI-LABEL: @private_access_f64_alloca:
+; SI: V_MOVRELD_B32_e32
+; SI: V_MOVRELD_B32_e32
+; SI: V_MOVRELS_B32_e32
+; SI: V_MOVRELS_B32_e32
+define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in, i32 %b) nounwind {
+ %val = load double addrspace(1)* %in, align 8
+ %array = alloca double, i32 16, align 8
+ %ptr = getelementptr double* %array, i32 %b
+ store double %val, double* %ptr, align 8
+ call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
+ %result = load double* %ptr, align 8
+ store double %result, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @private_access_v2f64_alloca:
+; SI: V_MOVRELD_B32_e32
+; SI: V_MOVRELD_B32_e32
+; SI: V_MOVRELD_B32_e32
+; SI: V_MOVRELD_B32_e32
+; SI: V_MOVRELS_B32_e32
+; SI: V_MOVRELS_B32_e32
+; SI: V_MOVRELS_B32_e32
+; SI: V_MOVRELS_B32_e32
+define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) nounwind {
+ %val = load <2 x double> addrspace(1)* %in, align 16
+ %array = alloca <2 x double>, i32 16, align 16
+ %ptr = getelementptr <2 x double>* %array, i32 %b
+ store <2 x double> %val, <2 x double>* %ptr, align 16
+ call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
+ %result = load <2 x double>* %ptr, align 16
+ store <2 x double> %result, <2 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @private_access_i64_alloca:
+; SI: V_MOVRELD_B32_e32
+; SI: V_MOVRELD_B32_e32
+; SI: V_MOVRELS_B32_e32
+; SI: V_MOVRELS_B32_e32
+define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i32 %b) nounwind {
+ %val = load i64 addrspace(1)* %in, align 8
+ %array = alloca i64, i32 16, align 8
+ %ptr = getelementptr i64* %array, i32 %b
+ store i64 %val, i64* %ptr, align 8
+ call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
+ %result = load i64* %ptr, align 8
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @private_access_v2i64_alloca:
+; SI: V_MOVRELD_B32_e32
+; SI: V_MOVRELD_B32_e32
+; SI: V_MOVRELD_B32_e32
+; SI: V_MOVRELD_B32_e32
+; SI: V_MOVRELS_B32_e32
+; SI: V_MOVRELS_B32_e32
+; SI: V_MOVRELS_B32_e32
+; SI: V_MOVRELS_B32_e32
+define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) nounwind {
+ %val = load <2 x i64> addrspace(1)* %in, align 16
+ %array = alloca <2 x i64>, i32 16, align 16
+ %ptr = getelementptr <2 x i64>* %array, i32 %b
+ store <2 x i64> %val, <2 x i64>* %ptr, align 16
+ call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
+ %result = load <2 x i64>* %ptr, align 16
+ store <2 x i64> %result, <2 x i64> addrspace(1)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/infinite-loop-evergreen.ll b/test/CodeGen/R600/infinite-loop-evergreen.ll
new file mode 100644
index 0000000..f6e39b3
--- /dev/null
+++ b/test/CodeGen/R600/infinite-loop-evergreen.ll
@@ -0,0 +1,10 @@
+; XFAIL: *
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck %s
+
+define void @inf_loop_irreducible_cfg() nounwind {
+entry:
+ br label %block
+
+block:
+ br label %block
+}
diff --git a/test/CodeGen/R600/infinite-loop.ll b/test/CodeGen/R600/infinite-loop.ll
new file mode 100644
index 0000000..a60bc37
--- /dev/null
+++ b/test/CodeGen/R600/infinite-loop.ll
@@ -0,0 +1,17 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @infinite_loop:
+; SI: V_MOV_B32_e32 [[REG:v[0-9]+]], 999
+; SI: BB0_1:
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: S_WAITCNT vmcnt(0) expcnt(0)
+; SI: S_BRANCH BB0_1
+define void @infinite_loop(i32 addrspace(1)* %out) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ store i32 999, i32 addrspace(1)* %out, align 4
+ br label %for.body
+}
+
diff --git a/test/CodeGen/R600/insert_vector_elt.ll b/test/CodeGen/R600/insert_vector_elt.ll
index 05aecce..530d1cc 100644
--- a/test/CodeGen/R600/insert_vector_elt.ll
+++ b/test/CodeGen/R600/insert_vector_elt.ll
@@ -1,16 +1,175 @@
-; XFAIL: *
-; RUN: llc < %s -march=r600 -mcpu=redwood -o %t
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
-define void @var_insert(<4 x i32> addrspace(1)* %out, <4 x i32> %x, i32 %val, i32 %idx) nounwind {
-entry:
- %tmp3 = insertelement <4 x i32> %x, i32 %val, i32 %idx ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp3, <4 x i32> addrspace(1)* %out
+; FIXME: Broken on evergreen
+; FIXME: For some reason the 8- and 16-element vectors are being stored
+; as individual elements instead of with 128-bit stores.
+
+
+; FIXME: Why is the constant moved into the intermediate register and
+; not just directly into the vector component?
+
+; SI-LABEL: @insertelement_v4f32_0:
+; S_LOAD_DWORDX4 s{{[}}[[LOW_REG:[0-9]+]]:
+; V_MOV_B32_e32
+; V_MOV_B32_e32 [[CONSTREG:v[0-9]+]], 5.000000e+00
+; V_MOV_B32_e32 v[[LOW_REG]], [[CONSTREG]]
+; BUFFER_STORE_DWORDX4 v{{[}}[[LOW_REG]]:
+define void @insertelement_v4f32_0(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+ %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 0
+ store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @insertelement_v4f32_1:
+define void @insertelement_v4f32_1(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+ %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 1
+ store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @insertelement_v4f32_2:
+define void @insertelement_v4f32_2(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+ %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 2
+ store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @insertelement_v4f32_3:
+define void @insertelement_v4f32_3(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+ %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 3
+ store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @insertelement_v4i32_0:
+define void @insertelement_v4i32_0(<4 x i32> addrspace(1)* %out, <4 x i32> %a) nounwind {
+ %vecins = insertelement <4 x i32> %a, i32 999, i32 0
+ store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v2f32:
+; SI: V_MOV_B32_e32 [[CONST:v[0-9]+]], 5.000000e+00
+; SI: V_MOVRELD_B32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
+; SI: BUFFER_STORE_DWORDX2 {{v\[}}[[LOW_RESULT_REG]]:
+define void @dynamic_insertelement_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, i32 %b) nounwind {
+ %vecins = insertelement <2 x float> %a, float 5.000000e+00, i32 %b
+ store <2 x float> %vecins, <2 x float> addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v4f32:
+; SI: V_MOV_B32_e32 [[CONST:v[0-9]+]], 5.000000e+00
+; SI: V_MOVRELD_B32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
+; SI: BUFFER_STORE_DWORDX4 {{v\[}}[[LOW_RESULT_REG]]:
+define void @dynamic_insertelement_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %b) nounwind {
+ %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 %b
+ store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v8f32:
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, i32 %b) nounwind {
+ %vecins = insertelement <8 x float> %a, float 5.000000e+00, i32 %b
+ store <8 x float> %vecins, <8 x float> addrspace(1)* %out, align 32
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v16f32:
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %a, i32 %b) nounwind {
+ %vecins = insertelement <16 x float> %a, float 5.000000e+00, i32 %b
+ store <16 x float> %vecins, <16 x float> addrspace(1)* %out, align 64
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v2i32:
+; SI: BUFFER_STORE_DWORDX2
+define void @dynamic_insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, i32 %b) nounwind {
+ %vecins = insertelement <2 x i32> %a, i32 5, i32 %b
+ store <2 x i32> %vecins, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v4i32:
+; SI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, i32 %b) nounwind {
+ %vecins = insertelement <4 x i32> %a, i32 5, i32 %b
+ store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v8i32:
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, i32 %b) nounwind {
+ %vecins = insertelement <8 x i32> %a, i32 5, i32 %b
+ store <8 x i32> %vecins, <8 x i32> addrspace(1)* %out, align 32
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v16i32:
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> %a, i32 %b) nounwind {
+ %vecins = insertelement <16 x i32> %a, i32 5, i32 %b
+ store <16 x i32> %vecins, <16 x i32> addrspace(1)* %out, align 64
+ ret void
+}
+
+
+; SI-LABEL: @dynamic_insertelement_v2i16:
+; FIXMESI: BUFFER_STORE_DWORDX2
+define void @dynamic_insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, i32 %b) nounwind {
+ %vecins = insertelement <2 x i16> %a, i16 5, i32 %b
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v4i16:
+; FIXMESI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, i32 %b) nounwind {
+ %vecins = insertelement <4 x i16> %a, i16 5, i32 %b
+ store <4 x i16> %vecins, <4 x i16> addrspace(1)* %out, align 16
+ ret void
+}
+
+
+; SI-LABEL: @dynamic_insertelement_v2i8:
+; FIXMESI: BUFFER_STORE_USHORT
+define void @dynamic_insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> %a, i32 %b) nounwind {
+ %vecins = insertelement <2 x i8> %a, i8 5, i32 %b
+ store <2 x i8> %vecins, <2 x i8> addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v4i8:
+; FIXMESI: BUFFER_STORE_DWORD
+define void @dynamic_insertelement_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, i32 %b) nounwind {
+ %vecins = insertelement <4 x i8> %a, i8 5, i32 %b
+ store <4 x i8> %vecins, <4 x i8> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v8i8:
+; FIXMESI: BUFFER_STORE_DWORDX2
+define void @dynamic_insertelement_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> %a, i32 %b) nounwind {
+ %vecins = insertelement <8 x i8> %a, i8 5, i32 %b
+ store <8 x i8> %vecins, <8 x i8> addrspace(1)* %out, align 16
ret void
}
-define void @var_extract(i32 addrspace(1)* %out, <4 x i32> %x, i32 %idx) nounwind {
-entry:
- %tmp3 = extractelement <4 x i32> %x, i32 %idx ; <<i32>> [#uses=1]
- store i32 %tmp3, i32 addrspace(1)* %out
+; SI-LABEL: @dynamic_insertelement_v16i8:
+; FIXMESI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> %a, i32 %b) nounwind {
+ %vecins = insertelement <16 x i8> %a, i8 5, i32 %b
+ store <16 x i8> %vecins, <16 x i8> addrspace(1)* %out, align 16
ret void
}
diff --git a/test/CodeGen/R600/insert_vector_elt_f64.ll b/test/CodeGen/R600/insert_vector_elt_f64.ll
new file mode 100644
index 0000000..e334be1
--- /dev/null
+++ b/test/CodeGen/R600/insert_vector_elt_f64.ll
@@ -0,0 +1,36 @@
+; REQUIRES: asserts
+; XFAIL: *
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+
+
+; SI-LABEL: @dynamic_insertelement_v2f64:
+; SI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, i32 %b) nounwind {
+ %vecins = insertelement <2 x double> %a, double 8.0, i32 %b
+ store <2 x double> %vecins, <2 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v2i64:
+; SI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> %a, i32 %b) nounwind {
+ %vecins = insertelement <2 x i64> %a, i64 5, i32 %b
+ store <2 x i64> %vecins, <2 x i64> addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v4f64:
+; SI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, i32 %b) nounwind {
+ %vecins = insertelement <4 x double> %a, double 8.0, i32 %b
+ store <4 x double> %vecins, <4 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v8f64:
+; SI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, i32 %b) nounwind {
+ %vecins = insertelement <8 x double> %a, double 8.0, i32 %b
+ store <8 x double> %vecins, <8 x double> addrspace(1)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/jump-address.ll b/test/CodeGen/R600/jump-address.ll
index ae9c8bb..a1cd388 100644
--- a/test/CodeGen/R600/jump-address.ll
+++ b/test/CodeGen/R600/jump-address.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-; CHECK: JUMP @3
+; CHECK: JUMP @6
; CHECK: EXPORT
; CHECK-NOT: EXPORT
diff --git a/test/CodeGen/R600/lds-oqap-crash.ll b/test/CodeGen/R600/lds-oqap-crash.ll
new file mode 100644
index 0000000..7959150
--- /dev/null
+++ b/test/CodeGen/R600/lds-oqap-crash.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood -verify-machineinstrs | FileCheck %s
+
+; This tests for a bug in R600EmitClauseMarkers.cpp where the pass
+; searched for a use of the OQAP register to determine whether an LDS
+; instruction could fit in the current clause, but never found one,
+; so it looped forever and hung the compiler.
+;
+; The LDS instruction should not have been defining OQAP in the first place,
+; because the LDS instructions are pseudo instructions and the OQAP
+; reads and writes are bundled together in the same instruction.
+
+; CHECK: @lds_crash
+define void @lds_crash(i32 addrspace(1)* %out, i32 addrspace(3)* %in, i32 %a, i32 %b, i32 %c) {
+entry:
+ %0 = load i32 addrspace(3)* %in
+ ; This block needs to be > 115 ISA instructions to hit the bug,
+ ; so we'll use udiv instructions.
+ %div0 = udiv i32 %0, %b
+ %div1 = udiv i32 %div0, %a
+ %div2 = udiv i32 %div1, 11
+ %div3 = udiv i32 %div2, %a
+ %div4 = udiv i32 %div3, %b
+ %div5 = udiv i32 %div4, %c
+ %div6 = udiv i32 %div5, %div0
+ %div7 = udiv i32 %div6, %div1
+ store i32 %div7, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/lds-output-queue.ll b/test/CodeGen/R600/lds-output-queue.ll
index 63a4332..af0db0d 100644
--- a/test/CodeGen/R600/lds-output-queue.ll
+++ b/test/CodeGen/R600/lds-output-queue.ll
@@ -87,7 +87,7 @@ declare void @llvm.AMDGPU.barrier.local()
; CHECK-LABEL: @local_global_alias
; CHECK: LDS_READ_RET
; CHECK-NOT: ALU clause
-; CHECK MOV * T{{[0-9]\.[XYZW]}}, OQAP
+; CHECK: MOV * T{{[0-9]\.[XYZW]}}, OQAP
define void @local_global_alias(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%0 = getelementptr inbounds [2 x i32] addrspace(3)* @local_mem, i32 0, i32 0
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll b/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
new file mode 100644
index 0000000..c3f000a
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
@@ -0,0 +1,40 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.bfe.i32(i32, i32, i32) nounwind readnone
+
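+; bfe.i32 extracts a bitfield: roughly (src0 >> src1) keeping the low
+; src2 bits, sign-extended to 32 bits (the semantics of V_BFE_I32 and
+; BFE_INT).
+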
+; FUNC-LABEL: @bfe_i32_arg_arg_arg
+; SI: V_BFE_I32
+; EG: BFE_INT
+define void @bfe_i32_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) nounwind {
+  %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 %src1, i32 %src2) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_arg_arg_imm
+; SI: V_BFE_I32
+; EG: BFE_INT
+define void @bfe_i32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 %src1, i32 123) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_arg_imm_arg
+; SI: V_BFE_I32
+; EG: BFE_INT
+define void @bfe_i32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 123, i32 %src2) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_imm_arg_arg
+; SI: V_BFE_I32
+; EG: BFE_INT
+define void @bfe_i32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 123, i32 %src1, i32 %src2) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll b/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
new file mode 100644
index 0000000..0d47863
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
@@ -0,0 +1,40 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.bfe.u32(i32, i32, i32) nounwind readnone
+
+; FUNC-LABEL: @bfe_u32_arg_arg_arg
+; SI: V_BFE_U32
+; EG: BFE_UINT
+define void @bfe_u32_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) nounwind {
+  %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %src0, i32 %src1, i32 %src2) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_arg_arg_imm
+; SI: V_BFE_U32
+; EG: BFE_UINT
+define void @bfe_u32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %src0, i32 %src1, i32 123) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_arg_imm_arg
+; SI: V_BFE_U32
+; EG: BFE_UINT
+define void @bfe_u32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %src0, i32 123, i32 %src2) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_imm_arg_arg
+; SI: V_BFE_U32
+; EG: BFE_UINT
+define void @bfe_u32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 123, i32 %src1, i32 %src2) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfi.ll b/test/CodeGen/R600/llvm.AMDGPU.bfi.ll
new file mode 100644
index 0000000..e1de45b
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfi.ll
@@ -0,0 +1,41 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.bfi(i32, i32, i32) nounwind readnone
+
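+; bfi performs a bitfield insert: (src0 & src1) | (~src0 & src2), with
+; src0 acting as the select mask (the semantics of V_BFI_B32 and BFI_INT).
+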
+; FUNC-LABEL: @bfi_arg_arg_arg
+; SI: V_BFI_B32
+; EG: BFI_INT
+define void @bfi_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) nounwind {
+  %bfi = call i32 @llvm.AMDGPU.bfi(i32 %src0, i32 %src1, i32 %src2) nounwind readnone
+ store i32 %bfi, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfi_arg_arg_imm
+; SI: V_BFI_B32
+; EG: BFI_INT
+define void @bfi_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+ %bfi = call i32 @llvm.AMDGPU.bfi(i32 %src0, i32 %src1, i32 123) nounwind readnone
+ store i32 %bfi, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfi_arg_imm_arg
+; SI: V_BFI_B32
+; EG: BFI_INT
+define void @bfi_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) nounwind {
+ %bfi = call i32 @llvm.AMDGPU.bfi(i32 %src0, i32 123, i32 %src2) nounwind readnone
+ store i32 %bfi, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfi_imm_arg_arg
+; SI: V_BFI_B32
+; EG: BFI_INT
+define void @bfi_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) nounwind {
+ %bfi = call i32 @llvm.AMDGPU.bfi(i32 123, i32 %src1, i32 %src2) nounwind readnone
+ store i32 %bfi, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfm.ll b/test/CodeGen/R600/llvm.AMDGPU.bfm.ll
new file mode 100644
index 0000000..ef8721e
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfm.ll
@@ -0,0 +1,40 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.bfm(i32, i32) nounwind readnone
+
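+; bfm builds a bitfield mask: ((1 << src0) - 1) << src1, i.e. src0 ones
+; shifted left by src1 (the semantics of V_BFM and BFM_INT).
+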
+; FUNC-LABEL: @bfm_arg_arg
+; SI: V_BFM
+; EG: BFM_INT
+define void @bfm_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+ %bfm = call i32 @llvm.AMDGPU.bfm(i32 %src0, i32 %src1) nounwind readnone
+ store i32 %bfm, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfm_arg_imm
+; SI: V_BFM
+; EG: BFM_INT
+define void @bfm_arg_imm(i32 addrspace(1)* %out, i32 %src0) nounwind {
+ %bfm = call i32 @llvm.AMDGPU.bfm(i32 %src0, i32 123) nounwind readnone
+ store i32 %bfm, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfm_imm_arg
+; SI: V_BFM
+; EG: BFM_INT
+define void @bfm_imm_arg(i32 addrspace(1)* %out, i32 %src1) nounwind {
+ %bfm = call i32 @llvm.AMDGPU.bfm(i32 123, i32 %src1) nounwind readnone
+ store i32 %bfm, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfm_imm_imm
+; SI: V_BFM
+; EG: BFM_INT
+define void @bfm_imm_imm(i32 addrspace(1)* %out) nounwind {
+ %bfm = call i32 @llvm.AMDGPU.bfm(i32 123, i32 456) nounwind readnone
+ store i32 %bfm, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.imax.ll b/test/CodeGen/R600/llvm.AMDGPU.imax.ll
index 1336f4e..01c9f43 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.imax.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.imax.ll
@@ -1,12 +1,23 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
-;CHECK: V_MAX_I32_e32
-
-define void @main(i32 %p0, i32 %p1) #0 {
+; SI-LABEL: @vector_imax
+; SI: V_MAX_I32_e32
+define void @vector_imax(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
main_body:
- %0 = call i32 @llvm.AMDGPU.imax(i32 %p0, i32 %p1)
- %1 = bitcast i32 %0 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %1, float %1, float %1, float %1)
+ %load = load i32 addrspace(1)* %in, align 4
+ %max = call i32 @llvm.AMDGPU.imax(i32 %p0, i32 %load)
+ %bc = bitcast i32 %max to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
+ ret void
+}
+
+; SI-LABEL: @scalar_imax
+; SI: S_MAX_I32
+define void @scalar_imax(i32 %p0, i32 %p1) #0 {
+entry:
+ %max = call i32 @llvm.AMDGPU.imax(i32 %p0, i32 %p1)
+ %bc = bitcast i32 %max to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
ret void
}
@@ -15,7 +26,7 @@ declare i32 @llvm.AMDGPU.imax(i32, i32) #1
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-attributes #0 = { "ShaderType"="0" }
-attributes #1 = { readnone }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
!0 = metadata !{metadata !"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.imin.ll b/test/CodeGen/R600/llvm.AMDGPU.imin.ll
index 3435ea4..565bf34 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.imin.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.imin.ll
@@ -1,12 +1,23 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
-;CHECK: V_MIN_I32_e32
-
-define void @main(i32 %p0, i32 %p1) #0 {
+; SI-LABEL: @vector_imin
+; SI: V_MIN_I32_e32
+define void @vector_imin(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
main_body:
- %0 = call i32 @llvm.AMDGPU.imin(i32 %p0, i32 %p1)
- %1 = bitcast i32 %0 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %1, float %1, float %1, float %1)
+ %load = load i32 addrspace(1)* %in, align 4
+ %min = call i32 @llvm.AMDGPU.imin(i32 %p0, i32 %load)
+ %bc = bitcast i32 %min to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
+ ret void
+}
+
+; SI-LABEL: @scalar_imin
+; SI: S_MIN_I32
+define void @scalar_imin(i32 %p0, i32 %p1) #0 {
+entry:
+ %min = call i32 @llvm.AMDGPU.imin(i32 %p0, i32 %p1)
+ %bc = bitcast i32 %min to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
ret void
}
@@ -15,7 +26,7 @@ declare i32 @llvm.AMDGPU.imin(i32, i32) #1
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-attributes #0 = { "ShaderType"="0" }
-attributes #1 = { readnone }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
!0 = metadata !{metadata !"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.kill.ll b/test/CodeGen/R600/llvm.AMDGPU.kill.ll
new file mode 100644
index 0000000..4ab6a8a
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.kill.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI %s
+
+; SI-LABEL: @kill_gs_const
+; SI-NOT: V_CMPX_LE_F32
+; SI: S_MOV_B64 exec, 0
+
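+; Both kill conditions below fold to constants: the first operand becomes
+; 1.0 (never kills) and the second -1.0 (always kills), so no V_CMPX
+; compare-and-kill is needed and the exec mask can simply be cleared.
+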
+define void @kill_gs_const() #0 {
+main_body:
+ %0 = icmp ule i32 0, 3
+ %1 = select i1 %0, float 1.000000e+00, float -1.000000e+00
+ call void @llvm.AMDGPU.kill(float %1)
+ %2 = icmp ule i32 3, 0
+ %3 = select i1 %2, float 1.000000e+00, float -1.000000e+00
+ call void @llvm.AMDGPU.kill(float %3)
+ ret void
+}
+
+declare void @llvm.AMDGPU.kill(float)
+
+attributes #0 = { "ShaderType"="2" }
+
+!0 = metadata !{metadata !"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.umax.ll b/test/CodeGen/R600/llvm.AMDGPU.umax.ll
index 4cfa133..1b8da2e 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.umax.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.umax.ll
@@ -1,12 +1,38 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
-;CHECK: V_MAX_U32_e32
-
-define void @main(i32 %p0, i32 %p1) #0 {
+; SI-LABEL: @vector_umax
+; SI: V_MAX_U32_e32
+define void @vector_umax(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
main_body:
- %0 = call i32 @llvm.AMDGPU.umax(i32 %p0, i32 %p1)
- %1 = bitcast i32 %0 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %1, float %1, float %1, float %1)
+ %load = load i32 addrspace(1)* %in, align 4
+ %max = call i32 @llvm.AMDGPU.umax(i32 %p0, i32 %load)
+ %bc = bitcast i32 %max to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
+ ret void
+}
+
+; SI-LABEL: @scalar_umax
+; SI: S_MAX_U32
+define void @scalar_umax(i32 %p0, i32 %p1) #0 {
+entry:
+ %max = call i32 @llvm.AMDGPU.umax(i32 %p0, i32 %p1)
+ %bc = bitcast i32 %max to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
+ ret void
+}
+
+; SI-LABEL: @trunc_zext_umax
+; SI: BUFFER_LOAD_UBYTE [[VREG:v[0-9]+]],
+; SI: V_MAX_U32_e32 [[RESULT:v[0-9]+]], 0, [[VREG]]
+; SI-NOT: AND
+; SI: BUFFER_STORE_SHORT [[RESULT]],
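+; The input is zero-extended from i8, so the high bits of the umax result
+; are known zero and the trunc/zext round-trip to i16 needs no masking AND.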
+define void @trunc_zext_umax(i16 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %src) nounwind {
+ %tmp5 = load i8 addrspace(1)* %src, align 1
+ %tmp2 = zext i8 %tmp5 to i32
+ %tmp3 = tail call i32 @llvm.AMDGPU.umax(i32 %tmp2, i32 0) nounwind readnone
+ %tmp4 = trunc i32 %tmp3 to i8
+ %tmp6 = zext i8 %tmp4 to i16
+ store i16 %tmp6, i16 addrspace(1)* %out, align 2
ret void
}
@@ -15,7 +41,7 @@ declare i32 @llvm.AMDGPU.umax(i32, i32) #1
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-attributes #0 = { "ShaderType"="0" }
-attributes #1 = { readnone }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
!0 = metadata !{metadata !"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.umin.ll b/test/CodeGen/R600/llvm.AMDGPU.umin.ll
index 14af051..08397f8 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.umin.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.umin.ll
@@ -1,12 +1,38 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
-;CHECK: V_MIN_U32_e32
-
-define void @main(i32 %p0, i32 %p1) #0 {
+; SI-LABEL: @vector_umin
+; SI: V_MIN_U32_e32
+define void @vector_umin(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
main_body:
- %0 = call i32 @llvm.AMDGPU.umin(i32 %p0, i32 %p1)
- %1 = bitcast i32 %0 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %1, float %1, float %1, float %1)
+ %load = load i32 addrspace(1)* %in, align 4
+ %min = call i32 @llvm.AMDGPU.umin(i32 %p0, i32 %load)
+ %bc = bitcast i32 %min to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
+ ret void
+}
+
+; SI-LABEL: @scalar_umin
+; SI: S_MIN_U32
+define void @scalar_umin(i32 %p0, i32 %p1) #0 {
+entry:
+ %min = call i32 @llvm.AMDGPU.umin(i32 %p0, i32 %p1)
+ %bc = bitcast i32 %min to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
+ ret void
+}
+
+; SI-LABEL: @trunc_zext_umin
+; SI: BUFFER_LOAD_UBYTE [[VREG:v[0-9]+]],
+; SI: V_MIN_U32_e32 [[RESULT:v[0-9]+]], 0, [[VREG]]
+; SI-NOT: AND
+; SI: BUFFER_STORE_SHORT [[RESULT]],
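+; As in the umax case, the zero-extended input leaves the high bits of the
+; result known zero, so no masking AND is needed before the i16 store.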
+define void @trunc_zext_umin(i16 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %src) nounwind {
+ %tmp5 = load i8 addrspace(1)* %src, align 1
+ %tmp2 = zext i8 %tmp5 to i32
+ %tmp3 = tail call i32 @llvm.AMDGPU.umin(i32 %tmp2, i32 0) nounwind readnone
+ %tmp4 = trunc i32 %tmp3 to i8
+ %tmp6 = zext i8 %tmp4 to i16
+ store i16 %tmp6, i16 addrspace(1)* %out, align 2
ret void
}
@@ -15,7 +41,7 @@ declare i32 @llvm.AMDGPU.umin(i32, i32) #1
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-attributes #0 = { "ShaderType"="0" }
-attributes #1 = { readnone }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
!0 = metadata !{metadata !"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.SI.load.dword.ll b/test/CodeGen/R600/llvm.SI.load.dword.ll
new file mode 100644
index 0000000..a622775
--- /dev/null
+++ b/test/CodeGen/R600/llvm.SI.load.dword.ll
@@ -0,0 +1,40 @@
+;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+
+; Example of a simple geometry shader loading vertex attributes from the
+; ESGS ring buffer.
+
+; CHECK-LABEL: @main
+; CHECK: BUFFER_LOAD_DWORD
+; CHECK: BUFFER_LOAD_DWORD
+; CHECK: BUFFER_LOAD_DWORD
+; CHECK: BUFFER_LOAD_DWORD
+
+define void @main([17 x <16 x i8>] addrspace(2)* byval, [32 x <16 x i8>] addrspace(2)* byval, [16 x <32 x i8>] addrspace(2)* byval, [2 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* inreg, [17 x <16 x i8>] addrspace(2)* inreg, i32, i32, i32, i32) #0 {
+main_body:
+ %10 = getelementptr [2 x <16 x i8>] addrspace(2)* %3, i64 0, i32 1
+ %11 = load <16 x i8> addrspace(2)* %10, !tbaa !0
+ %12 = shl i32 %6, 2
+ %13 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %11, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 0)
+ %14 = bitcast i32 %13 to float
+ %15 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %11, i32 %12, i32 0, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0)
+ %16 = bitcast i32 %15 to float
+ %17 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %11, i32 %12, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 0)
+ %18 = bitcast i32 %17 to float
+ %19 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %11, <2 x i32> <i32 0, i32 0>, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 0)
+ %20 = bitcast i32 %19 to float
+ call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %14, float %16, float %18, float %20)
+ ret void
+}
+
+; Function Attrs: nounwind readonly
+declare i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+; Function Attrs: nounwind readonly
+declare i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8>, <2 x i32>, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="1" }
+attributes #1 = { nounwind readonly }
+
+!0 = metadata !{metadata !"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.SI.sample-masked.ll b/test/CodeGen/R600/llvm.SI.sample-masked.ll
index e5e4ec4..445359a 100644
--- a/test/CodeGen/R600/llvm.SI.sample-masked.ll
+++ b/test/CodeGen/R600/llvm.SI.sample-masked.ll
@@ -2,7 +2,7 @@
; CHECK-LABEL: @v1
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 13
-define void @v1(i32 %a1) {
+define void @v1(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -15,7 +15,7 @@ entry:
; CHECK-LABEL: @v2
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 11
-define void @v2(i32 %a1) {
+define void @v2(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -28,7 +28,7 @@ entry:
; CHECK-LABEL: @v3
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 14
-define void @v3(i32 %a1) {
+define void @v3(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -41,7 +41,7 @@ entry:
; CHECK-LABEL: @v4
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 7
-define void @v4(i32 %a1) {
+define void @v4(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -54,7 +54,7 @@ entry:
; CHECK-LABEL: @v5
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 10
-define void @v5(i32 %a1) {
+define void @v5(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -66,7 +66,7 @@ entry:
; CHECK-LABEL: @v6
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 6
-define void @v6(i32 %a1) {
+define void @v6(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -78,7 +78,7 @@ entry:
; CHECK-LABEL: @v7
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 9
-define void @v7(i32 %a1) {
+define void @v7(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -91,3 +91,5 @@ entry:
declare <4 x float> @llvm.SI.sample.v1i32(<1 x i32>, <32 x i8>, <16 x i8>, i32) readnone
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/llvm.SI.sample.ll b/test/CodeGen/R600/llvm.SI.sample.ll
index d41737c..24e8f64 100644
--- a/test/CodeGen/R600/llvm.SI.sample.ll
+++ b/test/CodeGen/R600/llvm.SI.sample.ll
@@ -17,7 +17,7 @@
;CHECK-DAG: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 14
;CHECK-DAG: IMAGE_SAMPLE {{v[0-9]+}}, 8
-define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
+define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) #0 {
%v1 = insertelement <4 x i32> undef, i32 %a1, i32 0
%v2 = insertelement <4 x i32> undef, i32 %a1, i32 1
%v3 = insertelement <4 x i32> undef, i32 %a1, i32 2
@@ -137,7 +137,7 @@ define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
; CHECK: @v1
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 15
-define void @v1(i32 %a1) {
+define void @v1(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -155,3 +155,5 @@ declare <4 x float> @llvm.SI.sample.v1i32(<1 x i32>, <32 x i8>, <16 x i8>, i32)
declare <4 x float> @llvm.SI.sample.(<4 x i32>, <32 x i8>, <16 x i8>, i32) readnone
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/llvm.SI.sampled.ll b/test/CodeGen/R600/llvm.SI.sampled.ll
index 21ac725..366456f 100644
--- a/test/CodeGen/R600/llvm.SI.sampled.ll
+++ b/test/CodeGen/R600/llvm.SI.sampled.ll
@@ -17,7 +17,7 @@
;CHECK-DAG: IMAGE_SAMPLE_D {{v\[[0-9]+:[0-9]+\]}}, 14
;CHECK-DAG: IMAGE_SAMPLE_D {{v[0-9]+}}, 8
-define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
+define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) #0 {
%v1 = insertelement <4 x i32> undef, i32 %a1, i32 0
%v2 = insertelement <4 x i32> undef, i32 %a1, i32 1
%v3 = insertelement <4 x i32> undef, i32 %a1, i32 2
@@ -138,3 +138,5 @@ define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
declare <4 x float> @llvm.SI.sampled.(<4 x i32>, <32 x i8>, <16 x i8>, i32) readnone
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/llvm.SI.sendmsg.ll b/test/CodeGen/R600/llvm.SI.sendmsg.ll
new file mode 100644
index 0000000..581d422
--- /dev/null
+++ b/test/CodeGen/R600/llvm.SI.sendmsg.ll
@@ -0,0 +1,21 @@
+;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+
+; CHECK-LABEL: @main
+; CHECK: S_SENDMSG Gs(emit stream 0)
+; CHECK: S_SENDMSG Gs(cut stream 1)
+; CHECK: S_SENDMSG Gs(emit-cut stream 2)
+; CHECK: S_SENDMSG Gs_done(nop)
+
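+; The immediate encodes the message type in bits [3:0] (2 = GS,
+; 3 = GS_DONE), the GS op in bits [5:4] (0 = nop, 1 = cut, 2 = emit,
+; 3 = emit-cut) and the stream id in bits [9:8]; e.g. 274 = GS, cut,
+; stream 1.
+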
+define void @main() {
+main_body:
+  call void @llvm.SI.sendmsg(i32 34, i32 0)
+  call void @llvm.SI.sendmsg(i32 274, i32 0)
+  call void @llvm.SI.sendmsg(i32 562, i32 0)
+  call void @llvm.SI.sendmsg(i32 3, i32 0)
+ ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.SI.sendmsg(i32, i32) #0
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/R600/llvm.SI.tbuffer.store.ll b/test/CodeGen/R600/llvm.SI.tbuffer.store.ll
index fa7c3ca..569efb6 100644
--- a/test/CodeGen/R600/llvm.SI.tbuffer.store.ll
+++ b/test/CodeGen/R600/llvm.SI.tbuffer.store.ll
@@ -1,8 +1,8 @@
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-;CHECK_LABEL: @test1
+;CHECK-LABEL: @test1
;CHECK: TBUFFER_STORE_FORMAT_XYZW {{v\[[0-9]+:[0-9]+\]}}, 32, -1, 0, -1, 0, 14, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
-define void @test1(i32 %a1, i32 %vaddr) {
+define void @test1(i32 %a1, i32 %vaddr) #0 {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
i32 4, i32 %vaddr, i32 0, i32 32, i32 14, i32 4, i32 1, i32 0, i32 1,
@@ -10,9 +10,9 @@ define void @test1(i32 %a1, i32 %vaddr) {
ret void
}
-;CHECK_LABEL: @test2
+;CHECK-LABEL: @test2
;CHECK: TBUFFER_STORE_FORMAT_XYZ {{v\[[0-9]+:[0-9]+\]}}, 24, -1, 0, -1, 0, 13, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
-define void @test2(i32 %a1, i32 %vaddr) {
+define void @test2(i32 %a1, i32 %vaddr) #0 {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
i32 3, i32 %vaddr, i32 0, i32 24, i32 13, i32 4, i32 1, i32 0, i32 1,
@@ -20,9 +20,9 @@ define void @test2(i32 %a1, i32 %vaddr) {
ret void
}
-;CHECK_LABEL: @test3
+;CHECK-LABEL: @test3
;CHECK: TBUFFER_STORE_FORMAT_XY {{v\[[0-9]+:[0-9]+\]}}, 16, -1, 0, -1, 0, 11, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
-define void @test3(i32 %a1, i32 %vaddr) {
+define void @test3(i32 %a1, i32 %vaddr) #0 {
%vdata = insertelement <2 x i32> undef, i32 %a1, i32 0
call void @llvm.SI.tbuffer.store.v2i32(<16 x i8> undef, <2 x i32> %vdata,
i32 2, i32 %vaddr, i32 0, i32 16, i32 11, i32 4, i32 1, i32 0, i32 1,
@@ -30,9 +30,9 @@ define void @test3(i32 %a1, i32 %vaddr) {
ret void
}
-;CHECK_LABEL: @test4
+;CHECK-LABEL: @test4
;CHECK: TBUFFER_STORE_FORMAT_X {{v[0-9]+}}, 8, -1, 0, -1, 0, 4, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
-define void @test4(i32 %vdata, i32 %vaddr) {
+define void @test4(i32 %vdata, i32 %vaddr) #0 {
call void @llvm.SI.tbuffer.store.i32(<16 x i8> undef, i32 %vdata,
i32 1, i32 %vaddr, i32 0, i32 8, i32 4, i32 4, i32 1, i32 0, i32 1,
i32 1, i32 0)
@@ -42,3 +42,5 @@ define void @test4(i32 %vdata, i32 %vaddr) {
declare void @llvm.SI.tbuffer.store.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
declare void @llvm.SI.tbuffer.store.v2i32(<16 x i8>, <2 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
declare void @llvm.SI.tbuffer.store.v4i32(<16 x i8>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
+
+attributes #0 = { "ShaderType"="1" }
diff --git a/test/CodeGen/R600/llvm.exp2.ll b/test/CodeGen/R600/llvm.exp2.ll
new file mode 100644
index 0000000..13bfbab
--- /dev/null
+++ b/test/CodeGen/R600/llvm.exp2.ll
@@ -0,0 +1,26 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
+;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK
+
+;EG-CHECK-LABEL: @test
+;EG-CHECK: EXP_IEEE *
+;CM-CHECK-LABEL: @test
+;CM-CHECK: EXP_IEEE T{{[0-9]+}}.X, -|T{{[0-9]+}}.X|
+;CM-CHECK: EXP_IEEE T{{[0-9]+}}.Y (MASKED), -|T{{[0-9]+}}.X|
+;CM-CHECK: EXP_IEEE T{{[0-9]+}}.Z (MASKED), -|T{{[0-9]+}}.X|
+;CM-CHECK: EXP_IEEE * T{{[0-9]+}}.W (MASKED), -|T{{[0-9]+}}.X|
+
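+; Cayman has no dedicated transcendental unit, so EXP_IEEE is issued
+; across multiple vector slots with the unused lanes masked, which is
+; what the CM checks above reflect.
+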
+define void @test(<4 x float> inreg %reg0) #0 {
+ %r0 = extractelement <4 x float> %reg0, i32 0
+ %r1 = call float @llvm.fabs.f32(float %r0)
+ %r2 = fsub float -0.000000e+00, %r1
+ %r3 = call float @llvm.exp2.f32(float %r2)
+ %vec = insertelement <4 x float> undef, float %r3, i32 0
+ call void @llvm.R600.store.swizzle(<4 x float> %vec, i32 0, i32 0)
+ ret void
+}
+
+declare float @llvm.exp2.f32(float) readnone
+declare float @llvm.fabs.f32(float) readnone
+declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
+
+attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/llvm.pow.ll b/test/CodeGen/R600/llvm.pow.ll
index b587d2b..c4ae652 100644
--- a/test/CodeGen/R600/llvm.pow.ll
+++ b/test/CodeGen/R600/llvm.pow.ll
@@ -1,10 +1,11 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: LOG_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: MUL NON-IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PS}}
-;CHECK-NEXT: EXP_IEEE * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+;CHECK-LABEL: test1:
+;CHECK: LOG_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}},
+;CHECK-NEXT: MUL NON-IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PS}},
+;CHECK-NEXT: EXP_IEEE * T{{[0-9]+\.[XYZW], PV\.[XYZW]}},
-define void @test(<4 x float> inreg %reg0) #0 {
+define void @test1(<4 x float> inreg %reg0) #0 {
%r0 = extractelement <4 x float> %reg0, i32 0
%r1 = extractelement <4 x float> %reg0, i32 1
%r2 = call float @llvm.pow.f32( float %r0, float %r1)
@@ -13,7 +14,27 @@ define void @test(<4 x float> inreg %reg0) #0 {
ret void
}
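+
+; pow is lowered as exp2(y * log2(x)); for the vector case below the
+; LOG_IEEE / MUL / EXP_IEEE sequence is emitted once per component.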
+;CHECK-LABEL: test2:
+;CHECK: LOG_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}},
+;CHECK-NEXT: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PS}},
+;CHECK-NEXT: LOG_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}},
+;CHECK-NEXT: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PS}},
+;CHECK-NEXT: EXP_IEEE * T{{[0-9]+\.[XYZW], PV\.[XYZW]}},
+;CHECK-NEXT: EXP_IEEE * T{{[0-9]+\.[XYZW], PV\.[XYZW]}},
+;CHECK-NEXT: LOG_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}},
+;CHECK-NEXT: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PS}},
+;CHECK-NEXT: LOG_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}},
+;CHECK-NEXT: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PS}},
+;CHECK-NEXT: EXP_IEEE * T{{[0-9]+\.[XYZW], PV\.[XYZW]}},
+;CHECK-NEXT: EXP_IEEE * T{{[0-9]+\.[XYZW], PV\.[XYZW]}},
+define void @test2(<4 x float> inreg %reg0, <4 x float> inreg %reg1) #0 {
+ %vec = call <4 x float> @llvm.pow.v4f32( <4 x float> %reg0, <4 x float> %reg1)
+ call void @llvm.R600.store.swizzle(<4 x float> %vec, i32 0, i32 0)
+ ret void
+}
+
declare float @llvm.pow.f32(float ,float ) readonly
+declare <4 x float> @llvm.pow.v4f32(<4 x float>, <4 x float>) readonly
declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/llvm.trunc.ll b/test/CodeGen/R600/llvm.trunc.ll
new file mode 100644
index 0000000..fa6fb99
--- /dev/null
+++ b/test/CodeGen/R600/llvm.trunc.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK-LABEL: @trunc_f32
+; CHECK: TRUNC
+
+define void @trunc_f32(float addrspace(1)* %out, float %in) {
+entry:
+ %0 = call float @llvm.trunc.f32(float %in)
+ store float %0, float addrspace(1)* %out
+ ret void
+}
+
+declare float @llvm.trunc.f32(float)
diff --git a/test/CodeGen/R600/load.ll b/test/CodeGen/R600/load.ll
index e4492d7..1486c4d 100644
--- a/test/CodeGen/R600/load.ll
+++ b/test/CodeGen/R600/load.ll
@@ -1,16 +1,15 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck --check-prefix=R600-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600-CHECK --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck --check-prefix=R600-CHECK --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK --check-prefix=FUNC %s
;===------------------------------------------------------------------------===;
; GLOBAL ADDRESS SPACE
;===------------------------------------------------------------------------===;
; Load an i8 value from the global address space.
-; R600-CHECK-LABEL: @load_i8
+; FUNC-LABEL: @load_i8
; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK-LABEL: @load_i8
; SI-CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}},
define void @load_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
%1 = load i8 addrspace(1)* %in
@@ -19,13 +18,12 @@ define void @load_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
ret void
}
-; R600-CHECK-LABEL: @load_i8_sext
+; FUNC-LABEL: @load_i8_sext
; R600-CHECK: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
; R600-CHECK: 24
; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
; R600-CHECK: 24
-; SI-CHECK-LABEL: @load_i8_sext
; SI-CHECK: BUFFER_LOAD_SBYTE
define void @load_i8_sext(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
@@ -35,10 +33,9 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i8
+; FUNC-LABEL: @load_v2i8
; R600-CHECK: VTX_READ_8
; R600-CHECK: VTX_READ_8
-; SI-CHECK-LABEL: @load_v2i8
; SI-CHECK: BUFFER_LOAD_UBYTE
; SI-CHECK: BUFFER_LOAD_UBYTE
define void @load_v2i8(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) {
@@ -49,7 +46,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i8_sext
+; FUNC-LABEL: @load_v2i8_sext
; R600-CHECK-DAG: VTX_READ_8 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
; R600-CHECK-DAG: VTX_READ_8 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
@@ -60,7 +57,6 @@ entry:
; R600-CHECK-DAG: 24
; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
; R600-CHECK-DAG: 24
-; SI-CHECK-LABEL: @load_v2i8_sext
; SI-CHECK: BUFFER_LOAD_SBYTE
; SI-CHECK: BUFFER_LOAD_SBYTE
define void @load_v2i8_sext(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) {
@@ -71,12 +67,11 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i8
+; FUNC-LABEL: @load_v4i8
; R600-CHECK: VTX_READ_8
; R600-CHECK: VTX_READ_8
; R600-CHECK: VTX_READ_8
; R600-CHECK: VTX_READ_8
-; SI-CHECK-LABEL: @load_v4i8
; SI-CHECK: BUFFER_LOAD_UBYTE
; SI-CHECK: BUFFER_LOAD_UBYTE
; SI-CHECK: BUFFER_LOAD_UBYTE
@@ -89,7 +84,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i8_sext
+; FUNC-LABEL: @load_v4i8_sext
; R600-CHECK-DAG: VTX_READ_8 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
; R600-CHECK-DAG: VTX_READ_8 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
; R600-CHECK-DAG: VTX_READ_8 [[DST_Z:T[0-9]\.[XYZW]]], [[DST_Z]]
@@ -110,7 +105,6 @@ entry:
; R600-CHECK-DAG: 24
; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_W_CHAN]]
; R600-CHECK-DAG: 24
-; SI-CHECK-LABEL: @load_v4i8_sext
; SI-CHECK: BUFFER_LOAD_SBYTE
; SI-CHECK: BUFFER_LOAD_SBYTE
; SI-CHECK: BUFFER_LOAD_SBYTE
@@ -124,9 +118,8 @@ entry:
}
; Load an i16 value from the global address space.
-; R600-CHECK-LABEL: @load_i16
+; FUNC-LABEL: @load_i16
; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK-LABEL: @load_i16
; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_i16(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
entry:
@@ -136,13 +129,12 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_i16_sext
+; FUNC-LABEL: @load_i16_sext
; R600-CHECK: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
; R600-CHECK: 16
; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
; R600-CHECK: 16
-; SI-CHECK-LABEL: @load_i16_sext
; SI-CHECK: BUFFER_LOAD_SSHORT
define void @load_i16_sext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
entry:
@@ -152,10 +144,9 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i16
+; FUNC-LABEL: @load_v2i16
; R600-CHECK: VTX_READ_16
; R600-CHECK: VTX_READ_16
-; SI-CHECK-LABEL: @load_v2i16
; SI-CHECK: BUFFER_LOAD_USHORT
; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_v2i16(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
@@ -166,7 +157,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i16_sext
+; FUNC-LABEL: @load_v2i16_sext
; R600-CHECK-DAG: VTX_READ_16 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
; R600-CHECK-DAG: VTX_READ_16 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
@@ -177,7 +168,6 @@ entry:
; R600-CHECK-DAG: 16
; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
; R600-CHECK-DAG: 16
-; SI-CHECK-LABEL: @load_v2i16_sext
; SI-CHECK: BUFFER_LOAD_SSHORT
; SI-CHECK: BUFFER_LOAD_SSHORT
define void @load_v2i16_sext(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
@@ -188,12 +178,11 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i16
+; FUNC-LABEL: @load_v4i16
; R600-CHECK: VTX_READ_16
; R600-CHECK: VTX_READ_16
; R600-CHECK: VTX_READ_16
; R600-CHECK: VTX_READ_16
-; SI-CHECK-LABEL: @load_v4i16
; SI-CHECK: BUFFER_LOAD_USHORT
; SI-CHECK: BUFFER_LOAD_USHORT
; SI-CHECK: BUFFER_LOAD_USHORT
@@ -206,7 +195,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i16_sext
+; FUNC-LABEL: @load_v4i16_sext
; R600-CHECK-DAG: VTX_READ_16 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
; R600-CHECK-DAG: VTX_READ_16 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
; R600-CHECK-DAG: VTX_READ_16 [[DST_Z:T[0-9]\.[XYZW]]], [[DST_Z]]
@@ -227,7 +216,6 @@ entry:
; R600-CHECK-DAG: 16
; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_W_CHAN]]
; R600-CHECK-DAG: 16
-; SI-CHECK-LABEL: @load_v4i16_sext
; SI-CHECK: BUFFER_LOAD_SSHORT
; SI-CHECK: BUFFER_LOAD_SSHORT
; SI-CHECK: BUFFER_LOAD_SSHORT
@@ -241,10 +229,9 @@ entry:
}
; Load an i32 value from the global address space.
-; R600-CHECK-LABEL: @load_i32
+; FUNC-LABEL: @load_i32
; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-; SI-CHECK-LABEL: @load_i32
; SI-CHECK: BUFFER_LOAD_DWORD v{{[0-9]+}}
define void @load_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
@@ -254,10 +241,9 @@ entry:
}
; Load an f32 value from the global address space.
-; R600-CHECK-LABEL: @load_f32
+; FUNC-LABEL: @load_f32
; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-; SI-CHECK-LABEL: @load_f32
; SI-CHECK: BUFFER_LOAD_DWORD v{{[0-9]+}}
define void @load_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
entry:
@@ -267,10 +253,9 @@ entry:
}
; Load a v2f32 value from the global address space.
-; R600-CHECK-LABEL: @load_v2f32
+; FUNC-LABEL: @load_v2f32
; R600-CHECK: VTX_READ_64
-; SI-CHECK-LABEL: @load_v2f32
; SI-CHECK: BUFFER_LOAD_DWORDX2
define void @load_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) {
entry:
@@ -279,11 +264,10 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_i64
+; FUNC-LABEL: @load_i64
; R600-CHECK: MEM_RAT
; R600-CHECK: MEM_RAT
-; SI-CHECK-LABEL: @load_i64
; SI-CHECK: BUFFER_LOAD_DWORDX2
define void @load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
entry:
@@ -292,13 +276,12 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_i64_sext
+; FUNC-LABEL: @load_i64_sext
; R600-CHECK: MEM_RAT
; R600-CHECK: MEM_RAT
; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}}, literal.x
; R600-CHECK: 31
-; SI-CHECK-LABEL: @load_i64_sext
-; SI-CHECK: BUFFER_LOAD_DWORDX2 [[VAL:v\[[0-9]:[0-9]\]]]
+; SI-CHECK: BUFFER_LOAD_DWORD
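+; Only the i32 is loaded; the sign extension to i64 is done in registers
+; (on R600 via the ASHR by 31 checked above), so no 64-bit load is needed.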
define void @load_i64_sext(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
@@ -308,7 +291,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_i64_zext
+; FUNC-LABEL: @load_i64_zext
; R600-CHECK: MEM_RAT
; R600-CHECK: MEM_RAT
define void @load_i64_zext(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
@@ -319,18 +302,65 @@ entry:
ret void
}
+; FUNC-LABEL: @load_v8i32
+; R600-CHECK: VTX_READ_128
+; R600-CHECK: VTX_READ_128
+; XXX: We should be using DWORDX4 instructions on SI.
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+define void @load_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(1)* %in) {
+entry:
+ %0 = load <8 x i32> addrspace(1)* %in
+ store <8 x i32> %0, <8 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @load_v16i32
+; R600-CHECK: VTX_READ_128
+; R600-CHECK: VTX_READ_128
+; R600-CHECK: VTX_READ_128
+; R600-CHECK: VTX_READ_128
+; XXX: We should be using DWORDX4 instructions on SI.
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+define void @load_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(1)* %in) {
+entry:
+ %0 = load <16 x i32> addrspace(1)* %in
+ store <16 x i32> %0, <16 x i32> addrspace(1)* %out
+ ret void
+}
+
;===------------------------------------------------------------------------===;
; CONSTANT ADDRESS SPACE
;===------------------------------------------------------------------------===;
; Load a sign-extended i8 value
-; R600-CHECK-LABEL: @load_const_i8_sext
+; FUNC-LABEL: @load_const_i8_sext
; R600-CHECK: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
; R600-CHECK: 24
; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
; R600-CHECK: 24
-; SI-CHECK-LABEL: @load_const_i8_sext
; SI-CHECK: BUFFER_LOAD_SBYTE v{{[0-9]+}},
define void @load_const_i8_sext(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
@@ -341,9 +371,8 @@ entry:
}
; Load an aligned i8 value
-; R600-CHECK-LABEL: @load_const_i8_aligned
+; FUNC-LABEL: @load_const_i8_aligned
; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK-LABEL: @load_const_i8_aligned
; SI-CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}},
define void @load_const_i8_aligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
@@ -354,9 +383,8 @@ entry:
}
; Load an un-aligned i8 value
-; R600-CHECK-LABEL: @load_const_i8_unaligned
+; FUNC-LABEL: @load_const_i8_unaligned
; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK-LABEL: @load_const_i8_unaligned
; SI-CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}},
define void @load_const_i8_unaligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
@@ -368,13 +396,12 @@ entry:
}
; Load a sign-extended i16 value
-; R600-CHECK-LABEL: @load_const_i16_sext
+; FUNC-LABEL: @load_const_i16_sext
; R600-CHECK: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
; R600-CHECK: 16
; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
; R600-CHECK: 16
-; SI-CHECK-LABEL: @load_const_i16_sext
; SI-CHECK: BUFFER_LOAD_SSHORT
define void @load_const_i16_sext(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
entry:
@@ -385,9 +412,8 @@ entry:
}
; Load an aligned i16 value
-; R600-CHECK-LABEL: @load_const_i16_aligned
+; FUNC-LABEL: @load_const_i16_aligned
; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK-LABEL: @load_const_i16_aligned
; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_const_i16_aligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
entry:
@@ -398,9 +424,8 @@ entry:
}
; Load an un-aligned i16 value
-; R600-CHECK-LABEL: @load_const_i16_unaligned
+; FUNC-LABEL: @load_const_i16_unaligned
; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK-LABEL: @load_const_i16_unaligned
; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_const_i16_unaligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
entry:
@@ -412,10 +437,9 @@ entry:
}
; Load an i32 value from the constant address space.
-; R600-CHECK-LABEL: @load_const_addrspace_i32
+; FUNC-LABEL: @load_const_addrspace_i32
; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-; SI-CHECK-LABEL: @load_const_addrspace_i32
; SI-CHECK: S_LOAD_DWORD s{{[0-9]+}}
define void @load_const_addrspace_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
entry:
@@ -425,10 +449,9 @@ entry:
}
; Load a f32 value from the constant address space.
-; R600-CHECK-LABEL: @load_const_addrspace_f32
+; FUNC-LABEL: @load_const_addrspace_f32
; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-; SI-CHECK-LABEL: @load_const_addrspace_f32
; SI-CHECK: S_LOAD_DWORD s{{[0-9]+}}
define void @load_const_addrspace_f32(float addrspace(1)* %out, float addrspace(2)* %in) {
%1 = load float addrspace(2)* %in
@@ -441,10 +464,10 @@ define void @load_const_addrspace_f32(float addrspace(1)* %out, float addrspace(
;===------------------------------------------------------------------------===;
; Load an i8 value from the local address space.
-; R600-CHECK-LABEL: @load_i8_local
+; FUNC-LABEL: @load_i8_local
; R600-CHECK: LDS_UBYTE_READ_RET
-; SI-CHECK-LABEL: @load_i8_local
; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_U8
define void @load_i8_local(i32 addrspace(1)* %out, i8 addrspace(3)* %in) {
%1 = load i8 addrspace(3)* %in
@@ -453,11 +476,11 @@ define void @load_i8_local(i32 addrspace(1)* %out, i8 addrspace(3)* %in) {
ret void
}
-; R600-CHECK-LABEL: @load_i8_sext_local
+; FUNC-LABEL: @load_i8_sext_local
; R600-CHECK: LDS_UBYTE_READ_RET
; R600-CHECK: ASHR
-; SI-CHECK-LABEL: @load_i8_sext_local
; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_I8
define void @load_i8_sext_local(i32 addrspace(1)* %out, i8 addrspace(3)* %in) {
entry:
@@ -467,11 +490,11 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i8_local
+; FUNC-LABEL: @load_v2i8_local
; R600-CHECK: LDS_UBYTE_READ_RET
; R600-CHECK: LDS_UBYTE_READ_RET
-; SI-CHECK-LABEL: @load_v2i8_local
; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_U8
; SI-CHECK: DS_READ_U8
define void @load_v2i8_local(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(3)* %in) {
@@ -482,13 +505,13 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i8_sext_local
+; FUNC-LABEL: @load_v2i8_sext_local
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
; R600-CHECK-DAG: ASHR
; R600-CHECK-DAG: ASHR
-; SI-CHECK-LABEL: @load_v2i8_sext_local
; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_I8
; SI-CHECK: DS_READ_I8
define void @load_v2i8_sext_local(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(3)* %in) {
@@ -499,13 +522,13 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i8_local
+; FUNC-LABEL: @load_v4i8_local
; R600-CHECK: LDS_UBYTE_READ_RET
; R600-CHECK: LDS_UBYTE_READ_RET
; R600-CHECK: LDS_UBYTE_READ_RET
; R600-CHECK: LDS_UBYTE_READ_RET
-; SI-CHECK-LABEL: @load_v4i8_local
; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_U8
; SI-CHECK: DS_READ_U8
; SI-CHECK: DS_READ_U8
@@ -518,7 +541,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i8_sext_local
+; FUNC-LABEL: @load_v4i8_sext_local
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
@@ -527,8 +550,8 @@ entry:
; R600-CHECK-DAG: ASHR
; R600-CHECK-DAG: ASHR
; R600-CHECK-DAG: ASHR
-; SI-CHECK-LABEL: @load_v4i8_sext_local
; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_I8
; SI-CHECK: DS_READ_I8
; SI-CHECK: DS_READ_I8
@@ -542,10 +565,10 @@ entry:
}
; Load an i16 value from the local address space.
-; R600-CHECK-LABEL: @load_i16_local
+; FUNC-LABEL: @load_i16_local
; R600-CHECK: LDS_USHORT_READ_RET
-; SI-CHECK-LABEL: @load_i16_local
; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_U16
define void @load_i16_local(i32 addrspace(1)* %out, i16 addrspace(3)* %in) {
entry:
@@ -555,11 +578,11 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_i16_sext_local
+; FUNC-LABEL: @load_i16_sext_local
; R600-CHECK: LDS_USHORT_READ_RET
; R600-CHECK: ASHR
-; SI-CHECK-LABEL: @load_i16_sext_local
; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_I16
define void @load_i16_sext_local(i32 addrspace(1)* %out, i16 addrspace(3)* %in) {
entry:
@@ -569,11 +592,11 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i16_local
+; FUNC-LABEL: @load_v2i16_local
; R600-CHECK: LDS_USHORT_READ_RET
; R600-CHECK: LDS_USHORT_READ_RET
-; SI-CHECK-LABEL: @load_v2i16_local
; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_U16
; SI-CHECK: DS_READ_U16
define void @load_v2i16_local(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(3)* %in) {
@@ -584,13 +607,13 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i16_sext_local
+; FUNC-LABEL: @load_v2i16_sext_local
; R600-CHECK-DAG: LDS_USHORT_READ_RET
; R600-CHECK-DAG: LDS_USHORT_READ_RET
; R600-CHECK-DAG: ASHR
; R600-CHECK-DAG: ASHR
-; SI-CHECK-LABEL: @load_v2i16_sext_local
; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_I16
; SI-CHECK: DS_READ_I16
define void @load_v2i16_sext_local(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(3)* %in) {
@@ -601,13 +624,13 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i16_local
+; FUNC-LABEL: @load_v4i16_local
; R600-CHECK: LDS_USHORT_READ_RET
; R600-CHECK: LDS_USHORT_READ_RET
; R600-CHECK: LDS_USHORT_READ_RET
; R600-CHECK: LDS_USHORT_READ_RET
-; SI-CHECK-LABEL: @load_v4i16_local
; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_U16
; SI-CHECK: DS_READ_U16
; SI-CHECK: DS_READ_U16
@@ -620,7 +643,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i16_sext_local
+; FUNC-LABEL: @load_v4i16_sext_local
; R600-CHECK-DAG: LDS_USHORT_READ_RET
; R600-CHECK-DAG: LDS_USHORT_READ_RET
; R600-CHECK-DAG: LDS_USHORT_READ_RET
@@ -629,8 +652,8 @@ entry:
; R600-CHECK-DAG: ASHR
; R600-CHECK-DAG: ASHR
; R600-CHECK-DAG: ASHR
-; SI-CHECK-LABEL: @load_v4i16_sext_local
; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_I16
; SI-CHECK: DS_READ_I16
; SI-CHECK: DS_READ_I16
@@ -643,11 +666,11 @@ entry:
ret void
}
-; load an i32 value from the glocal address space.
-; R600-CHECK-LABEL: @load_i32_local
+; load an i32 value from the local address space.
+; FUNC-LABEL: @load_i32_local
; R600-CHECK: LDS_READ_RET
-; SI-CHECK-LABEL: @load_i32_local
; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_B32
define void @load_i32_local(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
@@ -656,10 +679,10 @@ entry:
ret void
}
-; load a f32 value from the global address space.
-; R600-CHECK-LABEL: @load_f32_local
+; load an f32 value from the local address space.
+; FUNC-LABEL: @load_f32_local
; R600-CHECK: LDS_READ_RET
-; SI-CHECK-LABEL: @load_f32_local
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_B32
define void @load_f32_local(float addrspace(1)* %out, float addrspace(3)* %in) {
entry:
@@ -669,10 +692,10 @@ entry:
}
; load a v2f32 value from the local address space
-; R600-CHECK-LABEL: @load_v2f32_local
+; FUNC-LABEL: @load_v2f32_local
; R600-CHECK: LDS_READ_RET
; R600-CHECK: LDS_READ_RET
-; SI-CHECK-LABEL: @load_v2f32_local
+; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_B32
; SI-CHECK: DS_READ_B32
define void @load_v2f32_local(<2 x float> addrspace(1)* %out, <2 x float> addrspace(3)* %in) {
diff --git a/test/CodeGen/R600/load64.ll b/test/CodeGen/R600/load64.ll
index e351e41..a117557 100644
--- a/test/CodeGen/R600/load64.ll
+++ b/test/CodeGen/R600/load64.ll
@@ -1,18 +1,28 @@
; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
; load a f64 value from the global address space.
-; CHECK: @load_f64
+; CHECK-LABEL: @load_f64:
; CHECK: BUFFER_LOAD_DWORDX2 v[{{[0-9]+:[0-9]+}}]
+; CHECK: BUFFER_STORE_DWORDX2 v[{{[0-9]+:[0-9]+}}]
define void @load_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
-entry:
- %0 = load double addrspace(1)* %in
- store double %0, double addrspace(1)* %out
+ %1 = load double addrspace(1)* %in
+ store double %1, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: @load_i64:
+; CHECK: BUFFER_LOAD_DWORDX2 v[{{[0-9]+:[0-9]+}}]
+; CHECK: BUFFER_STORE_DWORDX2 v[{{[0-9]+:[0-9]+}}]
+define void @load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+ %tmp = load i64 addrspace(1)* %in
+ store i64 %tmp, i64 addrspace(1)* %out, align 8
ret void
}
; Load a f64 value from the constant address space.
-; CHECK: @load_const_addrspace_f64
+; CHECK-LABEL: @load_const_addrspace_f64:
; CHECK: S_LOAD_DWORDX2 s[{{[0-9]+:[0-9]+}}]
+; CHECK: BUFFER_STORE_DWORDX2 v[{{[0-9]+:[0-9]+}}]
define void @load_const_addrspace_f64(double addrspace(1)* %out, double addrspace(2)* %in) {
%1 = load double addrspace(2)* %in
store double %1, double addrspace(1)* %out
diff --git a/test/CodeGen/R600/local-64.ll b/test/CodeGen/R600/local-64.ll
new file mode 100644
index 0000000..87f18ae
--- /dev/null
+++ b/test/CodeGen/R600/local-64.ll
@@ -0,0 +1,158 @@
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @local_i32_load
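+; A gep index of 7 on an i32 pointer is a 28-byte (7 * 4) offset, which should
+; fold into the DS_READ immediate.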
+; SI: DS_READ_B32 [[REG:v[0-9]+]], v{{[0-9]+}}, 28, [M0]
+; SI: BUFFER_STORE_DWORD [[REG]],
+define void @local_i32_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %in, i32 7
+ %val = load i32 addrspace(3)* %gep, align 4
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @local_i32_load_0_offset
+; SI: DS_READ_B32 [[REG:v[0-9]+]], v{{[0-9]+}}, 0, [M0]
+; SI: BUFFER_STORE_DWORD [[REG]],
+define void @local_i32_load_0_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
+ %val = load i32 addrspace(3)* %in, align 4
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @local_i8_load_i16_max_offset
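+; A byte offset of 65535 still fits in the 16-bit DS offset field, so no add
+; should be needed; the encoded offset appears as -1 below.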
+; SI-NOT: ADD
+; SI: DS_READ_U8 [[REG:v[0-9]+]], {{v[0-9]+}}, -1, [M0]
+; SI: BUFFER_STORE_BYTE [[REG]],
+define void @local_i8_load_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind {
+ %gep = getelementptr i8 addrspace(3)* %in, i32 65535
+ %val = load i8 addrspace(3)* %gep, align 4
+ store i8 %val, i8 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @local_i8_load_over_i16_max_offset
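+; 65536 no longer fits in 16 bits, so the address should be computed with a
+; separate add instead.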
+; SI: S_ADD_I32 [[ADDR:s[0-9]+]], s{{[0-9]+}}, 65536
+; SI: V_MOV_B32_e32 [[VREGADDR:v[0-9]+]], [[ADDR]]
+; SI: DS_READ_U8 [[REG:v[0-9]+]], [[VREGADDR]], 0, [M0]
+; SI: BUFFER_STORE_BYTE [[REG]],
+define void @local_i8_load_over_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind {
+ %gep = getelementptr i8 addrspace(3)* %in, i32 65536
+ %val = load i8 addrspace(3)* %gep, align 4
+ store i8 %val, i8 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @local_i64_load
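+; For i64 elements the same index of 7 becomes a 56-byte (7 * 8) immediate offset.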
+; SI-NOT: ADD
+; SI: DS_READ_B64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 56, [M0]
+; SI: BUFFER_STORE_DWORDX2 [[REG]],
+define void @local_i64_load(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %in, i32 7
+ %val = load i64 addrspace(3)* %gep, align 8
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @local_i64_load_0_offset
+; SI: DS_READ_B64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0, [M0]
+; SI: BUFFER_STORE_DWORDX2 [[REG]],
+define void @local_i64_load_0_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounwind {
+ %val = load i64 addrspace(3)* %in, align 8
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @local_f64_load
+; SI-NOT: ADD
+; SI: DS_READ_B64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 56, [M0]
+; SI: BUFFER_STORE_DWORDX2 [[REG]],
+define void @local_f64_load(double addrspace(1)* %out, double addrspace(3)* %in) nounwind {
+ %gep = getelementptr double addrspace(3)* %in, i32 7
+ %val = load double addrspace(3)* %gep, align 8
+ store double %val, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @local_f64_load_0_offset
+; SI: DS_READ_B64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0, [M0]
+; SI: BUFFER_STORE_DWORDX2 [[REG]],
+define void @local_f64_load_0_offset(double addrspace(1)* %out, double addrspace(3)* %in) nounwind {
+ %val = load double addrspace(3)* %in, align 8
+ store double %val, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @local_i64_store
+; SI-NOT: ADD
+; SI: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 56 [M0]
+define void @local_i64_store(i64 addrspace(3)* %out) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %out, i32 7
+ store i64 5678, i64 addrspace(3)* %gep, align 8
+ ret void
+}
+
+; SI-LABEL: @local_i64_store_0_offset
+; SI-NOT: ADD
+; SI: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0 [M0]
+define void @local_i64_store_0_offset(i64 addrspace(3)* %out) nounwind {
+ store i64 1234, i64 addrspace(3)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @local_f64_store
+; SI-NOT: ADD
+; SI: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 56 [M0]
+define void @local_f64_store(double addrspace(3)* %out) nounwind {
+ %gep = getelementptr double addrspace(3)* %out, i32 7
+ store double 16.0, double addrspace(3)* %gep, align 8
+ ret void
+}
+
+; SI-LABEL: @local_f64_store_0_offset
+; SI: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0 [M0]
+define void @local_f64_store_0_offset(double addrspace(3)* %out) nounwind {
+ store double 20.0, double addrspace(3)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @local_v2i64_store
+; SI-NOT: ADD
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 120 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 112 [M0]
+define void @local_v2i64_store(<2 x i64> addrspace(3)* %out) nounwind {
+ %gep = getelementptr <2 x i64> addrspace(3)* %out, i32 7
+ store <2 x i64> <i64 5678, i64 5678>, <2 x i64> addrspace(3)* %gep, align 16
+ ret void
+}
+
+; SI-LABEL: @local_v2i64_store_0_offset
+; SI-NOT: ADD
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 8 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0 [M0]
+define void @local_v2i64_store_0_offset(<2 x i64> addrspace(3)* %out) nounwind {
+ store <2 x i64> <i64 1234, i64 1234>, <2 x i64> addrspace(3)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @local_v4i64_store
+; SI-NOT: ADD
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 248 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 240 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 232 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 224 [M0]
+define void @local_v4i64_store(<4 x i64> addrspace(3)* %out) nounwind {
+ %gep = getelementptr <4 x i64> addrspace(3)* %out, i32 7
+ store <4 x i64> <i64 5678, i64 5678, i64 5678, i64 5678>, <4 x i64> addrspace(3)* %gep, align 16
+ ret void
+}
+
+; SI-LABEL: @local_v4i64_store_0_offset
+; SI-NOT: ADD
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 24 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 16 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 8 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0 [M0]
+define void @local_v4i64_store_0_offset(<4 x i64> addrspace(3)* %out) nounwind {
+ store <4 x i64> <i64 1234, i64 1234, i64 1234, i64 1234>, <4 x i64> addrspace(3)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/local-memory-two-objects.ll b/test/CodeGen/R600/local-memory-two-objects.ll
index e2d8406..616000d 100644
--- a/test/CodeGen/R600/local-memory-two-objects.ll
+++ b/test/CodeGen/R600/local-memory-two-objects.ll
@@ -17,18 +17,19 @@
; this consistently on evergreen GPUs.
; EG-CHECK: LDS_WRITE
; EG-CHECK: LDS_WRITE
-; SI-CHECK: DS_WRITE_B32 0, {{v[0-9]*}}, v[[ADDRW:[0-9]*]]
-; SI-CHECK-NOT: DS_WRITE_B32 0, {{v[0-9]*}}, v[[ADDRW]]
+; SI-CHECK: DS_WRITE_B32 {{v[0-9]*}}, v[[ADDRW:[0-9]*]]
+; SI-CHECK-NOT: DS_WRITE_B32 {{v[0-9]*}}, v[[ADDRW]]
; GROUP_BARRIER must be the last instruction in a clause
; EG-CHECK: GROUP_BARRIER
; EG-CHECK-NEXT: ALU clause
-; Make sure the lds reads are using different addresses.
+; Make sure the lds reads are using different addresses, at different
+; constant offsets.
; EG-CHECK: LDS_READ_RET {{[*]*}} OQAP, {{PV|T}}[[ADDRR:[0-9]*\.[XYZW]]]
; EG-CHECK-NOT: LDS_READ_RET {{[*]*}} OQAP, T[[ADDRR]]
-; SI-CHECK: DS_READ_B32 {{v[0-9]+}}, 0, [[ADDRR:v[0-9]+]]
-; SI-CHECK-NOT: DS_READ_B32 {{v[0-9]+}}, 0, [[ADDRR]]
+; SI-CHECK: DS_READ_B32 {{v[0-9]+}}, [[ADDRR:v[0-9]+]], 16
+; SI-CHECK: DS_READ_B32 {{v[0-9]+}}, [[ADDRR]], 0,
define void @local_memory_two_objects(i32 addrspace(1)* %out) {
entry:
diff --git a/test/CodeGen/R600/local-memory.ll b/test/CodeGen/R600/local-memory.ll
index 2168a3d..6ebe41d 100644
--- a/test/CodeGen/R600/local-memory.ll
+++ b/test/CodeGen/R600/local-memory.ll
@@ -17,8 +17,8 @@
; CI-CHECK-NEXT: .long 32768
; EG-CHECK: LDS_WRITE
-; SI-CHECK_NOT: S_WQM_B64
-; SI-CHECK: DS_WRITE_B32 0
+; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: DS_WRITE_B32
; GROUP_BARRIER must be the last instruction in a clause
; EG-CHECK: GROUP_BARRIER
@@ -26,7 +26,7 @@
; SI-CHECK: S_BARRIER
; EG-CHECK: LDS_READ_RET
-; SI-CHECK: DS_READ_B32 {{v[0-9]+}}, 0
+; SI-CHECK: DS_READ_B32 {{v[0-9]+}},
define void @local_memory(i32 addrspace(1)* %out) {
entry:
diff --git a/test/CodeGen/R600/loop-idiom.ll b/test/CodeGen/R600/loop-idiom.ll
new file mode 100644
index 0000000..8a9cba2
--- /dev/null
+++ b/test/CodeGen/R600/loop-idiom.ll
@@ -0,0 +1,54 @@
+; RUN: opt -basicaa -loop-idiom -S < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600 --check-prefix=FUNC %s
+; RUN: opt -basicaa -loop-idiom -S < %s -march=r600 -mcpu=SI | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+target triple = "r600--"
+
+
+
+; Make sure loop-idiom doesn't create memcpy or memset. There are no library
+; implementations of these for R600.
+
+; FUNC: @no_memcpy
+; R600-NOT: @llvm.memcpy
+; SI-NOT: @llvm.memcpy
+define void @no_memcpy(i8 addrspace(3)* %in, i32 %size) {
+entry:
+ %dest = alloca i8, i32 32
+ br label %for.body
+
+for.body:
+ %0 = phi i32 [0, %entry], [%4, %for.body]
+ %1 = getelementptr i8 addrspace(3)* %in, i32 %0
+ %2 = getelementptr i8* %dest, i32 %0
+ %3 = load i8 addrspace(3)* %1
+ store i8 %3, i8* %2
+ %4 = add i32 %0, 1
+ %5 = icmp eq i32 %4, %size
+ br i1 %5, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+; FUNC: @no_memset
+; R600-NOT: @llvm.memset
+; R600-NOT: @memset_pattern16
+; SI-NOT: @llvm.memset
+; SI-NOT: @memset_pattern16
+define void @no_memset(i32 %size) {
+entry:
+ %dest = alloca i8, i32 32
+ br label %for.body
+
+for.body:
+ %0 = phi i32 [0, %entry], [%2, %for.body]
+ %1 = getelementptr i8* %dest, i32 %0
+ store i8 0, i8* %1
+ %2 = add i32 %0, 1
+ %3 = icmp eq i32 %2, %size
+ br i1 %3, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
diff --git a/test/CodeGen/R600/mad_uint24.ll b/test/CodeGen/R600/mad_uint24.ll
index 66a070e..3dcadc9 100644
--- a/test/CodeGen/R600/mad_uint24.ll
+++ b/test/CodeGen/R600/mad_uint24.ll
@@ -26,14 +26,11 @@ entry:
; The order of A and B does not matter.
; EG-CHECK: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]], [[A]], [[B]], [[C]]
; The result must be sign-extended
-; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MAD_CHAN]], literal.x
-; EG-CHECK: 16
-; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
+; EG-CHECK: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG-CHECK: 16
; SI-CHECK-LABEL: @i16_mad24
; SI-CHECK: V_MAD_U32_U24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:v[0-9]]], 16, [[MAD]]
-; SI-CHECK: V_ASHRREV_I32_e32 v{{[0-9]}}, 16, [[LSHL]]
+; SI-CHECK: V_BFE_I32 v{{[0-9]}}, [[MAD]], 0, 16
define void @i16_mad24(i32 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
entry:
@@ -51,14 +48,11 @@ entry:
; The order of A and B does not matter.
; EG-CHECK: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]], [[A]], [[B]], [[C]]
; The result must be sign-extended
-; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MAD_CHAN]], literal.x
-; EG-CHECK: 24
-; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
-; EG-CHECK: 24
+; EG-CHECK: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
+; EG-CHECK: 8
; SI-CHECK-LABEL: @i8_mad24
; SI-CHECK: V_MAD_U32_U24 [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:v[0-9]]], 24, [[MUL]]
-; SI-CHECK: V_ASHRREV_I32_e32 v{{[0-9]}}, 24, [[LSHL]]
+; SI-CHECK: V_BFE_I32 v{{[0-9]}}, [[MUL]], 0, 8
define void @i8_mad24(i32 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
entry:
diff --git a/test/CodeGen/R600/mubuf.ll b/test/CodeGen/R600/mubuf.ll
new file mode 100644
index 0000000..2d5ddeb
--- /dev/null
+++ b/test/CodeGen/R600/mubuf.ll
@@ -0,0 +1,98 @@
+; RUN: llc < %s -march=r600 -mcpu=SI -show-mc-encoding -verify-machineinstrs | FileCheck %s
+
+;;;==========================================================================;;;
+;;; MUBUF LOAD TESTS
+;;;==========================================================================;;;
+
+; MUBUF load with an immediate byte offset that fits into 12 bits
+; CHECK-LABEL: @mubuf_load0
+; CHECK: BUFFER_LOAD_DWORD v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 4 ; encoding: [0x04,0x80
+define void @mubuf_load0(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+entry:
+ %0 = getelementptr i32 addrspace(1)* %in, i64 1
+ %1 = load i32 addrspace(1)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; MUBUF load with the largest possible immediate offset (4095 bytes)
+; CHECK-LABEL: @mubuf_load1
+; CHECK: BUFFER_LOAD_UBYTE v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 4095 ; encoding: [0xff,0x8f
+define void @mubuf_load1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
+entry:
+ %0 = getelementptr i8 addrspace(1)* %in, i64 4095
+ %1 = load i8 addrspace(1)* %0
+ store i8 %1, i8 addrspace(1)* %out
+ ret void
+}
+
+; MUBUF load with an immediate byte offset that doesn't fit into 12 bits; an
+; i32 index of 1024 is a 4096-byte offset, one past the 4095 limit.
+; CHECK-LABEL: @mubuf_load2
+; CHECK: BUFFER_LOAD_DWORD v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 0 ; encoding: [0x00,0x80
+define void @mubuf_load2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+entry:
+ %0 = getelementptr i32 addrspace(1)* %in, i64 1024
+ %1 = load i32 addrspace(1)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; MUBUF load with a 12-bit immediate offset and a register offset
+; CHECK-LABEL: @mubuf_load3
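+; The constant part of the address should fold into the immediate offset,
+; leaving no ADD for the register part.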
+; CHECK-NOT: ADD
+; CHECK: BUFFER_LOAD_DWORD v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 4 ; encoding: [0x04,0x80
+define void @mubuf_load3(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i64 %offset) {
+entry:
+ %0 = getelementptr i32 addrspace(1)* %in, i64 %offset
+ %1 = getelementptr i32 addrspace(1)* %0, i64 1
+ %2 = load i32 addrspace(1)* %1
+ store i32 %2, i32 addrspace(1)* %out
+ ret void
+}
+
+;;;==========================================================================;;;
+;;; MUBUF STORE TESTS
+;;;==========================================================================;;;
+
+; MUBUF store with an immediate byte offset that fits into 12 bits
+; CHECK-LABEL: @mubuf_store0
+; CHECK: BUFFER_STORE_DWORD v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 4 ; encoding: [0x04,0x80
+define void @mubuf_store0(i32 addrspace(1)* %out) {
+entry:
+ %0 = getelementptr i32 addrspace(1)* %out, i64 1
+ store i32 0, i32 addrspace(1)* %0
+ ret void
+}
+
+; MUBUF store with the largest possible immediate offset (4095 bytes)
+; CHECK-LABEL: @mubuf_store1
+; CHECK: BUFFER_STORE_BYTE v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 4095 ; encoding: [0xff,0x8f
+
+define void @mubuf_store1(i8 addrspace(1)* %out) {
+entry:
+ %0 = getelementptr i8 addrspace(1)* %out, i64 4095
+ store i8 0, i8 addrspace(1)* %0
+ ret void
+}
+
+; MUBUF store with an immediate byte offset that doesn't fit into 12 bits; an
+; i32 index of 1024 is a 4096-byte offset, one past the 4095 limit.
+; CHECK-LABEL: @mubuf_store2
+; CHECK: BUFFER_STORE_DWORD v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 0 ; encoding: [0x00,0x80
+define void @mubuf_store2(i32 addrspace(1)* %out) {
+entry:
+ %0 = getelementptr i32 addrspace(1)* %out, i64 1024
+ store i32 0, i32 addrspace(1)* %0
+ ret void
+}
+
+; MUBUF store with a 12-bit immediate offset and a register offset
+; CHECK-LABEL: @mubuf_store3
+; CHECK-NOT: ADD
+; CHECK: BUFFER_STORE_DWORD v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 4 ; encoding: [0x04,0x80
+define void @mubuf_store3(i32 addrspace(1)* %out, i64 %offset) {
+entry:
+ %0 = getelementptr i32 addrspace(1)* %out, i64 %offset
+ %1 = getelementptr i32 addrspace(1)* %0, i64 1
+ store i32 0, i32 addrspace(1)* %1
+ ret void
+}
diff --git a/test/CodeGen/R600/mul.ll b/test/CodeGen/R600/mul.ll
index 8c27e28..e176148 100644
--- a/test/CodeGen/R600/mul.ll
+++ b/test/CodeGen/R600/mul.ll
@@ -40,3 +40,15 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
+
+; SI-CHECK-LABEL: @trunc_i64_mul_to_i32
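+; Only the low 32 bits of the product are used, so a single V_MUL_LO_I32
+; should suffice.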
+; SI-CHECK: S_LOAD_DWORD
+; SI-CHECK: S_LOAD_DWORD
+; SI-CHECK: V_MUL_LO_I32
+; SI-CHECK: BUFFER_STORE_DWORD
+define void @trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+ %mul = mul i64 %b, %a
+ %trunc = trunc i64 %mul to i32
+ store i32 %trunc, i32 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/mul_uint24.ll b/test/CodeGen/R600/mul_uint24.ll
index 6e6d549..a413961 100644
--- a/test/CodeGen/R600/mul_uint24.ll
+++ b/test/CodeGen/R600/mul_uint24.ll
@@ -24,15 +24,11 @@ entry:
; The order of A and B does not matter.
; EG-CHECK: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]], [[A]], [[B]]
; The result must be sign-extended
-; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MUL_CHAN]], literal.x
-; EG-CHECK: 16
-; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
+; EG-CHECK: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
; EG-CHECK: 16
; SI-CHECK-LABEL: @i16_mul24
; SI-CHECK: V_MUL_U32_U24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:v[0-9]]], 16, [[MUL]]
-; SI-CHECK: V_ASHRREV_I32_e32 v{{[0-9]}}, 16, [[LSHL]]
-
+; SI-CHECK: V_BFE_I32 v{{[0-9]}}, [[MUL]], 0, 16,
define void @i16_mul24(i32 addrspace(1)* %out, i16 %a, i16 %b) {
entry:
%0 = mul i16 %a, %b
@@ -47,14 +43,10 @@ entry:
; The order of A and B does not matter.
; EG-CHECK: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]], [[A]], [[B]]
; The result must be sign-extended
-; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MUL_CHAN]], literal.x
-; EG-CHECK: 24
-; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
-; EG-CHECK: 24
+; EG-CHECK: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
; SI-CHECK-LABEL: @i8_mul24
; SI-CHECK: V_MUL_U32_U24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:v[0-9]]], 24, [[MUL]]
-; SI-CHECK: V_ASHRREV_I32_e32 v{{[0-9]}}, 24, [[LSHL]]
+; SI-CHECK: V_BFE_I32 v{{[0-9]}}, [[MUL]], 0, 8,
define void @i8_mul24(i32 addrspace(1)* %out, i8 %a, i8 %b) {
entry:
diff --git a/test/CodeGen/R600/or.ll b/test/CodeGen/R600/or.ll
index 35d23b3..2cc991e 100644
--- a/test/CodeGen/R600/or.ll
+++ b/test/CodeGen/R600/or.ll
@@ -1,13 +1,13 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
+;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI %s
-; EG-CHECK-LABEL: @or_v2i32
-; EG-CHECK: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG-CHECK: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG-LABEL: @or_v2i32
+; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK-LABEL: @or_v2i32
-;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI-LABEL: @or_v2i32
+; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @or_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
@@ -18,17 +18,17 @@ define void @or_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in)
ret void
}
-; EG-CHECK-LABEL: @or_v4i32
-; EG-CHECK: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG-CHECK: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG-CHECK: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG-CHECK: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG-LABEL: @or_v4i32
+; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK-LABEL: @or_v4i32
-;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI-LABEL: @or_v4i32
+; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
@@ -39,15 +39,91 @@ define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in)
ret void
}
-; EG-CHECK-LABEL: @or_i64
-; EG-CHECK-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
-; EG-CHECK-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
-; SI-CHECK-LABEL: @or_i64
-; SI-CHECK: V_OR_B32_e32 v{{[0-9]}}
-; SI-CHECK: V_OR_B32_e32 v{{[0-9]}}
-define void @or_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
-entry:
- %0 = or i64 %a, %b
- store i64 %0, i64 addrspace(1)* %out
- ret void
+; SI-LABEL: @scalar_or_i32
+; SI: S_OR_B32
+define void @scalar_or_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+ %or = or i32 %a, %b
+ store i32 %or, i32 addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: @vector_or_i32
+; SI: V_OR_B32_e32 v{{[0-9]}}
+define void @vector_or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 %b) {
+ %loada = load i32 addrspace(1)* %a
+ %or = or i32 %loada, %b
+ store i32 %or, i32 addrspace(1)* %out
+ ret void
+}
+
+; EG-LABEL: @scalar_or_i64
+; EG-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
+; EG-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
+; SI-LABEL: @scalar_or_i64
+; SI: S_OR_B64
+define void @scalar_or_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+ %or = or i64 %a, %b
+ store i64 %or, i64 addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: @vector_or_i64
+; SI: V_OR_B32_e32 v{{[0-9]}}
+; SI: V_OR_B32_e32 v{{[0-9]}}
+define void @vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+ %loada = load i64 addrspace(1)* %a, align 8
+  %loadb = load i64 addrspace(1)* %b, align 8
+ %or = or i64 %loada, %loadb
+ store i64 %or, i64 addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: @scalar_vector_or_i64
+; SI: V_OR_B32_e32 v{{[0-9]}}
+; SI: V_OR_B32_e32 v{{[0-9]}}
+define void @scalar_vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 %b) {
+ %loada = load i64 addrspace(1)* %a
+ %or = or i64 %loada, %b
+ store i64 %or, i64 addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: @vector_or_i64_loadimm
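+; 22470723082367 = (5231 << 32) | 3749156991, and 3749156991 is -545810305 as
+; a signed i32, matching the two immediates below.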
+; SI-DAG: S_MOV_B32 [[LO_S_IMM:s[0-9]+]], -545810305
+; SI-DAG: S_MOV_B32 [[HI_S_IMM:s[0-9]+]], 5231
+; SI-DAG: BUFFER_LOAD_DWORDX2 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}},
+; SI-DAG: V_OR_B32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
+; SI-DAG: V_OR_B32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
+; SI: S_ENDPGM
+define void @vector_or_i64_loadimm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+ %loada = load i64 addrspace(1)* %a, align 8
+ %or = or i64 %loada, 22470723082367
+ store i64 %or, i64 addrspace(1)* %out
+ ret void
+}
+
+; FIXME: The or 0 should really be removed.
+; SI-LABEL: @vector_or_i64_imm
+; SI: BUFFER_LOAD_DWORDX2 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}},
+; SI: V_OR_B32_e32 {{v[0-9]+}}, 8, v[[LO_VREG]]
+; SI: V_OR_B32_e32 {{v[0-9]+}}, 0, {{.*}}
+; SI: S_ENDPGM
+define void @vector_or_i64_imm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+ %loada = load i64 addrspace(1)* %a, align 8
+ %or = or i64 %loada, 8
+ store i64 %or, i64 addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: @trunc_i64_or_to_i32
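+; Only the low halves matter after the trunc, so one scalar OR of two loaded
+; dwords should suffice.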
+; SI: S_LOAD_DWORD [[SREG0:s[0-9]+]],
+; SI: S_LOAD_DWORD [[SREG1:s[0-9]+]],
+; SI: S_OR_B32 [[SRESULT:s[0-9]+]], [[SREG1]], [[SREG0]]
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
+define void @trunc_i64_or_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+  %or = or i64 %b, %a
+  %trunc = trunc i64 %or to i32
+ store i32 %trunc, i32 addrspace(1)* %out, align 8
+ ret void
}
diff --git a/test/CodeGen/R600/private-memory.ll b/test/CodeGen/R600/private-memory.ll
index 48a013c..4920320 100644
--- a/test/CodeGen/R600/private-memory.ll
+++ b/test/CodeGen/R600/private-memory.ll
@@ -1,10 +1,11 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK --check-prefix=FUNC
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK --check-prefix=FUNC
; This test checks that uses and defs of the AR register happen in the same
; instruction clause.
-; R600-CHECK-LABEL: @mova_same_clause
+; FUNC-LABEL: @mova_same_clause
+
; R600-CHECK: MOVA_INT
; R600-CHECK-NOT: ALU clause
; R600-CHECK: 0 + AR.x
@@ -12,11 +13,10 @@
; R600-CHECK-NOT: ALU clause
; R600-CHECK: 0 + AR.x
-; SI-CHECK-LABEL: @mova_same_clause
-; SI-CHECK: V_READFIRSTLANE
+; SI-CHECK: V_READFIRSTLANE_B32 vcc_lo
; SI-CHECK: V_MOVRELD
; SI-CHECK: S_CBRANCH
-; SI-CHECK: V_READFIRSTLANE
+; SI-CHECK: V_READFIRSTLANE_B32 vcc_lo
; SI-CHECK: V_MOVRELD
; SI-CHECK: S_CBRANCH
define void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
@@ -46,9 +46,8 @@ entry:
; XXX: This generated code has unnecessary MOVs, we should be able to optimize
; this.
-; R600-CHECK-LABEL: @multiple_structs
+; FUNC-LABEL: @multiple_structs
; R600-CHECK-NOT: MOVA_INT
-; SI-CHECK-LABEL: @multiple_structs
; SI-CHECK-NOT: V_MOVREL
%struct.point = type { i32, i32 }
@@ -77,9 +76,8 @@ entry:
; loads and stores should be lowered to copies, so there shouldn't be any
; MOVA instructions.
-; R600-CHECK-LABLE: @direct_loop
+; FUNC-LABEL: @direct_loop
; R600-CHECK-NOT: MOVA_INT
-; SI-CHECK-LABEL: @direct_loop
; SI-CHECK-NOT: V_MOVREL
define void @direct_loop(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
@@ -113,3 +111,106 @@ for.end:
store i32 %value, i32 addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @short_array
+
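+; The two i16 elements should be packed into a single dword:
+; (1 << 16) | 0 = 65536.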
+; R600-CHECK: MOV {{\** *}}T{{[0-9]\.[XYZW]}}, literal
+; R600-CHECK: 65536
+; R600-CHECK: *
+; R600-CHECK: MOVA_INT
+
+; SI-CHECK: V_MOV_B32_e32 v{{[0-9]}}, 65536
+; SI-CHECK: V_MOVRELS_B32_e32
+define void @short_array(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %0 = alloca [2 x i16]
+ %1 = getelementptr [2 x i16]* %0, i32 0, i32 0
+ %2 = getelementptr [2 x i16]* %0, i32 0, i32 1
+ store i16 0, i16* %1
+ store i16 1, i16* %2
+ %3 = getelementptr [2 x i16]* %0, i32 0, i32 %index
+ %4 = load i16* %3
+ %5 = sext i16 %4 to i32
+ store i32 %5, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @char_array
+
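+; Likewise, the two i8 elements pack into (1 << 8) | 0 = 256.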
+; R600-CHECK: OR_INT {{\** *}}T{{[0-9]\.[XYZW]}}, {{[PVT0-9]+\.[XYZW]}}, literal
+; R600-CHECK: 256
+; R600-CHECK: *
+; R600-CHECK-NEXT: MOVA_INT
+
+; SI-CHECK: V_OR_B32_e32 v{{[0-9]}}, 256
+; SI-CHECK: V_MOVRELS_B32_e32
+define void @char_array(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %0 = alloca [2 x i8]
+ %1 = getelementptr [2 x i8]* %0, i32 0, i32 0
+ %2 = getelementptr [2 x i8]* %0, i32 0, i32 1
+ store i8 0, i8* %1
+ store i8 1, i8* %2
+ %3 = getelementptr [2 x i8]* %0, i32 0, i32 %index
+ %4 = load i8* %3
+ %5 = sext i8 %4 to i32
+ store i32 %5, i32 addrspace(1)* %out
+ ret void
+
+}
+
+; Make sure we don't overwrite workitem information with private memory
+
+; FUNC-LABEL: @work_item_info
+; R600-CHECK-NOT: MOV T0.X
+; Additional check in case the move ends up in the last slot
+; R600-CHECK-NOT: MOV * T0.X
+
+; SI-CHECK-NOT: V_MOV_B32_e{{(32|64)}} v0
+define void @work_item_info(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %0 = alloca [2 x i32]
+ %1 = getelementptr [2 x i32]* %0, i32 0, i32 0
+ %2 = getelementptr [2 x i32]* %0, i32 0, i32 1
+ store i32 0, i32* %1
+ store i32 1, i32* %2
+ %3 = getelementptr [2 x i32]* %0, i32 0, i32 %in
+ %4 = load i32* %3
+ %5 = call i32 @llvm.r600.read.tidig.x()
+ %6 = add i32 %4, %5
+ store i32 %6, i32 addrspace(1)* %out
+ ret void
+}
+
+; Test that two stack objects are not stored in the same register
+; The second stack object should be in T3.X
+; FUNC-LABEL: @no_overlap
+; R600-CHECK: MOV {{\** *}}T3.X
+; SI-CHECK: V_MOV_B32_e32 v3
+define void @no_overlap(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %0 = alloca [3 x i8], align 1
+ %1 = alloca [2 x i8], align 1
+ %2 = getelementptr [3 x i8]* %0, i32 0, i32 0
+ %3 = getelementptr [3 x i8]* %0, i32 0, i32 1
+ %4 = getelementptr [3 x i8]* %0, i32 0, i32 2
+ %5 = getelementptr [2 x i8]* %1, i32 0, i32 0
+ %6 = getelementptr [2 x i8]* %1, i32 0, i32 1
+ store i8 0, i8* %2
+ store i8 1, i8* %3
+ store i8 2, i8* %4
+ store i8 1, i8* %5
+ store i8 0, i8* %6
+ %7 = getelementptr [3 x i8]* %0, i32 0, i32 %in
+ %8 = getelementptr [2 x i8]* %1, i32 0, i32 %in
+ %9 = load i8* %7
+ %10 = load i8* %8
+ %11 = add i8 %9, %10
+ %12 = sext i8 %11 to i32
+ store i32 %12, i32 addrspace(1)* %out
+ ret void
+}
+
+
+
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
diff --git a/test/CodeGen/R600/r600-infinite-loop-bug-while-reorganizing-vector.ll b/test/CodeGen/R600/r600-infinite-loop-bug-while-reorganizing-vector.ll
new file mode 100644
index 0000000..c89398f
--- /dev/null
+++ b/test/CodeGen/R600/r600-infinite-loop-bug-while-reorganizing-vector.ll
@@ -0,0 +1,59 @@
+;RUN: llc < %s -march=r600 -mcpu=cayman
+;REQUIRES: asserts
+
+define void @main(<4 x float> inreg, <4 x float> inreg) #0 {
+main_body:
+ %2 = extractelement <4 x float> %0, i32 0
+ %3 = extractelement <4 x float> %0, i32 1
+ %4 = extractelement <4 x float> %0, i32 2
+ %5 = extractelement <4 x float> %0, i32 3
+ %6 = insertelement <4 x float> undef, float %2, i32 0
+ %7 = insertelement <4 x float> %6, float %3, i32 1
+ %8 = insertelement <4 x float> %7, float %4, i32 2
+ %9 = insertelement <4 x float> %8, float %5, i32 3
+ %10 = call <4 x float> @llvm.AMDGPU.cube(<4 x float> %9)
+ %11 = extractelement <4 x float> %10, i32 0
+ %12 = extractelement <4 x float> %10, i32 1
+ %13 = extractelement <4 x float> %10, i32 2
+ %14 = extractelement <4 x float> %10, i32 3
+ %15 = call float @fabs(float %13)
+ %16 = fdiv float 1.000000e+00, %15
+ %17 = fmul float %11, %16
+ %18 = fadd float %17, 1.500000e+00
+ %19 = fmul float %12, %16
+ %20 = fadd float %19, 1.500000e+00
+ %21 = insertelement <4 x float> undef, float %20, i32 0
+ %22 = insertelement <4 x float> %21, float %18, i32 1
+ %23 = insertelement <4 x float> %22, float %14, i32 2
+ %24 = insertelement <4 x float> %23, float %5, i32 3
+ %25 = extractelement <4 x float> %24, i32 0
+ %26 = extractelement <4 x float> %24, i32 1
+ %27 = extractelement <4 x float> %24, i32 2
+ %28 = extractelement <4 x float> %24, i32 3
+ %29 = insertelement <4 x float> undef, float %25, i32 0
+ %30 = insertelement <4 x float> %29, float %26, i32 1
+ %31 = insertelement <4 x float> %30, float %27, i32 2
+ %32 = insertelement <4 x float> %31, float %28, i32 3
+ %33 = call <4 x float> @llvm.AMDGPU.tex(<4 x float> %32, i32 16, i32 0, i32 13)
+ %34 = extractelement <4 x float> %33, i32 0
+ %35 = insertelement <4 x float> undef, float %34, i32 0
+ %36 = insertelement <4 x float> %35, float %34, i32 1
+ %37 = insertelement <4 x float> %36, float %34, i32 2
+ %38 = insertelement <4 x float> %37, float 1.000000e+00, i32 3
+ call void @llvm.R600.store.swizzle(<4 x float> %38, i32 0, i32 0)
+ ret void
+}
+
+; Function Attrs: readnone
+declare <4 x float> @llvm.AMDGPU.cube(<4 x float>) #1
+
+; Function Attrs: readnone
+declare float @fabs(float) #1
+
+; Function Attrs: readnone
+declare <4 x float> @llvm.AMDGPU.tex(<4 x float>, i32, i32, i32) #1
+
+declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { readnone }
diff --git a/test/CodeGen/R600/register-count-comments.ll b/test/CodeGen/R600/register-count-comments.ll
new file mode 100644
index 0000000..a64b280
--- /dev/null
+++ b/test/CodeGen/R600/register-count-comments.ll
@@ -0,0 +1,20 @@
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+
+declare i32 @llvm.SI.tid() nounwind readnone
+
+; SI-LABEL: @foo:
+; SI: .section .AMDGPU.csdata
+; SI: ; Kernel info:
+; SI: ; NumSgprs: {{[0-9]+}}
+; SI: ; NumVgprs: {{[0-9]+}}
+define void @foo(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %abase, i32 addrspace(1)* %bbase) nounwind {
+ %tid = call i32 @llvm.SI.tid() nounwind readnone
+ %aptr = getelementptr i32 addrspace(1)* %abase, i32 %tid
+ %bptr = getelementptr i32 addrspace(1)* %bbase, i32 %tid
+ %outptr = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %b = load i32 addrspace(1)* %bptr, align 4
+ %result = add i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %outptr, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/salu-to-valu.ll b/test/CodeGen/R600/salu-to-valu.ll
new file mode 100644
index 0000000..e461bf9
--- /dev/null
+++ b/test/CodeGen/R600/salu-to-valu.ll
@@ -0,0 +1,48 @@
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+
+; In this test both the pointer and the offset operands to the
+; BUFFER_LOAD instructions end up being stored in vgprs. This
+; requires us to add the pointer and offset together, store the
+; result in the offset operand (vaddr), and then store 0 in an
+; sgpr register pair and use that for the pointer operand
+; (low 64-bits of srsrc).
+
+; CHECK-LABEL: @mubuf
+
+; Make sure we aren't using VGPRs for the source operand of S_MOV_B64
+; CHECK-NOT: S_MOV_B64 s[{{[0-9]+:[0-9]+}}], v
+
+; Make sure we aren't using VGPRs for the srsrc operand of BUFFER_LOAD_*
+; instructions
+; CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}]
+; CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}]
+define void @mubuf(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
+entry:
+ %0 = call i32 @llvm.r600.read.tidig.x() #1
+ %1 = call i32 @llvm.r600.read.tidig.y() #1
+ %2 = sext i32 %0 to i64
+ %3 = sext i32 %1 to i64
+ br label %loop
+
+loop:
+ %4 = phi i64 [0, %entry], [%5, %loop]
+ %5 = add i64 %2, %4
+ %6 = getelementptr i8 addrspace(1)* %in, i64 %5
+ %7 = load i8 addrspace(1)* %6, align 1
+ %8 = or i64 %5, 1
+ %9 = getelementptr i8 addrspace(1)* %in, i64 %8
+ %10 = load i8 addrspace(1)* %9, align 1
+ %11 = add i8 %7, %10
+ %12 = sext i8 %11 to i32
+ store i32 %12, i32 addrspace(1)* %out
+ %13 = icmp slt i64 %5, 10
+ br i1 %13, label %loop, label %done
+
+done:
+ ret void
+}
+
+declare i32 @llvm.r600.read.tidig.x() #1
+declare i32 @llvm.r600.read.tidig.y() #1
+
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll b/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll
new file mode 100644
index 0000000..2a286d1
--- /dev/null
+++ b/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll
@@ -0,0 +1,162 @@
+; XFAIL: *
+; REQUIRES: asserts
+; RUN: llc -O0 -march=r600 -mcpu=SI < %s | FileCheck %s -check-prefix=SI
+
+declare void @llvm.AMDGPU.barrier.local() nounwind noduplicate
+
+
+; SI-LABEL: @main(
+define void @main(<4 x float> inreg %reg0, <4 x float> inreg %reg1) #0 {
+main_body:
+ %0 = extractelement <4 x float> %reg1, i32 0
+ %1 = extractelement <4 x float> %reg1, i32 2
+ %2 = fcmp ult float %0, 0.000000e+00
+ %3 = select i1 %2, float 1.000000e+00, float 0.000000e+00
+ %4 = fsub float -0.000000e+00, %3
+ %5 = fptosi float %4 to i32
+ %6 = bitcast i32 %5 to float
+ %7 = bitcast float %6 to i32
+ %8 = icmp ne i32 %7, 0
+ br i1 %8, label %LOOP, label %ENDIF
+
+Flow1: ; preds = %ENDIF19, %ENDIF16
+ %9 = phi float [ %115, %ENDIF19 ], [ undef, %ENDIF16 ]
+ %10 = phi float [ %114, %ENDIF19 ], [ undef, %ENDIF16 ]
+ %11 = phi float [ %113, %ENDIF19 ], [ undef, %ENDIF16 ]
+ %12 = phi float [ %112, %ENDIF19 ], [ undef, %ENDIF16 ]
+ %13 = phi float [ %111, %ENDIF19 ], [ undef, %ENDIF16 ]
+ %14 = phi i1 [ false, %ENDIF19 ], [ true, %ENDIF16 ]
+ br label %Flow
+
+Flow2: ; preds = %Flow
+ br label %ENDIF
+
+ENDIF: ; preds = %main_body, %Flow2
+ %temp.0 = phi float [ 0.000000e+00, %main_body ], [ %104, %Flow2 ]
+ %temp1.0 = phi float [ 1.000000e+00, %main_body ], [ %103, %Flow2 ]
+ %temp2.0 = phi float [ 0.000000e+00, %main_body ], [ %102, %Flow2 ]
+ %temp3.0 = phi float [ 0.000000e+00, %main_body ], [ %101, %Flow2 ]
+ %15 = extractelement <4 x float> %reg1, i32 1
+ %16 = extractelement <4 x float> %reg1, i32 3
+ %17 = load <4 x float> addrspace(9)* null
+ %18 = extractelement <4 x float> %17, i32 0
+ %19 = fmul float %18, %0
+ %20 = load <4 x float> addrspace(9)* null
+ %21 = extractelement <4 x float> %20, i32 1
+ %22 = fmul float %21, %0
+ %23 = load <4 x float> addrspace(9)* null
+ %24 = extractelement <4 x float> %23, i32 2
+ %25 = fmul float %24, %0
+ %26 = load <4 x float> addrspace(9)* null
+ %27 = extractelement <4 x float> %26, i32 3
+ %28 = fmul float %27, %0
+ %29 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+ %30 = extractelement <4 x float> %29, i32 0
+ %31 = fmul float %30, %15
+ %32 = fadd float %31, %19
+ %33 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+ %34 = extractelement <4 x float> %33, i32 1
+ %35 = fmul float %34, %15
+ %36 = fadd float %35, %22
+ %37 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+ %38 = extractelement <4 x float> %37, i32 2
+ %39 = fmul float %38, %15
+ %40 = fadd float %39, %25
+ %41 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+ %42 = extractelement <4 x float> %41, i32 3
+ %43 = fmul float %42, %15
+ %44 = fadd float %43, %28
+ %45 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+ %46 = extractelement <4 x float> %45, i32 0
+ %47 = fmul float %46, %1
+ %48 = fadd float %47, %32
+ %49 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+ %50 = extractelement <4 x float> %49, i32 1
+ %51 = fmul float %50, %1
+ %52 = fadd float %51, %36
+ %53 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+ %54 = extractelement <4 x float> %53, i32 2
+ %55 = fmul float %54, %1
+ %56 = fadd float %55, %40
+ %57 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+ %58 = extractelement <4 x float> %57, i32 3
+ %59 = fmul float %58, %1
+ %60 = fadd float %59, %44
+ %61 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+ %62 = extractelement <4 x float> %61, i32 0
+ %63 = fmul float %62, %16
+ %64 = fadd float %63, %48
+ %65 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+ %66 = extractelement <4 x float> %65, i32 1
+ %67 = fmul float %66, %16
+ %68 = fadd float %67, %52
+ %69 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+ %70 = extractelement <4 x float> %69, i32 2
+ %71 = fmul float %70, %16
+ %72 = fadd float %71, %56
+ %73 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+ %74 = extractelement <4 x float> %73, i32 3
+ %75 = fmul float %74, %16
+ %76 = fadd float %75, %60
+ %77 = insertelement <4 x float> undef, float %64, i32 0
+ %78 = insertelement <4 x float> %77, float %68, i32 1
+ %79 = insertelement <4 x float> %78, float %72, i32 2
+ %80 = insertelement <4 x float> %79, float %76, i32 3
+ call void @llvm.AMDGPU.barrier.local()
+ %81 = insertelement <4 x float> undef, float %temp.0, i32 0
+ %82 = insertelement <4 x float> %81, float %temp1.0, i32 1
+ %83 = insertelement <4 x float> %82, float %temp2.0, i32 2
+ %84 = insertelement <4 x float> %83, float %temp3.0, i32 3
+ call void @llvm.AMDGPU.barrier.local()
+ ret void
+
+LOOP: ; preds = %main_body, %Flow
+ %temp.1 = phi float [ %109, %Flow ], [ 0.000000e+00, %main_body ]
+ %temp1.1 = phi float [ %108, %Flow ], [ 1.000000e+00, %main_body ]
+ %temp2.1 = phi float [ %107, %Flow ], [ 0.000000e+00, %main_body ]
+ %temp3.1 = phi float [ %106, %Flow ], [ 0.000000e+00, %main_body ]
+ %temp4.0 = phi float [ %105, %Flow ], [ -2.000000e+00, %main_body ]
+ %85 = fcmp uge float %temp4.0, %0
+ %86 = select i1 %85, float 1.000000e+00, float 0.000000e+00
+ %87 = fsub float -0.000000e+00, %86
+ %88 = fptosi float %87 to i32
+ %89 = bitcast i32 %88 to float
+ %90 = bitcast float %89 to i32
+ %91 = icmp ne i32 %90, 0
+ %92 = xor i1 %91, true
+ br i1 %92, label %ENDIF16, label %Flow
+
+ENDIF16: ; preds = %LOOP
+ %93 = fcmp une float %1, %temp4.0
+ %94 = select i1 %93, float 1.000000e+00, float 0.000000e+00
+ %95 = fsub float -0.000000e+00, %94
+ %96 = fptosi float %95 to i32
+ %97 = bitcast i32 %96 to float
+ %98 = bitcast float %97 to i32
+ %99 = icmp ne i32 %98, 0
+ %100 = xor i1 %99, true
+ br i1 %100, label %ENDIF19, label %Flow1
+
+Flow: ; preds = %Flow1, %LOOP
+ %101 = phi float [ %temp3.1, %Flow1 ], [ %temp3.1, %LOOP ]
+ %102 = phi float [ %temp2.1, %Flow1 ], [ %temp2.1, %LOOP ]
+ %103 = phi float [ %temp1.1, %Flow1 ], [ %temp1.1, %LOOP ]
+ %104 = phi float [ %temp.1, %Flow1 ], [ %temp.1, %LOOP ]
+ %105 = phi float [ %9, %Flow1 ], [ undef, %LOOP ]
+ %106 = phi float [ %10, %Flow1 ], [ undef, %LOOP ]
+ %107 = phi float [ %11, %Flow1 ], [ undef, %LOOP ]
+ %108 = phi float [ %12, %Flow1 ], [ undef, %LOOP ]
+ %109 = phi float [ %13, %Flow1 ], [ undef, %LOOP ]
+ %110 = phi i1 [ %14, %Flow1 ], [ true, %LOOP ]
+ br i1 %110, label %Flow2, label %LOOP
+
+ENDIF19: ; preds = %ENDIF16
+ %111 = fadd float %temp.1, 1.000000e+00
+ %112 = fadd float %temp1.1, 0.000000e+00
+ %113 = fadd float %temp2.1, 0.000000e+00
+ %114 = fadd float %temp3.1, 0.000000e+00
+ %115 = fadd float %temp4.0, 1.000000e+00
+ br label %Flow1
+}
+
+attributes #0 = { "ShaderType"="1" }
diff --git a/test/CodeGen/R600/select-vectors.ll b/test/CodeGen/R600/select-vectors.ll
new file mode 100644
index 0000000..94605fe
--- /dev/null
+++ b/test/CodeGen/R600/select-vectors.ll
@@ -0,0 +1,155 @@
+; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; Test expansion of scalar selects on vectors.
+; Evergreen not enabled since it seems to be having problems with doubles.
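+; The scalar condition should expand to one V_CNDMASK_B32 per selected element
+; (per 32-bit half for the 64-bit element types).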
+
+
+; FUNC-LABEL: @select_v4i8
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+define void @select_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, <4 x i8> %b, i8 %c) nounwind {
+ %cmp = icmp eq i8 %c, 0
+ %select = select i1 %cmp, <4 x i8> %a, <4 x i8> %b
+ store <4 x i8> %select, <4 x i8> addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @select_v4i16
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+define void @select_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x i16> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <4 x i16> %a, <4 x i16> %b
+ store <4 x i16> %select, <4 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @select_v2i32
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: BUFFER_STORE_DWORDX2
+define void @select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <2 x i32> %a, <2 x i32> %b
+ store <2 x i32> %select, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @select_v4i32
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: BUFFER_STORE_DWORDX4
+define void @select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <4 x i32> %a, <4 x i32> %b
+ store <4 x i32> %select, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @select_v8i32
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+define void @select_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <8 x i32> %a, <8 x i32> %b
+ store <8 x i32> %select, <8 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @select_v2f32
+; SI: BUFFER_STORE_DWORDX2
+define void @select_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <2 x float> %a, <2 x float> %b
+ store <2 x float> %select, <2 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @select_v4f32
+; SI: BUFFER_STORE_DWORDX4
+define void @select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <4 x float> %a, <4 x float> %b
+ store <4 x float> %select, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @select_v8f32
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+define void @select_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <8 x float> %a, <8 x float> %b
+ store <8 x float> %select, <8 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @select_v2f64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+define void @select_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <2 x double> %a, <2 x double> %b
+ store <2 x double> %select, <2 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @select_v4f64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+define void @select_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <4 x double> %a, <4 x double> %b
+ store <4 x double> %select, <4 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @select_v8f64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+define void @select_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <8 x double> %a, <8 x double> %b
+ store <8 x double> %select, <8 x double> addrspace(1)* %out, align 16
+ ret void
+}
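
What these checks encode, as a minimal sketch (hypothetical IR; assuming the
scalar condition is expanded into one per-element select, which is one
V_CNDMASK_B32_e64 apiece on SI):

  ; select i1 %cmp, <2 x i32> %a, <2 x i32> %b
  %a0 = extractelement <2 x i32> %a, i32 0
  %b0 = extractelement <2 x i32> %b, i32 0
  %r0 = select i1 %cmp, i32 %a0, i32 %b0      ; first V_CNDMASK_B32
  %a1 = extractelement <2 x i32> %a, i32 1
  %b1 = extractelement <2 x i32> %b, i32 1
  %r1 = select i1 %cmp, i32 %a1, i32 %b1      ; second V_CNDMASK_B32
  %v0 = insertelement <2 x i32> undef, i32 %r0, i32 0
  %v1 = insertelement <2 x i32> %v0, i32 %r1, i32 1

A 64-bit element is selected as two dwords, which is why @select_v2f64
expects four V_CNDMASK_B32_e64 lines for only two elements.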
diff --git a/test/CodeGen/R600/select64.ll b/test/CodeGen/R600/select64.ll
new file mode 100644
index 0000000..6b87d98
--- /dev/null
+++ b/test/CodeGen/R600/select64.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+
+; CHECK-LABEL: @select0
+; i64 select should be split into two i32 selects, and we shouldn't need
+; to use a shift to extract the hi dword of the input.
+; CHECK-NOT: S_LSHR_B64
+; CHECK: V_CNDMASK
+; CHECK: V_CNDMASK
+define void @select0(i64 addrspace(1)* %out, i32 %cond, i64 %in) {
+entry:
+ %0 = icmp ugt i32 %cond, 5
+ %1 = select i1 %0, i64 0, i64 %in
+ store i64 %1, i64 addrspace(1)* %out
+ ret void
+}
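
Schematically, the lowering the checks above expect (pseudo-SI comments, not
actual llc output; register names are illustrative):

  ; %in arrives as a 32-bit register pair {in.lo, in.hi}, so the hi dword
  ; is already available without any S_LSHR_B64:
  ;   v_cndmask_b32 v0, in.lo, 0, vcc   ; low half of the i64 select
  ;   v_cndmask_b32 v1, in.hi, 0, vcc   ; high half of the i64 select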
diff --git a/test/CodeGen/R600/setcc-equivalent.ll b/test/CodeGen/R600/setcc-equivalent.ll
new file mode 100644
index 0000000..4c50aa3
--- /dev/null
+++ b/test/CodeGen/R600/setcc-equivalent.ll
@@ -0,0 +1,30 @@
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG %s
+
+; EG-LABEL: @and_setcc_setcc_i32
+; EG: AND_INT
+; EG-NEXT: SETE_INT
+define void @and_setcc_setcc_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+ %cmp1 = icmp eq i32 %a, -1
+ %cmp2 = icmp eq i32 %b, -1
+ %and = and i1 %cmp1, %cmp2
+ %ext = sext i1 %and to i32
+ store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; EG-LABEL: @and_setcc_setcc_v4i32
+; EG: AND_INT
+; EG: AND_INT
+; EG: SETE_INT
+; EG: AND_INT
+; EG: SETE_INT
+; EG: AND_INT
+; EG: SETE_INT
+define void @and_setcc_setcc_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) {
+ %cmp1 = icmp eq <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %cmp2 = icmp eq <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %and = and <4 x i1> %cmp1, %cmp2
+ %ext = sext <4 x i1> %and to <4 x i32>
+ store <4 x i32> %ext, <4 x i32> addrspace(1)* %out, align 4
+ ret void
+}
\ No newline at end of file
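
The identity under test, spelled out (sound because -1 is the all-ones bit
pattern, so both operands must be all ones for the AND to be all ones):

  ; (%a == -1) && (%b == -1)  ==>  (%a & %b) == -1
  %cmp1 = icmp eq i32 %a, -1
  %cmp2 = icmp eq i32 %b, -1
  %and  = and i1 %cmp1, %cmp2
  ; folds to the single AND_INT + SETE_INT the checks expect:
  %ab         = and i32 %a, %b
  %cmp.folded = icmp eq i32 %ab, -1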
diff --git a/test/CodeGen/R600/sext-in-reg.ll b/test/CodeGen/R600/sext-in-reg.ll
new file mode 100644
index 0000000..eef3f07
--- /dev/null
+++ b/test/CodeGen/R600/sext-in-reg.ll
@@ -0,0 +1,271 @@
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=cypress | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.imax(i32, i32) nounwind readnone
+
+
+; FUNC-LABEL: @sext_in_reg_i1_i32
+; SI: S_LOAD_DWORD [[ARG:s[0-9]+]],
+; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], [[ARG]], 0, 1
+; SI: BUFFER_STORE_DWORD [[EXTRACT]],
+
+; EG: BFE_INT
+define void @sext_in_reg_i1_i32(i32 addrspace(1)* %out, i32 %in) {
+ %shl = shl i32 %in, 31
+ %sext = ashr i32 %shl, 31
+ store i32 %sext, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i8_to_i32
+; SI: S_ADD_I32 [[VAL:s[0-9]+]],
+; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], [[VAL]], 0, 8
+; SI: BUFFER_STORE_DWORD [[EXTRACT]],
+
+; EG: BFE_INT
+define void @sext_in_reg_i8_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %c = add i32 %a, %b ; add to prevent folding into extload
+ %shl = shl i32 %c, 24
+ %ashr = ashr i32 %shl, 24
+ store i32 %ashr, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i16_to_i32
+; SI: S_ADD_I32 [[VAL:s[0-9]+]],
+; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], [[VAL]], 0, 16
+; SI: BUFFER_STORE_DWORD [[EXTRACT]],
+
+; EG: BFE_INT
+define void @sext_in_reg_i16_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %c = add i32 %a, %b ; add to prevent folding into extload
+ %shl = shl i32 %c, 16
+ %ashr = ashr i32 %shl, 16
+ store i32 %ashr, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i8_to_v1i32
+; SI: S_ADD_I32 [[VAL:s[0-9]+]],
+; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], [[VAL]], 0, 8
+; SI: BUFFER_STORE_DWORD [[EXTRACT]],
+
+; EG: BFE_INT
+define void @sext_in_reg_i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x i32> %b) nounwind {
+ %c = add <1 x i32> %a, %b ; add to prevent folding into extload
+ %shl = shl <1 x i32> %c, <i32 24>
+ %ashr = ashr <1 x i32> %shl, <i32 24>
+ store <1 x i32> %ashr, <1 x i32> addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i8_to_i64
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 8
+; SI: V_ASHRREV_I32_e32 {{v[0-9]+}}, 31,
+; SI: BUFFER_STORE_DWORD
+
+; EG: BFE_INT
+; EG: ASHR
+define void @sext_in_reg_i8_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %c = add i64 %a, %b
+ %shl = shl i64 %c, 56
+ %ashr = ashr i64 %shl, 56
+ store i64 %ashr, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i16_to_i64
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 16
+; SI: V_ASHRREV_I32_e32 {{v[0-9]+}}, 31,
+; SI: BUFFER_STORE_DWORD
+
+; EG: BFE_INT
+; EG: ASHR
+define void @sext_in_reg_i16_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %c = add i64 %a, %b
+ %shl = shl i64 %c, 48
+ %ashr = ashr i64 %shl, 48
+ store i64 %ashr, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i32_to_i64
+; SI: S_LOAD_DWORD
+; SI: S_LOAD_DWORD
+; SI: S_ADD_I32 [[ADD:s[0-9]+]],
+; SI: S_ASHR_I32 s{{[0-9]+}}, [[ADD]], 31
+; SI: BUFFER_STORE_DWORDX2
+define void @sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %c = add i64 %a, %b
+ %shl = shl i64 %c, 32
+ %ashr = ashr i64 %shl, 32
+ store i64 %ashr, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; This is broken on Evergreen for some reason related to the <1 x i64> kernel arguments.
+; XFUNC-LABEL: @sext_in_reg_i8_to_v1i64
+; XSI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 8
+; XSI: V_ASHRREV_I32_e32 {{v[0-9]+}}, 31,
+; XSI: BUFFER_STORE_DWORD
+; XEG: BFE_INT
+; XEG: ASHR
+; define void @sext_in_reg_i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i64> %a, <1 x i64> %b) nounwind {
+; %c = add <1 x i64> %a, %b
+; %shl = shl <1 x i64> %c, <i64 56>
+; %ashr = ashr <1 x i64> %shl, <i64 56>
+; store <1 x i64> %ashr, <1 x i64> addrspace(1)* %out, align 8
+; ret void
+; }
+
+; FUNC-LABEL: @sext_in_reg_i1_in_i32_other_amount
+; SI-NOT: BFE
+; SI: S_LSHL_B32 [[REG:s[0-9]+]], {{s[0-9]+}}, 6
+; SI: S_ASHR_I32 {{s[0-9]+}}, [[REG]], 7
+; EG-NOT: BFE
+define void @sext_in_reg_i1_in_i32_other_amount(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %c = add i32 %a, %b
+ %x = shl i32 %c, 6
+ %y = ashr i32 %x, 7
+ store i32 %y, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_v2i1_in_v2i32_other_amount
+; SI: S_LSHL_B32 [[REG0:s[0-9]+]], {{s[0-9]}}, 6
+; SI: S_ASHR_I32 {{s[0-9]+}}, [[REG0]], 7
+; SI: S_LSHL_B32 [[REG1:s[0-9]+]], {{s[0-9]}}, 6
+; SI: S_ASHR_I32 {{s[0-9]+}}, [[REG1]], 7
+; EG-NOT: BFE
+define void @sext_in_reg_v2i1_in_v2i32_other_amount(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
+ %c = add <2 x i32> %a, %b
+ %x = shl <2 x i32> %c, <i32 6, i32 6>
+ %y = ashr <2 x i32> %x, <i32 7, i32 7>
+ store <2 x i32> %y, <2 x i32> addrspace(1)* %out, align 2
+ ret void
+}
+
+
+; FUNC-LABEL: @sext_in_reg_v2i1_to_v2i32
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 1
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 1
+; SI: BUFFER_STORE_DWORDX2
+; EG: BFE
+; EG: BFE
+define void @sext_in_reg_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
+ %c = add <2 x i32> %a, %b ; add to prevent folding into extload
+ %shl = shl <2 x i32> %c, <i32 31, i32 31>
+ %ashr = ashr <2 x i32> %shl, <i32 31, i32 31>
+ store <2 x i32> %ashr, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_v4i1_to_v4i32
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 1
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 1
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 1
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 1
+; SI: BUFFER_STORE_DWORDX4
+
+; EG: BFE
+; EG: BFE
+; EG: BFE
+; EG: BFE
+define void @sext_in_reg_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) nounwind {
+ %c = add <4 x i32> %a, %b ; add to prevent folding into extload
+ %shl = shl <4 x i32> %c, <i32 31, i32 31, i32 31, i32 31>
+ %ashr = ashr <4 x i32> %shl, <i32 31, i32 31, i32 31, i32 31>
+ store <4 x i32> %ashr, <4 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_v2i8_to_v2i32
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 8
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 8
+; SI: BUFFER_STORE_DWORDX2
+
+; EG: BFE
+; EG: BFE
+define void @sext_in_reg_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
+ %c = add <2 x i32> %a, %b ; add to prevent folding into extload
+ %shl = shl <2 x i32> %c, <i32 24, i32 24>
+ %ashr = ashr <2 x i32> %shl, <i32 24, i32 24>
+ store <2 x i32> %ashr, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_v4i8_to_v4i32
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 8
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 8
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 8
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 8
+; SI: BUFFER_STORE_DWORDX4
+
+; EG: BFE
+; EG: BFE
+; EG: BFE
+; EG: BFE
+define void @sext_in_reg_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) nounwind {
+ %c = add <4 x i32> %a, %b ; add to prevent folding into extload
+ %shl = shl <4 x i32> %c, <i32 24, i32 24, i32 24, i32 24>
+ %ashr = ashr <4 x i32> %shl, <i32 24, i32 24, i32 24, i32 24>
+ store <4 x i32> %ashr, <4 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_v2i16_to_v2i32
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 16
+; SI: V_BFE_I32 {{v[0-9]+}}, {{s[0-9]+}}, 0, 16
+; SI: BUFFER_STORE_DWORDX2
+
+; EG: BFE
+; EG: BFE
+define void @sext_in_reg_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
+ %c = add <2 x i32> %a, %b ; add to prevent folding into extload
+  %shl = shl <2 x i32> %c, <i32 16, i32 16>
+  %ashr = ashr <2 x i32> %shl, <i32 16, i32 16>
+ store <2 x i32> %ashr, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @testcase
+define void @testcase(i8 addrspace(1)* %out, i8 %a) nounwind {
+ %and_a_1 = and i8 %a, 1
+ %cmp_eq = icmp eq i8 %and_a_1, 0
+ %cmp_slt = icmp slt i8 %a, 0
+ %sel0 = select i1 %cmp_slt, i8 0, i8 %a
+ %sel1 = select i1 %cmp_eq, i8 0, i8 %a
+ %xor = xor i8 %sel0, %sel1
+ store i8 %xor, i8 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @testcase_3
+define void @testcase_3(i8 addrspace(1)* %out, i8 %a) nounwind {
+ %and_a_1 = and i8 %a, 1
+ %cmp_eq = icmp eq i8 %and_a_1, 0
+ %cmp_slt = icmp slt i8 %a, 0
+ %sel0 = select i1 %cmp_slt, i8 0, i8 %a
+ %sel1 = select i1 %cmp_eq, i8 0, i8 %a
+ %xor = xor i8 %sel0, %sel1
+ store i8 %xor, i8 addrspace(1)* %out
+ ret void
+}
+
+; FIXME: The BFE should really be eliminated. I think it should happen
+; when computeMaskedBitsForTargetNode is implemented for imax.
+
+; FUNC-LABEL: @sext_in_reg_to_illegal_type
+; SI: BUFFER_LOAD_SBYTE
+; SI: V_MAX_I32
+; SI: V_BFE_I32
+; SI: BUFFER_STORE_SHORT
+define void @sext_in_reg_to_illegal_type(i16 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %src) nounwind {
+ %tmp5 = load i8 addrspace(1)* %src, align 1
+ %tmp2 = sext i8 %tmp5 to i32
+ %tmp3 = tail call i32 @llvm.AMDGPU.imax(i32 %tmp2, i32 0) nounwind readnone
+ %tmp4 = trunc i32 %tmp3 to i8
+ %tmp6 = sext i8 %tmp4 to i16
+ store i16 %tmp6, i16 addrspace(1)* %out, align 2
+ ret void
+}
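
The common idiom in all of the tests above, sketched for the i8-in-i32 case
(the SI pattern match is an assumption based on the checks, not a quote of
the instruction selector):

  ; shl/ashr by (32 - width) sign-extends the low bits in place:
  %shl  = shl i32 %x, 24    ; move the sign bit of the low byte to bit 31
  %sext = ashr i32 %shl, 24 ; smear it back down
  ; which SI matches as a bitfield extract:
  ;   V_BFE_I32 dst, %x, 0, 8   (offset 0, width 8)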
diff --git a/test/CodeGen/R600/si-annotate-cf-assertion.ll b/test/CodeGen/R600/si-annotate-cf-assertion.ll
index 9886fe9..cd3ba2b 100644
--- a/test/CodeGen/R600/si-annotate-cf-assertion.ll
+++ b/test/CodeGen/R600/si-annotate-cf-assertion.ll
@@ -1,3 +1,4 @@
+; REQUIRES: asserts
; XFAIL: *
; RUN: llc -march=r600 -mcpu=SI -asm-verbose=false < %s | FileCheck %s
diff --git a/test/CodeGen/R600/si-sgpr-spill.ll b/test/CodeGen/R600/si-sgpr-spill.ll
index 05c5e31..b34a757 100644
--- a/test/CodeGen/R600/si-sgpr-spill.ll
+++ b/test/CodeGen/R600/si-sgpr-spill.ll
@@ -1,8 +1,5 @@
; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck %s
-; XXX: Enable when spilling is supported
-; XFAIL: *
-
; These tests check that the compiler won't crash when it needs to spill
; SGPRs.
@@ -690,3 +687,880 @@ attributes #3 = { readonly }
attributes #4 = { nounwind readonly }
!0 = metadata !{metadata !"const", null, i32 1}
+
+; CHECK-LABEL: @main1
+; CHECK: S_ENDPGM
+define void @main1([17 x <16 x i8>] addrspace(2)* byval, [32 x <16 x i8>] addrspace(2)* byval, [16 x <32 x i8>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
+main_body:
+ %21 = getelementptr [17 x <16 x i8>] addrspace(2)* %0, i64 0, i32 0
+ %22 = load <16 x i8> addrspace(2)* %21, !tbaa !0
+ %23 = call float @llvm.SI.load.const(<16 x i8> %22, i32 0)
+ %24 = call float @llvm.SI.load.const(<16 x i8> %22, i32 4)
+ %25 = call float @llvm.SI.load.const(<16 x i8> %22, i32 8)
+ %26 = call float @llvm.SI.load.const(<16 x i8> %22, i32 12)
+ %27 = call float @llvm.SI.load.const(<16 x i8> %22, i32 28)
+ %28 = call float @llvm.SI.load.const(<16 x i8> %22, i32 48)
+ %29 = call float @llvm.SI.load.const(<16 x i8> %22, i32 52)
+ %30 = call float @llvm.SI.load.const(<16 x i8> %22, i32 56)
+ %31 = call float @llvm.SI.load.const(<16 x i8> %22, i32 64)
+ %32 = call float @llvm.SI.load.const(<16 x i8> %22, i32 68)
+ %33 = call float @llvm.SI.load.const(<16 x i8> %22, i32 72)
+ %34 = call float @llvm.SI.load.const(<16 x i8> %22, i32 76)
+ %35 = call float @llvm.SI.load.const(<16 x i8> %22, i32 128)
+ %36 = call float @llvm.SI.load.const(<16 x i8> %22, i32 132)
+ %37 = call float @llvm.SI.load.const(<16 x i8> %22, i32 144)
+ %38 = call float @llvm.SI.load.const(<16 x i8> %22, i32 148)
+ %39 = call float @llvm.SI.load.const(<16 x i8> %22, i32 152)
+ %40 = call float @llvm.SI.load.const(<16 x i8> %22, i32 160)
+ %41 = call float @llvm.SI.load.const(<16 x i8> %22, i32 164)
+ %42 = call float @llvm.SI.load.const(<16 x i8> %22, i32 168)
+ %43 = call float @llvm.SI.load.const(<16 x i8> %22, i32 172)
+ %44 = call float @llvm.SI.load.const(<16 x i8> %22, i32 176)
+ %45 = call float @llvm.SI.load.const(<16 x i8> %22, i32 180)
+ %46 = call float @llvm.SI.load.const(<16 x i8> %22, i32 184)
+ %47 = call float @llvm.SI.load.const(<16 x i8> %22, i32 192)
+ %48 = call float @llvm.SI.load.const(<16 x i8> %22, i32 196)
+ %49 = call float @llvm.SI.load.const(<16 x i8> %22, i32 200)
+ %50 = call float @llvm.SI.load.const(<16 x i8> %22, i32 208)
+ %51 = call float @llvm.SI.load.const(<16 x i8> %22, i32 212)
+ %52 = call float @llvm.SI.load.const(<16 x i8> %22, i32 216)
+ %53 = call float @llvm.SI.load.const(<16 x i8> %22, i32 220)
+ %54 = call float @llvm.SI.load.const(<16 x i8> %22, i32 236)
+ %55 = call float @llvm.SI.load.const(<16 x i8> %22, i32 240)
+ %56 = call float @llvm.SI.load.const(<16 x i8> %22, i32 244)
+ %57 = call float @llvm.SI.load.const(<16 x i8> %22, i32 248)
+ %58 = call float @llvm.SI.load.const(<16 x i8> %22, i32 252)
+ %59 = call float @llvm.SI.load.const(<16 x i8> %22, i32 256)
+ %60 = call float @llvm.SI.load.const(<16 x i8> %22, i32 260)
+ %61 = call float @llvm.SI.load.const(<16 x i8> %22, i32 264)
+ %62 = call float @llvm.SI.load.const(<16 x i8> %22, i32 268)
+ %63 = call float @llvm.SI.load.const(<16 x i8> %22, i32 272)
+ %64 = call float @llvm.SI.load.const(<16 x i8> %22, i32 276)
+ %65 = call float @llvm.SI.load.const(<16 x i8> %22, i32 280)
+ %66 = call float @llvm.SI.load.const(<16 x i8> %22, i32 284)
+ %67 = call float @llvm.SI.load.const(<16 x i8> %22, i32 288)
+ %68 = call float @llvm.SI.load.const(<16 x i8> %22, i32 292)
+ %69 = call float @llvm.SI.load.const(<16 x i8> %22, i32 464)
+ %70 = call float @llvm.SI.load.const(<16 x i8> %22, i32 468)
+ %71 = call float @llvm.SI.load.const(<16 x i8> %22, i32 472)
+ %72 = call float @llvm.SI.load.const(<16 x i8> %22, i32 496)
+ %73 = call float @llvm.SI.load.const(<16 x i8> %22, i32 500)
+ %74 = call float @llvm.SI.load.const(<16 x i8> %22, i32 504)
+ %75 = call float @llvm.SI.load.const(<16 x i8> %22, i32 512)
+ %76 = call float @llvm.SI.load.const(<16 x i8> %22, i32 516)
+ %77 = call float @llvm.SI.load.const(<16 x i8> %22, i32 524)
+ %78 = call float @llvm.SI.load.const(<16 x i8> %22, i32 532)
+ %79 = call float @llvm.SI.load.const(<16 x i8> %22, i32 536)
+ %80 = call float @llvm.SI.load.const(<16 x i8> %22, i32 540)
+ %81 = call float @llvm.SI.load.const(<16 x i8> %22, i32 544)
+ %82 = call float @llvm.SI.load.const(<16 x i8> %22, i32 548)
+ %83 = call float @llvm.SI.load.const(<16 x i8> %22, i32 552)
+ %84 = call float @llvm.SI.load.const(<16 x i8> %22, i32 556)
+ %85 = call float @llvm.SI.load.const(<16 x i8> %22, i32 560)
+ %86 = call float @llvm.SI.load.const(<16 x i8> %22, i32 564)
+ %87 = call float @llvm.SI.load.const(<16 x i8> %22, i32 568)
+ %88 = call float @llvm.SI.load.const(<16 x i8> %22, i32 572)
+ %89 = call float @llvm.SI.load.const(<16 x i8> %22, i32 576)
+ %90 = call float @llvm.SI.load.const(<16 x i8> %22, i32 580)
+ %91 = call float @llvm.SI.load.const(<16 x i8> %22, i32 584)
+ %92 = call float @llvm.SI.load.const(<16 x i8> %22, i32 588)
+ %93 = call float @llvm.SI.load.const(<16 x i8> %22, i32 592)
+ %94 = call float @llvm.SI.load.const(<16 x i8> %22, i32 596)
+ %95 = call float @llvm.SI.load.const(<16 x i8> %22, i32 600)
+ %96 = call float @llvm.SI.load.const(<16 x i8> %22, i32 604)
+ %97 = call float @llvm.SI.load.const(<16 x i8> %22, i32 608)
+ %98 = call float @llvm.SI.load.const(<16 x i8> %22, i32 612)
+ %99 = call float @llvm.SI.load.const(<16 x i8> %22, i32 616)
+ %100 = call float @llvm.SI.load.const(<16 x i8> %22, i32 624)
+ %101 = call float @llvm.SI.load.const(<16 x i8> %22, i32 628)
+ %102 = call float @llvm.SI.load.const(<16 x i8> %22, i32 632)
+ %103 = call float @llvm.SI.load.const(<16 x i8> %22, i32 636)
+ %104 = call float @llvm.SI.load.const(<16 x i8> %22, i32 640)
+ %105 = call float @llvm.SI.load.const(<16 x i8> %22, i32 644)
+ %106 = call float @llvm.SI.load.const(<16 x i8> %22, i32 648)
+ %107 = call float @llvm.SI.load.const(<16 x i8> %22, i32 652)
+ %108 = call float @llvm.SI.load.const(<16 x i8> %22, i32 656)
+ %109 = call float @llvm.SI.load.const(<16 x i8> %22, i32 660)
+ %110 = call float @llvm.SI.load.const(<16 x i8> %22, i32 664)
+ %111 = call float @llvm.SI.load.const(<16 x i8> %22, i32 668)
+ %112 = call float @llvm.SI.load.const(<16 x i8> %22, i32 672)
+ %113 = call float @llvm.SI.load.const(<16 x i8> %22, i32 676)
+ %114 = call float @llvm.SI.load.const(<16 x i8> %22, i32 680)
+ %115 = call float @llvm.SI.load.const(<16 x i8> %22, i32 684)
+ %116 = call float @llvm.SI.load.const(<16 x i8> %22, i32 688)
+ %117 = call float @llvm.SI.load.const(<16 x i8> %22, i32 692)
+ %118 = call float @llvm.SI.load.const(<16 x i8> %22, i32 696)
+ %119 = call float @llvm.SI.load.const(<16 x i8> %22, i32 700)
+ %120 = call float @llvm.SI.load.const(<16 x i8> %22, i32 704)
+ %121 = call float @llvm.SI.load.const(<16 x i8> %22, i32 708)
+ %122 = call float @llvm.SI.load.const(<16 x i8> %22, i32 712)
+ %123 = call float @llvm.SI.load.const(<16 x i8> %22, i32 716)
+ %124 = call float @llvm.SI.load.const(<16 x i8> %22, i32 864)
+ %125 = call float @llvm.SI.load.const(<16 x i8> %22, i32 868)
+ %126 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 0
+ %127 = load <32 x i8> addrspace(2)* %126, !tbaa !0
+ %128 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 0
+ %129 = load <16 x i8> addrspace(2)* %128, !tbaa !0
+ %130 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 1
+ %131 = load <32 x i8> addrspace(2)* %130, !tbaa !0
+ %132 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 1
+ %133 = load <16 x i8> addrspace(2)* %132, !tbaa !0
+ %134 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 2
+ %135 = load <32 x i8> addrspace(2)* %134, !tbaa !0
+ %136 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 2
+ %137 = load <16 x i8> addrspace(2)* %136, !tbaa !0
+ %138 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 3
+ %139 = load <32 x i8> addrspace(2)* %138, !tbaa !0
+ %140 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 3
+ %141 = load <16 x i8> addrspace(2)* %140, !tbaa !0
+ %142 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 4
+ %143 = load <32 x i8> addrspace(2)* %142, !tbaa !0
+ %144 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 4
+ %145 = load <16 x i8> addrspace(2)* %144, !tbaa !0
+ %146 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 5
+ %147 = load <32 x i8> addrspace(2)* %146, !tbaa !0
+ %148 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 5
+ %149 = load <16 x i8> addrspace(2)* %148, !tbaa !0
+ %150 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 6
+ %151 = load <32 x i8> addrspace(2)* %150, !tbaa !0
+ %152 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 6
+ %153 = load <16 x i8> addrspace(2)* %152, !tbaa !0
+ %154 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 7
+ %155 = load <32 x i8> addrspace(2)* %154, !tbaa !0
+ %156 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 7
+ %157 = load <16 x i8> addrspace(2)* %156, !tbaa !0
+ %158 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 8
+ %159 = load <32 x i8> addrspace(2)* %158, !tbaa !0
+ %160 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 8
+ %161 = load <16 x i8> addrspace(2)* %160, !tbaa !0
+ %162 = fcmp ugt float %17, 0.000000e+00
+ %163 = select i1 %162, float 1.000000e+00, float 0.000000e+00
+ %164 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %4, <2 x i32> %6)
+ %165 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %4, <2 x i32> %6)
+ %166 = call float @llvm.SI.fs.interp(i32 2, i32 0, i32 %4, <2 x i32> %6)
+ %167 = call float @llvm.SI.fs.interp(i32 3, i32 0, i32 %4, <2 x i32> %6)
+ %168 = call float @llvm.SI.fs.interp(i32 0, i32 1, i32 %4, <2 x i32> %6)
+ %169 = call float @llvm.SI.fs.interp(i32 1, i32 1, i32 %4, <2 x i32> %6)
+ %170 = call float @llvm.SI.fs.interp(i32 2, i32 1, i32 %4, <2 x i32> %6)
+ %171 = call float @llvm.SI.fs.interp(i32 3, i32 1, i32 %4, <2 x i32> %6)
+ %172 = call float @llvm.SI.fs.interp(i32 0, i32 2, i32 %4, <2 x i32> %6)
+ %173 = call float @llvm.SI.fs.interp(i32 1, i32 2, i32 %4, <2 x i32> %6)
+ %174 = call float @llvm.SI.fs.interp(i32 2, i32 2, i32 %4, <2 x i32> %6)
+ %175 = call float @llvm.SI.fs.interp(i32 3, i32 2, i32 %4, <2 x i32> %6)
+ %176 = call float @llvm.SI.fs.interp(i32 0, i32 3, i32 %4, <2 x i32> %6)
+ %177 = call float @llvm.SI.fs.interp(i32 1, i32 3, i32 %4, <2 x i32> %6)
+ %178 = call float @llvm.SI.fs.interp(i32 2, i32 3, i32 %4, <2 x i32> %6)
+ %179 = call float @llvm.SI.fs.interp(i32 3, i32 3, i32 %4, <2 x i32> %6)
+ %180 = call float @llvm.SI.fs.interp(i32 0, i32 4, i32 %4, <2 x i32> %6)
+ %181 = call float @llvm.SI.fs.interp(i32 1, i32 4, i32 %4, <2 x i32> %6)
+ %182 = call float @llvm.SI.fs.interp(i32 2, i32 4, i32 %4, <2 x i32> %6)
+ %183 = call float @llvm.SI.fs.interp(i32 3, i32 4, i32 %4, <2 x i32> %6)
+ %184 = call float @llvm.SI.fs.interp(i32 0, i32 5, i32 %4, <2 x i32> %6)
+ %185 = call float @llvm.SI.fs.interp(i32 1, i32 5, i32 %4, <2 x i32> %6)
+ %186 = call float @llvm.SI.fs.interp(i32 2, i32 5, i32 %4, <2 x i32> %6)
+ %187 = call float @llvm.SI.fs.interp(i32 3, i32 5, i32 %4, <2 x i32> %6)
+ %188 = call float @llvm.SI.fs.interp(i32 0, i32 6, i32 %4, <2 x i32> %6)
+ %189 = call float @llvm.SI.fs.interp(i32 1, i32 6, i32 %4, <2 x i32> %6)
+ %190 = call float @llvm.SI.fs.interp(i32 2, i32 6, i32 %4, <2 x i32> %6)
+ %191 = call float @llvm.SI.fs.interp(i32 3, i32 6, i32 %4, <2 x i32> %6)
+ %192 = call float @llvm.SI.fs.interp(i32 0, i32 7, i32 %4, <2 x i32> %6)
+ %193 = call float @llvm.SI.fs.interp(i32 1, i32 7, i32 %4, <2 x i32> %6)
+ %194 = call float @llvm.SI.fs.interp(i32 2, i32 7, i32 %4, <2 x i32> %6)
+ %195 = call float @llvm.SI.fs.interp(i32 3, i32 7, i32 %4, <2 x i32> %6)
+ %196 = fmul float %14, %124
+ %197 = fadd float %196, %125
+ %198 = call float @llvm.AMDIL.clamp.(float %163, float 0.000000e+00, float 1.000000e+00)
+ %199 = call float @llvm.AMDIL.clamp.(float 0.000000e+00, float 0.000000e+00, float 1.000000e+00)
+ %200 = call float @llvm.AMDIL.clamp.(float 0.000000e+00, float 0.000000e+00, float 1.000000e+00)
+ %201 = call float @llvm.AMDIL.clamp.(float 1.000000e+00, float 0.000000e+00, float 1.000000e+00)
+ %202 = bitcast float %198 to i32
+ %203 = icmp ne i32 %202, 0
+ %. = select i1 %203, float -1.000000e+00, float 1.000000e+00
+ %204 = fsub float -0.000000e+00, %164
+ %205 = fadd float %44, %204
+ %206 = fsub float -0.000000e+00, %165
+ %207 = fadd float %45, %206
+ %208 = fsub float -0.000000e+00, %166
+ %209 = fadd float %46, %208
+ %210 = fmul float %205, %205
+ %211 = fmul float %207, %207
+ %212 = fadd float %211, %210
+ %213 = fmul float %209, %209
+ %214 = fadd float %212, %213
+ %215 = call float @llvm.AMDGPU.rsq(float %214)
+ %216 = fmul float %205, %215
+ %217 = fmul float %207, %215
+ %218 = fmul float %209, %215
+ %219 = fmul float %., %54
+ %220 = fmul float %13, %47
+ %221 = fmul float %197, %48
+ %222 = bitcast float %174 to i32
+ %223 = bitcast float %175 to i32
+ %224 = insertelement <2 x i32> undef, i32 %222, i32 0
+ %225 = insertelement <2 x i32> %224, i32 %223, i32 1
+ %226 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> %225, <32 x i8> %131, <16 x i8> %133, i32 2)
+ %227 = extractelement <4 x float> %226, i32 0
+ %228 = extractelement <4 x float> %226, i32 1
+ %229 = extractelement <4 x float> %226, i32 2
+ %230 = extractelement <4 x float> %226, i32 3
+ %231 = fmul float %227, 0x4012611180000000
+ %232 = fmul float %228, 0x4012611180000000
+ %233 = fmul float %229, 0x4012611180000000
+ %234 = call float @llvm.AMDGPU.lrp(float %27, float %231, float 1.000000e+00)
+ %235 = call float @llvm.AMDGPU.lrp(float %27, float %232, float 1.000000e+00)
+ %236 = call float @llvm.AMDGPU.lrp(float %27, float %233, float 1.000000e+00)
+ %237 = fmul float %216, %184
+ %238 = fmul float %217, %185
+ %239 = fadd float %238, %237
+ %240 = fmul float %218, %186
+ %241 = fadd float %239, %240
+ %242 = fmul float %216, %187
+ %243 = fmul float %217, %188
+ %244 = fadd float %243, %242
+ %245 = fmul float %218, %189
+ %246 = fadd float %244, %245
+ %247 = fmul float %216, %190
+ %248 = fmul float %217, %191
+ %249 = fadd float %248, %247
+ %250 = fmul float %218, %192
+ %251 = fadd float %249, %250
+ %252 = call float @llvm.AMDIL.clamp.(float %251, float 0.000000e+00, float 1.000000e+00)
+ %253 = fmul float %214, 0x3F5A36E2E0000000
+ %254 = call float @llvm.AMDIL.clamp.(float %253, float 0.000000e+00, float 1.000000e+00)
+ %255 = fsub float -0.000000e+00, %254
+ %256 = fadd float 1.000000e+00, %255
+ %257 = call float @llvm.pow.f32(float %252, float 2.500000e-01)
+ %258 = fmul float %39, %257
+ %259 = fmul float %241, %258
+ %260 = fmul float %246, %258
+ %261 = fmul float %259, %230
+ %262 = fmul float %260, %230
+ %263 = fadd float %252, 0x3EE4F8B580000000
+ %264 = fsub float -0.000000e+00, %252
+ %265 = fadd float 1.000000e+00, %264
+ %266 = fmul float 1.200000e+01, %265
+ %267 = fadd float %266, 4.000000e+00
+ %268 = fsub float -0.000000e+00, %267
+ %269 = fmul float %268, %263
+ %270 = fsub float -0.000000e+00, %267
+ %271 = fmul float %270, %263
+ %272 = fsub float -0.000000e+00, %267
+ %273 = fmul float %272, %263
+ %274 = fdiv float 1.000000e+00, %269
+ %275 = fdiv float 1.000000e+00, %271
+ %276 = fdiv float 1.000000e+00, %273
+ %277 = fmul float %261, %274
+ %278 = fmul float %262, %275
+ %279 = fmul float %263, %276
+ br label %LOOP
+
+LOOP: ; preds = %LOOP, %main_body
+ %temp144.0 = phi float [ 1.000000e+00, %main_body ], [ %292, %LOOP ]
+ %temp168.0 = phi float [ %176, %main_body ], [ %288, %LOOP ]
+ %temp169.0 = phi float [ %177, %main_body ], [ %289, %LOOP ]
+ %temp170.0 = phi float [ %256, %main_body ], [ %290, %LOOP ]
+ %280 = bitcast float %temp168.0 to i32
+ %281 = bitcast float %temp169.0 to i32
+ %282 = insertelement <4 x i32> undef, i32 %280, i32 0
+ %283 = insertelement <4 x i32> %282, i32 %281, i32 1
+ %284 = insertelement <4 x i32> %283, i32 0, i32 2
+ %285 = insertelement <4 x i32> %284, i32 undef, i32 3
+ %286 = call <4 x float> @llvm.SI.samplel.v4i32(<4 x i32> %285, <32 x i8> %147, <16 x i8> %149, i32 2)
+ %287 = extractelement <4 x float> %286, i32 3
+ %288 = fadd float %temp168.0, %277
+ %289 = fadd float %temp169.0, %278
+ %290 = fadd float %temp170.0, %279
+ %291 = fsub float -0.000000e+00, %287
+ %292 = fadd float %290, %291
+ %293 = fcmp oge float 0.000000e+00, %292
+ %294 = sext i1 %293 to i32
+ %295 = bitcast i32 %294 to float
+ %296 = bitcast float %295 to i32
+ %297 = icmp ne i32 %296, 0
+ br i1 %297, label %IF189, label %LOOP
+
+IF189: ; preds = %LOOP
+ %298 = extractelement <4 x float> %286, i32 0
+ %299 = extractelement <4 x float> %286, i32 1
+ %300 = extractelement <4 x float> %286, i32 2
+ %301 = fsub float -0.000000e+00, %292
+ %302 = fadd float %temp144.0, %301
+ %303 = fdiv float 1.000000e+00, %302
+ %304 = fmul float %292, %303
+ %305 = fadd float %304, -1.000000e+00
+ %306 = fmul float %305, %277
+ %307 = fadd float %306, %288
+ %308 = fmul float %305, %278
+ %309 = fadd float %308, %289
+ %310 = fsub float -0.000000e+00, %176
+ %311 = fadd float %307, %310
+ %312 = fsub float -0.000000e+00, %177
+ %313 = fadd float %309, %312
+ %314 = fadd float %176, %311
+ %315 = fadd float %177, %313
+ %316 = fmul float %311, %67
+ %317 = fmul float %313, %68
+ %318 = fmul float %316, %55
+ %319 = fmul float %316, %56
+ %320 = fmul float %317, %57
+ %321 = fadd float %320, %318
+ %322 = fmul float %317, %58
+ %323 = fadd float %322, %319
+ %324 = fadd float %178, %321
+ %325 = fadd float %179, %323
+ %326 = fmul float %316, %59
+ %327 = fmul float %316, %60
+ %328 = fmul float %316, %61
+ %329 = fmul float %316, %62
+ %330 = fmul float %317, %63
+ %331 = fadd float %330, %326
+ %332 = fmul float %317, %64
+ %333 = fadd float %332, %327
+ %334 = fmul float %317, %65
+ %335 = fadd float %334, %328
+ %336 = fmul float %317, %66
+ %337 = fadd float %336, %329
+ %338 = fadd float %168, %331
+ %339 = fadd float %169, %333
+ %340 = fadd float %170, %335
+ %341 = fadd float %171, %337
+ %342 = bitcast float %338 to i32
+ %343 = bitcast float %339 to i32
+ %344 = insertelement <2 x i32> undef, i32 %342, i32 0
+ %345 = insertelement <2 x i32> %344, i32 %343, i32 1
+ %346 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> %345, <32 x i8> %135, <16 x i8> %137, i32 2)
+ %347 = extractelement <4 x float> %346, i32 0
+ %348 = extractelement <4 x float> %346, i32 1
+ %349 = extractelement <4 x float> %346, i32 2
+ %350 = extractelement <4 x float> %346, i32 3
+ %351 = fmul float %347, %23
+ %352 = fmul float %348, %24
+ %353 = fmul float %349, %25
+ %354 = fmul float %350, %26
+ %355 = fmul float %351, %180
+ %356 = fmul float %352, %181
+ %357 = fmul float %353, %182
+ %358 = fmul float %354, %183
+ %359 = fsub float -0.000000e+00, %350
+ %360 = fadd float 1.000000e+00, %359
+ %361 = fmul float %360, %49
+ %362 = call float @llvm.AMDGPU.lrp(float %361, float %347, float %355)
+ %363 = call float @llvm.AMDGPU.lrp(float %361, float %348, float %356)
+ %364 = call float @llvm.AMDGPU.lrp(float %361, float %349, float %357)
+ %365 = bitcast float %340 to i32
+ %366 = bitcast float %341 to i32
+ %367 = insertelement <2 x i32> undef, i32 %365, i32 0
+ %368 = insertelement <2 x i32> %367, i32 %366, i32 1
+ %369 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> %368, <32 x i8> %151, <16 x i8> %153, i32 2)
+ %370 = extractelement <4 x float> %369, i32 2
+ %371 = fmul float %362, %234
+ %372 = fmul float %363, %235
+ %373 = fmul float %364, %236
+ %374 = fmul float %358, %230
+ %375 = bitcast float %314 to i32
+ %376 = bitcast float %315 to i32
+ %377 = insertelement <2 x i32> undef, i32 %375, i32 0
+ %378 = insertelement <2 x i32> %377, i32 %376, i32 1
+ %379 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> %378, <32 x i8> %139, <16 x i8> %141, i32 2)
+ %380 = extractelement <4 x float> %379, i32 0
+ %381 = extractelement <4 x float> %379, i32 1
+ %382 = extractelement <4 x float> %379, i32 2
+ %383 = extractelement <4 x float> %379, i32 3
+ %384 = fcmp olt float 0.000000e+00, %382
+ %385 = sext i1 %384 to i32
+ %386 = bitcast i32 %385 to float
+ %387 = bitcast float %386 to i32
+ %388 = icmp ne i32 %387, 0
+ %.224 = select i1 %388, float %381, float %380
+ %.225 = select i1 %388, float %383, float %381
+ %389 = bitcast float %324 to i32
+ %390 = bitcast float %325 to i32
+ %391 = insertelement <2 x i32> undef, i32 %389, i32 0
+ %392 = insertelement <2 x i32> %391, i32 %390, i32 1
+ %393 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> %392, <32 x i8> %143, <16 x i8> %145, i32 2)
+ %394 = extractelement <4 x float> %393, i32 0
+ %395 = extractelement <4 x float> %393, i32 1
+ %396 = extractelement <4 x float> %393, i32 2
+ %397 = extractelement <4 x float> %393, i32 3
+ %398 = fcmp olt float 0.000000e+00, %396
+ %399 = sext i1 %398 to i32
+ %400 = bitcast i32 %399 to float
+ %401 = bitcast float %400 to i32
+ %402 = icmp ne i32 %401, 0
+ %temp112.1 = select i1 %402, float %395, float %394
+ %temp113.1 = select i1 %402, float %397, float %395
+ %403 = fmul float %.224, 2.000000e+00
+ %404 = fadd float %403, -1.000000e+00
+ %405 = fmul float %.225, 2.000000e+00
+ %406 = fadd float %405, -1.000000e+00
+ %407 = fmul float %temp112.1, 2.000000e+00
+ %408 = fadd float %407, -1.000000e+00
+ %409 = fmul float %temp113.1, 2.000000e+00
+ %410 = fadd float %409, -1.000000e+00
+ %411 = fsub float -0.000000e+00, %404
+ %412 = fmul float %411, %35
+ %413 = fsub float -0.000000e+00, %406
+ %414 = fmul float %413, %35
+ %415 = fsub float -0.000000e+00, %408
+ %416 = fmul float %415, %36
+ %417 = fsub float -0.000000e+00, %410
+ %418 = fmul float %417, %36
+ %419 = fmul float %416, %370
+ %420 = fmul float %418, %370
+ %421 = call float @fabs(float %412)
+ %422 = call float @fabs(float %414)
+ %423 = fsub float -0.000000e+00, %421
+ %424 = fadd float 1.000000e+00, %423
+ %425 = fsub float -0.000000e+00, %422
+ %426 = fadd float 1.000000e+00, %425
+ %427 = fmul float %424, %419
+ %428 = fadd float %427, %412
+ %429 = fmul float %426, %420
+ %430 = fadd float %429, %414
+ %431 = fmul float %428, %428
+ %432 = fmul float %430, %430
+ %433 = fadd float %431, %432
+ %434 = fsub float -0.000000e+00, %433
+ %435 = fadd float 0x3FF00068E0000000, %434
+ %436 = call float @llvm.AMDIL.clamp.(float %435, float 0.000000e+00, float 1.000000e+00)
+ %437 = call float @llvm.AMDGPU.rsq(float %436)
+ %438 = fmul float %437, %436
+ %439 = fsub float -0.000000e+00, %436
+ %440 = call float @llvm.AMDGPU.cndlt(float %439, float %438, float 0.000000e+00)
+ %441 = fmul float %184, %428
+ %442 = fmul float %185, %428
+ %443 = fmul float %186, %428
+ %444 = fmul float %187, %430
+ %445 = fadd float %444, %441
+ %446 = fmul float %188, %430
+ %447 = fadd float %446, %442
+ %448 = fmul float %189, %430
+ %449 = fadd float %448, %443
+ %450 = fmul float %190, %440
+ %451 = fadd float %450, %445
+ %452 = fmul float %191, %440
+ %453 = fadd float %452, %447
+ %454 = fmul float %192, %440
+ %455 = fadd float %454, %449
+ %456 = fmul float %451, %451
+ %457 = fmul float %453, %453
+ %458 = fadd float %457, %456
+ %459 = fmul float %455, %455
+ %460 = fadd float %458, %459
+ %461 = call float @llvm.AMDGPU.rsq(float %460)
+ %462 = fmul float %451, %461
+ %463 = fmul float %453, %461
+ %464 = fmul float %455, %461
+ %465 = fcmp olt float 0.000000e+00, %219
+ %466 = sext i1 %465 to i32
+ %467 = bitcast i32 %466 to float
+ %468 = bitcast float %467 to i32
+ %469 = icmp ne i32 %468, 0
+ br i1 %469, label %IF198, label %ENDIF197
+
+IF198: ; preds = %IF189
+ %470 = fsub float -0.000000e+00, %462
+ %471 = fsub float -0.000000e+00, %463
+ %472 = fsub float -0.000000e+00, %464
+ br label %ENDIF197
+
+ENDIF197: ; preds = %IF189, %IF198
+ %temp14.0 = phi float [ %472, %IF198 ], [ %464, %IF189 ]
+ %temp13.0 = phi float [ %471, %IF198 ], [ %463, %IF189 ]
+ %temp12.0 = phi float [ %470, %IF198 ], [ %462, %IF189 ]
+ %473 = bitcast float %220 to i32
+ %474 = bitcast float %221 to i32
+ %475 = insertelement <2 x i32> undef, i32 %473, i32 0
+ %476 = insertelement <2 x i32> %475, i32 %474, i32 1
+ %477 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> %476, <32 x i8> %159, <16 x i8> %161, i32 2)
+ %478 = extractelement <4 x float> %477, i32 0
+ %479 = extractelement <4 x float> %477, i32 1
+ %480 = extractelement <4 x float> %477, i32 2
+ %481 = extractelement <4 x float> %477, i32 3
+ %482 = fmul float %478, %40
+ %483 = fadd float %482, %41
+ %484 = fmul float %479, %40
+ %485 = fadd float %484, %41
+ %486 = fmul float %480, %40
+ %487 = fadd float %486, %41
+ %488 = fmul float %481, %42
+ %489 = fadd float %488, %43
+ %490 = bitcast float %172 to i32
+ %491 = bitcast float %173 to i32
+ %492 = insertelement <2 x i32> undef, i32 %490, i32 0
+ %493 = insertelement <2 x i32> %492, i32 %491, i32 1
+ %494 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> %493, <32 x i8> %155, <16 x i8> %157, i32 2)
+ %495 = extractelement <4 x float> %494, i32 0
+ %496 = extractelement <4 x float> %494, i32 1
+ %497 = extractelement <4 x float> %494, i32 2
+ %498 = extractelement <4 x float> %494, i32 3
+ %499 = fmul float %498, 3.200000e+01
+ %500 = fadd float %499, -1.600000e+01
+ %501 = call float @llvm.AMDIL.exp.(float %500)
+ %502 = fmul float %495, %501
+ %503 = fmul float %496, %501
+ %504 = fmul float %497, %501
+ %505 = fmul float %28, %502
+ %506 = fadd float %505, %193
+ %507 = fmul float %29, %503
+ %508 = fadd float %507, %194
+ %509 = fmul float %30, %504
+ %510 = fadd float %509, %195
+ %511 = fmul float %506, %489
+ %512 = fmul float %508, %489
+ %513 = fmul float %510, %489
+ %514 = fmul float %489, 5.000000e-01
+ %515 = fadd float %514, 5.000000e-01
+ %516 = fmul float %483, %515
+ %517 = fadd float %516, %511
+ %518 = fmul float %485, %515
+ %519 = fadd float %518, %512
+ %520 = fmul float %487, %515
+ %521 = fadd float %520, %513
+ %522 = fmul float %517, %371
+ %523 = fmul float %519, %372
+ %524 = fmul float %521, %373
+ %525 = fmul float %428, 0x3FDB272440000000
+ %526 = fmul float %430, 0xBFDB272440000000
+ %527 = fadd float %526, %525
+ %528 = fmul float %440, 0x3FE99999A0000000
+ %529 = fadd float %527, %528
+ %530 = fmul float %529, 5.000000e-01
+ %531 = fadd float %530, 0x3FE3333340000000
+ %532 = fmul float %531, %531
+ %533 = fmul float %522, %532
+ %534 = fmul float %523, %532
+ %535 = fmul float %524, %532
+ %536 = fsub float -0.000000e+00, %72
+ %537 = fsub float -0.000000e+00, %73
+ %538 = fsub float -0.000000e+00, %74
+ %539 = fmul float %temp12.0, %536
+ %540 = fmul float %temp13.0, %537
+ %541 = fadd float %540, %539
+ %542 = fmul float %temp14.0, %538
+ %543 = fadd float %541, %542
+ %544 = call float @llvm.AMDIL.clamp.(float %543, float 0.000000e+00, float 1.000000e+00)
+ %545 = fmul float %371, %544
+ %546 = fmul float %372, %544
+ %547 = fmul float %373, %544
+ %548 = fmul float %545, %69
+ %549 = fmul float %546, %70
+ %550 = fmul float %547, %71
+ %551 = fsub float -0.000000e+00, %164
+ %552 = fadd float %97, %551
+ %553 = fsub float -0.000000e+00, %165
+ %554 = fadd float %98, %553
+ %555 = fsub float -0.000000e+00, %166
+ %556 = fadd float %99, %555
+ %557 = fmul float %552, %552
+ %558 = fmul float %554, %554
+ %559 = fadd float %558, %557
+ %560 = fmul float %556, %556
+ %561 = fadd float %559, %560
+ %562 = call float @llvm.AMDGPU.rsq(float %561)
+ %563 = fmul float %562, %561
+ %564 = fsub float -0.000000e+00, %561
+ %565 = call float @llvm.AMDGPU.cndlt(float %564, float %563, float 0.000000e+00)
+ %566 = fsub float -0.000000e+00, %84
+ %567 = fadd float %565, %566
+ %568 = fsub float -0.000000e+00, %83
+ %569 = fadd float %565, %568
+ %570 = fsub float -0.000000e+00, %82
+ %571 = fadd float %565, %570
+ %572 = fsub float -0.000000e+00, %84
+ %573 = fadd float %83, %572
+ %574 = fsub float -0.000000e+00, %83
+ %575 = fadd float %82, %574
+ %576 = fsub float -0.000000e+00, %82
+ %577 = fadd float %81, %576
+ %578 = fdiv float 1.000000e+00, %573
+ %579 = fdiv float 1.000000e+00, %575
+ %580 = fdiv float 1.000000e+00, %577
+ %581 = fmul float %567, %578
+ %582 = fmul float %569, %579
+ %583 = fmul float %571, %580
+ %584 = fcmp olt float %565, %83
+ %585 = sext i1 %584 to i32
+ %586 = bitcast i32 %585 to float
+ %587 = bitcast float %586 to i32
+ %588 = icmp ne i32 %587, 0
+ br i1 %588, label %ENDIF200, label %ELSE202
+
+ELSE202: ; preds = %ENDIF197
+ %589 = fcmp olt float %565, %82
+ %590 = sext i1 %589 to i32
+ %591 = bitcast i32 %590 to float
+ %592 = bitcast float %591 to i32
+ %593 = icmp ne i32 %592, 0
+ br i1 %593, label %ENDIF200, label %ELSE205
+
+ENDIF200: ; preds = %ELSE205, %ELSE202, %ENDIF197
+ %temp80.0 = phi float [ %581, %ENDIF197 ], [ %.226, %ELSE205 ], [ %582, %ELSE202 ]
+ %temp88.0 = phi float [ %122, %ENDIF197 ], [ %.227, %ELSE205 ], [ %120, %ELSE202 ]
+ %temp89.0 = phi float [ %123, %ENDIF197 ], [ %.228, %ELSE205 ], [ %121, %ELSE202 ]
+ %temp90.0 = phi float [ %120, %ENDIF197 ], [ %116, %ELSE205 ], [ %118, %ELSE202 ]
+ %temp91.0 = phi float [ %121, %ENDIF197 ], [ %117, %ELSE205 ], [ %119, %ELSE202 ]
+ %594 = fcmp olt float %565, %83
+ %595 = sext i1 %594 to i32
+ %596 = bitcast i32 %595 to float
+ %597 = bitcast float %596 to i32
+ %598 = icmp ne i32 %597, 0
+ br i1 %598, label %ENDIF209, label %ELSE211
+
+ELSE205: ; preds = %ELSE202
+ %599 = fcmp olt float %565, %81
+ %600 = sext i1 %599 to i32
+ %601 = bitcast i32 %600 to float
+ %602 = bitcast float %601 to i32
+ %603 = icmp ne i32 %602, 0
+ %.226 = select i1 %603, float %583, float 1.000000e+00
+ %.227 = select i1 %603, float %118, float %116
+ %.228 = select i1 %603, float %119, float %117
+ br label %ENDIF200
+
+ELSE211: ; preds = %ENDIF200
+ %604 = fcmp olt float %565, %82
+ %605 = sext i1 %604 to i32
+ %606 = bitcast i32 %605 to float
+ %607 = bitcast float %606 to i32
+ %608 = icmp ne i32 %607, 0
+ br i1 %608, label %ENDIF209, label %ELSE214
+
+ENDIF209: ; preds = %ELSE214, %ELSE211, %ENDIF200
+ %temp52.0 = phi float [ %108, %ENDIF200 ], [ %100, %ELSE214 ], [ %104, %ELSE211 ]
+ %temp53.0 = phi float [ %109, %ENDIF200 ], [ %101, %ELSE214 ], [ %105, %ELSE211 ]
+ %temp54.0 = phi float [ %110, %ENDIF200 ], [ %102, %ELSE214 ], [ %106, %ELSE211 ]
+ %temp55.0 = phi float [ %111, %ENDIF200 ], [ %103, %ELSE214 ], [ %107, %ELSE211 ]
+ %temp68.0 = phi float [ %112, %ENDIF200 ], [ %.230, %ELSE214 ], [ %108, %ELSE211 ]
+ %temp69.0 = phi float [ %113, %ENDIF200 ], [ %.231, %ELSE214 ], [ %109, %ELSE211 ]
+ %temp70.0 = phi float [ %114, %ENDIF200 ], [ %.232, %ELSE214 ], [ %110, %ELSE211 ]
+ %temp71.0 = phi float [ %115, %ENDIF200 ], [ %.233, %ELSE214 ], [ %111, %ELSE211 ]
+ %609 = fmul float %164, %85
+ %610 = fmul float %165, %86
+ %611 = fadd float %609, %610
+ %612 = fmul float %166, %87
+ %613 = fadd float %611, %612
+ %614 = fmul float %167, %88
+ %615 = fadd float %613, %614
+ %616 = fmul float %164, %89
+ %617 = fmul float %165, %90
+ %618 = fadd float %616, %617
+ %619 = fmul float %166, %91
+ %620 = fadd float %618, %619
+ %621 = fmul float %167, %92
+ %622 = fadd float %620, %621
+ %623 = fmul float %164, %93
+ %624 = fmul float %165, %94
+ %625 = fadd float %623, %624
+ %626 = fmul float %166, %95
+ %627 = fadd float %625, %626
+ %628 = fmul float %167, %96
+ %629 = fadd float %627, %628
+ %630 = fsub float -0.000000e+00, %78
+ %631 = fadd float 1.000000e+00, %630
+ %632 = call float @fabs(float %615)
+ %633 = call float @fabs(float %622)
+ %634 = fcmp oge float %631, %632
+ %635 = sext i1 %634 to i32
+ %636 = bitcast i32 %635 to float
+ %637 = bitcast float %636 to i32
+ %638 = and i32 %637, 1065353216
+ %639 = bitcast i32 %638 to float
+ %640 = fcmp oge float %631, %633
+ %641 = sext i1 %640 to i32
+ %642 = bitcast i32 %641 to float
+ %643 = bitcast float %642 to i32
+ %644 = and i32 %643, 1065353216
+ %645 = bitcast i32 %644 to float
+ %646 = fmul float %639, %645
+ %647 = fmul float %629, %646
+ %648 = fmul float %615, %temp68.0
+ %649 = fadd float %648, %temp70.0
+ %650 = fmul float %622, %temp69.0
+ %651 = fadd float %650, %temp71.0
+ %652 = fmul float %615, %temp52.0
+ %653 = fadd float %652, %temp54.0
+ %654 = fmul float %622, %temp53.0
+ %655 = fadd float %654, %temp55.0
+ %656 = fadd float %temp80.0, -1.000000e+00
+ %657 = fmul float %656, %77
+ %658 = fadd float %657, 1.000000e+00
+ %659 = call float @llvm.AMDIL.clamp.(float %658, float 0.000000e+00, float 1.000000e+00)
+ %660 = bitcast float %649 to i32
+ %661 = bitcast float %651 to i32
+ %662 = bitcast float 0.000000e+00 to i32
+ %663 = insertelement <4 x i32> undef, i32 %660, i32 0
+ %664 = insertelement <4 x i32> %663, i32 %661, i32 1
+ %665 = insertelement <4 x i32> %664, i32 %662, i32 2
+ %666 = insertelement <4 x i32> %665, i32 undef, i32 3
+ %667 = call <4 x float> @llvm.SI.samplel.v4i32(<4 x i32> %666, <32 x i8> %127, <16 x i8> %129, i32 2)
+ %668 = extractelement <4 x float> %667, i32 0
+ %669 = extractelement <4 x float> %667, i32 1
+ %670 = bitcast float %653 to i32
+ %671 = bitcast float %655 to i32
+ %672 = bitcast float 0.000000e+00 to i32
+ %673 = insertelement <4 x i32> undef, i32 %670, i32 0
+ %674 = insertelement <4 x i32> %673, i32 %671, i32 1
+ %675 = insertelement <4 x i32> %674, i32 %672, i32 2
+ %676 = insertelement <4 x i32> %675, i32 undef, i32 3
+ %677 = call <4 x float> @llvm.SI.samplel.v4i32(<4 x i32> %676, <32 x i8> %127, <16 x i8> %129, i32 2)
+ %678 = extractelement <4 x float> %677, i32 0
+ %679 = extractelement <4 x float> %677, i32 1
+ %680 = fsub float -0.000000e+00, %669
+ %681 = fadd float 1.000000e+00, %680
+ %682 = fsub float -0.000000e+00, %679
+ %683 = fadd float 1.000000e+00, %682
+ %684 = fmul float %681, 2.500000e-01
+ %685 = fmul float %683, 2.500000e-01
+ %686 = fsub float -0.000000e+00, %684
+ %687 = fadd float %668, %686
+ %688 = fsub float -0.000000e+00, %685
+ %689 = fadd float %678, %688
+ %690 = fmul float %647, %temp88.0
+ %691 = fadd float %690, %temp89.0
+ %692 = fmul float %647, %temp90.0
+ %693 = fadd float %692, %temp91.0
+ %694 = call float @llvm.AMDIL.clamp.(float %691, float 0.000000e+00, float 1.000000e+00)
+ %695 = call float @llvm.AMDIL.clamp.(float %693, float 0.000000e+00, float 1.000000e+00)
+ %696 = fsub float -0.000000e+00, %694
+ %697 = fadd float %668, %696
+ %698 = fsub float -0.000000e+00, %695
+ %699 = fadd float %678, %698
+ %700 = fmul float %668, %668
+ %701 = fmul float %678, %678
+ %702 = fsub float -0.000000e+00, %700
+ %703 = fadd float %687, %702
+ %704 = fsub float -0.000000e+00, %701
+ %705 = fadd float %689, %704
+ %706 = fcmp uge float %703, %75
+ %707 = select i1 %706, float %703, float %75
+ %708 = fcmp uge float %705, %75
+ %709 = select i1 %708, float %705, float %75
+ %710 = fmul float %697, %697
+ %711 = fadd float %710, %707
+ %712 = fmul float %699, %699
+ %713 = fadd float %712, %709
+ %714 = fdiv float 1.000000e+00, %711
+ %715 = fdiv float 1.000000e+00, %713
+ %716 = fmul float %707, %714
+ %717 = fmul float %709, %715
+ %718 = fcmp oge float %697, 0.000000e+00
+ %719 = sext i1 %718 to i32
+ %720 = bitcast i32 %719 to float
+ %721 = bitcast float %720 to i32
+ %722 = icmp ne i32 %721, 0
+ %.229 = select i1 %722, float 1.000000e+00, float %716
+ %723 = fcmp oge float %699, 0.000000e+00
+ %724 = sext i1 %723 to i32
+ %725 = bitcast i32 %724 to float
+ %726 = bitcast float %725 to i32
+ %727 = icmp ne i32 %726, 0
+ %temp28.0 = select i1 %727, float 1.000000e+00, float %717
+ %728 = call float @llvm.AMDGPU.lrp(float %659, float %temp28.0, float %.229)
+ %729 = call float @llvm.pow.f32(float %728, float %76)
+ %730 = fmul float %729, %79
+ %731 = fadd float %730, %80
+ %732 = call float @llvm.AMDIL.clamp.(float %731, float 0.000000e+00, float 1.000000e+00)
+ %733 = fmul float %732, %732
+ %734 = fmul float 2.000000e+00, %732
+ %735 = fsub float -0.000000e+00, %734
+ %736 = fadd float 3.000000e+00, %735
+ %737 = fmul float %733, %736
+ %738 = fmul float %548, %737
+ %739 = fmul float %549, %737
+ %740 = fmul float %550, %737
+ %741 = fmul float %738, %515
+ %742 = fadd float %741, %533
+ %743 = fmul float %739, %515
+ %744 = fadd float %743, %534
+ %745 = fmul float %740, %515
+ %746 = fadd float %745, %535
+ %747 = call float @llvm.AMDGPU.lrp(float %230, float %287, float 1.000000e+00)
+ %748 = call float @llvm.AMDGPU.lrp(float %37, float %298, float 1.000000e+00)
+ %749 = call float @llvm.AMDGPU.lrp(float %37, float %299, float 1.000000e+00)
+ %750 = call float @llvm.AMDGPU.lrp(float %37, float %300, float 1.000000e+00)
+ %751 = call float @llvm.AMDGPU.lrp(float %38, float %747, float 1.000000e+00)
+ %752 = fmul float %748, %751
+ %753 = fmul float %749, %751
+ %754 = fmul float %750, %751
+ %755 = fmul float %742, %752
+ %756 = fmul float %744, %753
+ %757 = fmul float %746, %754
+ %758 = fmul float %temp12.0, %216
+ %759 = fmul float %temp13.0, %217
+ %760 = fadd float %759, %758
+ %761 = fmul float %temp14.0, %218
+ %762 = fadd float %760, %761
+ %763 = call float @fabs(float %762)
+ %764 = fmul float %763, %763
+ %765 = fmul float %764, %50
+ %766 = fadd float %765, %51
+ %767 = call float @llvm.AMDIL.clamp.(float %766, float 0.000000e+00, float 1.000000e+00)
+ %768 = fsub float -0.000000e+00, %767
+ %769 = fadd float 1.000000e+00, %768
+ %770 = fmul float %33, %769
+ %771 = fmul float %33, %769
+ %772 = fmul float %33, %769
+ %773 = fmul float %34, %769
+ %774 = call float @llvm.AMDGPU.lrp(float %770, float %31, float %755)
+ %775 = call float @llvm.AMDGPU.lrp(float %771, float %31, float %756)
+ %776 = call float @llvm.AMDGPU.lrp(float %772, float %31, float %757)
+ %777 = call float @llvm.AMDGPU.lrp(float %773, float %32, float %374)
+ %778 = fcmp uge float %774, 0x3E6FFFFE60000000
+ %779 = select i1 %778, float %774, float 0x3E6FFFFE60000000
+ %780 = fcmp uge float %775, 0x3E6FFFFE60000000
+ %781 = select i1 %780, float %775, float 0x3E6FFFFE60000000
+ %782 = fcmp uge float %776, 0x3E6FFFFE60000000
+ %783 = select i1 %782, float %776, float 0x3E6FFFFE60000000
+ %784 = fcmp uge float %779, 6.550400e+04
+ %785 = select i1 %784, float 6.550400e+04, float %779
+ %786 = fcmp uge float %781, 6.550400e+04
+ %787 = select i1 %786, float 6.550400e+04, float %781
+ %788 = fcmp uge float %783, 6.550400e+04
+ %789 = select i1 %788, float 6.550400e+04, float %783
+ %790 = fmul float %777, %52
+ %791 = fadd float %790, %53
+ %792 = call float @llvm.AMDIL.clamp.(float %791, float 0.000000e+00, float 1.000000e+00)
+ %793 = call i32 @llvm.SI.packf16(float %785, float %787)
+ %794 = bitcast i32 %793 to float
+ %795 = call i32 @llvm.SI.packf16(float %789, float %792)
+ %796 = bitcast i32 %795 to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %794, float %796, float %794, float %796)
+ ret void
+
+ELSE214: ; preds = %ELSE211
+ %797 = fcmp olt float %565, %81
+ %798 = sext i1 %797 to i32
+ %799 = bitcast i32 %798 to float
+ %800 = bitcast float %799 to i32
+ %801 = icmp ne i32 %800, 0
+ %.230 = select i1 %801, float %104, float %100
+ %.231 = select i1 %801, float %105, float %101
+ %.232 = select i1 %801, float %106, float %102
+ %.233 = select i1 %801, float %107, float %103
+ br label %ENDIF209
+}
+
+; Function Attrs: readnone
+declare float @llvm.AMDIL.clamp.(float, float, float) #2
+
+; Function Attrs: nounwind readnone
+declare <4 x float> @llvm.SI.sample.v2i32(<2 x i32>, <32 x i8>, <16 x i8>, i32) #1
+
+; Function Attrs: readnone
+declare float @llvm.AMDGPU.lrp(float, float, float) #2
+
+; Function Attrs: nounwind readnone
+declare <4 x float> @llvm.SI.samplel.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32) #1
+
+; Function Attrs: readnone
+declare float @llvm.AMDGPU.cndlt(float, float, float) #2
+
+; Function Attrs: readnone
+declare float @llvm.AMDIL.exp.(float) #2
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { readnone }
+attributes #3 = { nounwind readonly }
+attributes #4 = { readonly }
diff --git a/test/CodeGen/R600/smrd.ll b/test/CodeGen/R600/smrd.ll
new file mode 100644
index 0000000..43231df
--- /dev/null
+++ b/test/CodeGen/R600/smrd.ll
@@ -0,0 +1,82 @@
+; RUN: llc < %s -march=r600 -mcpu=SI -show-mc-encoding -verify-machineinstrs | FileCheck %s
+
+; SMRD load with an immediate offset.
+; CHECK-LABEL: @smrd0
+; CHECK: S_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 1 ; encoding: [0x01
+define void @smrd0(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+entry:
+ %0 = getelementptr i32 addrspace(2)* %ptr, i64 1
+ %1 = load i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; SMRD load with the largest possible immediate offset.
+; CHECK-LABEL: @smrd1
+; CHECK: S_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 255 ; encoding: [0xff
+define void @smrd1(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+entry:
+ %0 = getelementptr i32 addrspace(2)* %ptr, i64 255
+ %1 = load i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; SMRD load with an offset greater than the largest possible immediate.
+; CHECK-LABEL: @smrd2
+; CHECK: S_MOV_B32 s[[OFFSET:[0-9]]], 1024
+; CHECK: S_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
+define void @smrd2(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+entry:
+ %0 = getelementptr i32 addrspace(2)* %ptr, i64 256
+ %1 = load i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; SMRD load using the load.const intrinsic with an immediate offset
+; CHECK-LABEL: @smrd_load_const0
+; CHECK: S_BUFFER_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 4 ; encoding: [0x04
+define void @smrd_load_const0(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
+main_body:
+ %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %21 = load <16 x i8> addrspace(2)* %20
+ %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
+ ret void
+}
+
+; SMRD load using the load.const intrinsic with the largest possible
+; immediate offset.
+; CHECK-LABEL: @smrd_load_const1
+; CHECK: S_BUFFER_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 255 ; encoding: [0xff
+define void @smrd_load_const1(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
+main_body:
+ %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %21 = load <16 x i8> addrspace(2)* %20
+ %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 1020)
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
+ ret void
+}
+
+; SMRD load using the load.const intrinsic with an offset greater than the
+; largest possible immediate offset.
+; CHECK-LABEL: @smrd_load_const2
+; CHECK: S_BUFFER_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
+define void @smrd_load_const2(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
+main_body:
+ %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %21 = load <16 x i8> addrspace(2)* %20
+ %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 1024)
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
+ ret void
+}
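+
+; The same 255-dword ceiling in byte terms: a load.const offset of 1020
+; (255 * 4) is the last one that encodes as an immediate, while 1024
+; (256 * 4) spills to the SGPR form checked above.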
+
+; Function Attrs: nounwind readnone
+declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/store-v3i32.ll b/test/CodeGen/R600/store-v3i32.ll
new file mode 100644
index 0000000..3357803
--- /dev/null
+++ b/test/CodeGen/R600/store-v3i32.ll
@@ -0,0 +1,12 @@
+; XFAIL: *
+; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+
+; 3-element vectors have the same size and alignment as 4-element vectors,
+; so this should be done in a single store.
+
+; SI-LABEL: @store_v3i32:
+; SI: BUFFER_STORE_DWORDX4
+define void @store_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a) nounwind {
+ store <3 x i32> %a, <3 x i32> addrspace(1)* %out, align 16
+ ret void
+}
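+
+; A sketch of the lowering this expects (hypothetical IR, assuming the
+; padded 16-byte slot may be written in full):
+;   %outc = bitcast <3 x i32> addrspace(1)* %out to <4 x i32> addrspace(1)*
+;   %wide = shufflevector <3 x i32> %a, <3 x i32> undef,
+;                         <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
+;   store <4 x i32> %wide, <4 x i32> addrspace(1)* %outc, align 16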
diff --git a/test/CodeGen/R600/store-v3i64.ll b/test/CodeGen/R600/store-v3i64.ll
new file mode 100644
index 0000000..58229f6
--- /dev/null
+++ b/test/CodeGen/R600/store-v3i64.ll
@@ -0,0 +1,28 @@
+; XFAIL: *
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @global_store_v3i64:
+; SI: BUFFER_STORE_DWORDX4
+; SI: BUFFER_STORE_DWORDX4
+define void @global_store_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> %x) {
+ store <3 x i64> %x, <3 x i64> addrspace(1)* %out, align 32
+ ret void
+}
+
+; SI-LABEL: @global_store_v3i64_unaligned:
+define void @global_store_v3i64_unaligned(<3 x i64> addrspace(1)* %out, <3 x i64> %x) {
+ store <3 x i64> %x, <3 x i64> addrspace(1)* %out, align 1
+ ret void
+}
+
+; SI-LABEL: @local_store_v3i64:
+define void @local_store_v3i64(<3 x i64> addrspace(3)* %out, <3 x i64> %x) {
+ store <3 x i64> %x, <3 x i64> addrspace(3)* %out, align 32
+ ret void
+}
+
+; SI-LABEL: @local_store_v3i64_unaligned:
+define void @local_store_v3i64_unaligned(<3 x i64> addrspace(3)* %out, <3 x i64> %x) {
+  store <3 x i64> %x, <3 x i64> addrspace(3)* %out, align 1
+ ret void
+}
diff --git a/test/CodeGen/R600/store-vector-ptrs.ll b/test/CodeGen/R600/store-vector-ptrs.ll
index 01210ce..3af7d91 100644
--- a/test/CodeGen/R600/store-vector-ptrs.ll
+++ b/test/CodeGen/R600/store-vector-ptrs.ll
@@ -1,3 +1,4 @@
+; REQUIRES: asserts
; XFAIL: *
; RUN: llc -march=r600 -mcpu=SI < %s
diff --git a/test/CodeGen/R600/store.ll b/test/CodeGen/R600/store.ll
index 5e51d56..a3c5331 100644
--- a/test/CodeGen/R600/store.ll
+++ b/test/CodeGen/R600/store.ll
@@ -1,10 +1,18 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck --check-prefix=CM-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck --check-prefix=CM-CHECK --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK --check-prefix=FUNC %s
;===------------------------------------------------------------------------===;
; Global Address Space
;===------------------------------------------------------------------------===;
+; FUNC-LABEL: @store_i1
+; EG-CHECK: MEM_RAT MSKOR
+; SI-CHECK: BUFFER_STORE_BYTE
+define void @store_i1(i1 addrspace(1)* %out) {
+entry:
+ store i1 true, i1 addrspace(1)* %out
+ ret void
+}
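+
+; i1 is stored as a byte: the bit is extended to i8 and written with a byte
+; store, hence BUFFER_STORE_BYTE on SI and the byte-masked MEM_RAT MSKOR
+; read-modify-write on Evergreen.
+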
; i8 store
; EG-CHECK-LABEL: @store_i8
@@ -173,6 +181,15 @@ entry:
; Local Address Space
;===------------------------------------------------------------------------===;
+; FUNC-LABEL: @store_local_i1
+; EG-CHECK: LDS_BYTE_WRITE
+; SI-CHECK: DS_WRITE_B8
+define void @store_local_i1(i1 addrspace(3)* %out) {
+entry:
+ store i1 true, i1 addrspace(3)* %out
+ ret void
+}
+
; EG-CHECK-LABEL: @store_local_i8
; EG-CHECK: LDS_BYTE_WRITE
; SI-CHECK-LABEL: @store_local_i8
diff --git a/test/CodeGen/R600/trunc-store-i1.ll b/test/CodeGen/R600/trunc-store-i1.ll
new file mode 100644
index 0000000..a888943
--- /dev/null
+++ b/test/CodeGen/R600/trunc-store-i1.ll
@@ -0,0 +1,32 @@
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @global_truncstore_i32_to_i1
+; SI: S_LOAD_DWORD [[LOAD:s[0-9]+]],
+; SI: S_AND_B32 [[SREG:s[0-9]+]], [[LOAD]], 1
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], [[SREG]]
+; SI: BUFFER_STORE_BYTE [[VREG]],
+define void @global_truncstore_i32_to_i1(i1 addrspace(1)* %out, i32 %val) nounwind {
+ %trunc = trunc i32 %val to i1
+ store i1 %trunc, i1 addrspace(1)* %out, align 1
+ ret void
+}
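+
+; (The S_AND_B32 with 1 above masks the value down to its low bit, which is
+; all that an i1 store may write, before it goes out as a byte.)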
+
+; SI-LABEL: @global_truncstore_i64_to_i1
+; SI: BUFFER_STORE_BYTE
+define void @global_truncstore_i64_to_i1(i1 addrspace(1)* %out, i64 %val) nounwind {
+ %trunc = trunc i64 %val to i1
+ store i1 %trunc, i1 addrspace(1)* %out, align 1
+ ret void
+}
+
+; SI-LABEL: @global_truncstore_i16_to_i1
+; SI: S_LOAD_DWORD [[LOAD:s[0-9]+]],
+; SI: S_AND_B32 [[SREG:s[0-9]+]], [[LOAD]], 1
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], [[SREG]]
+; SI: BUFFER_STORE_BYTE [[VREG]],
+define void @global_truncstore_i16_to_i1(i1 addrspace(1)* %out, i16 %val) nounwind {
+ %trunc = trunc i16 %val to i1
+ store i1 %trunc, i1 addrspace(1)* %out, align 1
+ ret void
+}
diff --git a/test/CodeGen/R600/trunc.ll b/test/CodeGen/R600/trunc.ll
index 0bd320a..8a759dc 100644
--- a/test/CodeGen/R600/trunc.ll
+++ b/test/CodeGen/R600/trunc.ll
@@ -16,15 +16,39 @@ define void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
ret void
}
+; SI-LABEL: @trunc_load_shl_i64:
+; SI-DAG: S_LOAD_DWORDX2
+; SI-DAG: S_LOAD_DWORD [[SREG:s[0-9]+]],
+; SI: S_LSHL_B32 [[SHL:s[0-9]+]], [[SREG]], 2
+; SI: V_MOV_B32_e32 [[VSHL:v[0-9]+]], [[SHL]]
+; SI: BUFFER_STORE_DWORD [[VSHL]],
+define void @trunc_load_shl_i64(i32 addrspace(1)* %out, i64 %a) {
+ %b = shl i64 %a, 2
+ %result = trunc i64 %b to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
; SI-LABEL: @trunc_shl_i64:
-; SI: S_LOAD_DWORDX2
-; SI: S_LOAD_DWORDX2 [[SREG:s\[[0-9]+:[0-9]+\]]]
-; SI: S_LSHL_B64 s{{\[}}[[LO_SREG:[0-9]+]]:{{[0-9]+\]}}, [[SREG]], 2
-; SI: MOV_B32_e32 v[[LO_VREG:[0-9]+]], s[[LO_SREG]]
+; SI: S_LOAD_DWORDX2 s{{\[}}[[LO_SREG:[0-9]+]]:{{[0-9]+\]}},
+; SI: V_ADD_I32_e32 v[[LO_ADD:[0-9]+]], s[[LO_SREG]],
+; SI: V_LSHL_B64 v{{\[}}[[LO_VREG:[0-9]+]]:{{[0-9]+\]}}, v{{\[}}[[LO_ADD]]:{{[0-9]+\]}}, 2
; SI: BUFFER_STORE_DWORD v[[LO_VREG]],
-define void @trunc_shl_i64(i32 addrspace(1)* %out, i64 %a) {
- %b = shl i64 %a, 2
+define void @trunc_shl_i64(i64 addrspace(1)* %out2, i32 addrspace(1)* %out, i64 %a) {
+ %aa = add i64 %a, 234 ; Prevent shrinking store.
+ %b = shl i64 %aa, 2
%result = trunc i64 %b to i32
store i32 %result, i32 addrspace(1)* %out, align 4
+ store i64 %b, i64 addrspace(1)* %out2, align 8 ; Prevent reducing ops to 32-bits
+ ret void
+}
+
+; SI-LABEL: @trunc_i32_to_i1:
+; SI: V_AND_B32
+; SI: V_CMP_EQ_I32
+define void @trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a) {
+ %trunc = trunc i32 %a to i1
+ %result = select i1 %trunc, i32 1, i32 0
+ store i32 %result, i32 addrspace(1)* %out, align 4
ret void
}
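+
+; (Only the low bit of %a survives the trunc, hence the V_AND_B32 with 1;
+; V_CMP_EQ_I32 then converts that bit into the condition mask for the
+; select.)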
diff --git a/test/CodeGen/R600/unhandled-loop-condition-assertion.ll b/test/CodeGen/R600/unhandled-loop-condition-assertion.ll
new file mode 100644
index 0000000..e4129c5
--- /dev/null
+++ b/test/CodeGen/R600/unhandled-loop-condition-assertion.ll
@@ -0,0 +1,114 @@
+; REQUIRES: asserts
+; XFAIL: *
+; RUN: llc -O0 -verify-machineinstrs -asm-verbose=0 -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=COMMON %s
+; RUN: llc -O0 -verify-machineinstrs -asm-verbose=0 -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=COMMON %s
+
+; SI hits an assertion at -O0, and Evergreen hits a "not implemented" unreachable.
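+; The three functions below are identical except for the branch condition in
+; the entry block (true, false, undef); the loop body is just filler for the
+; backend to lower.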
+
+; COMMON-LABEL: @branch_true:
+define void @branch_true(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
+entry:
+ br i1 true, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph: ; preds = %entry
+ %add.ptr.sum = shl i32 %main_stride, 1
+ %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride
+ %add.ptr4.sum = shl i32 %main_stride, 2
+ br label %for.body
+
+for.body: ; preds = %for.body, %for.body.lr.ph
+ %main.addr.011 = phi i8 addrspace(1)* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
+ %0 = bitcast i8 addrspace(1)* %main.addr.011 to i32 addrspace(1)*
+ %1 = load i32 addrspace(1)* %0, align 4
+ %add.ptr = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %main_stride
+ %2 = bitcast i8 addrspace(1)* %add.ptr to i32 addrspace(1)*
+ %3 = load i32 addrspace(1)* %2, align 4
+ %add.ptr1 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
+ %4 = bitcast i8 addrspace(1)* %add.ptr1 to i32 addrspace(1)*
+ %5 = load i32 addrspace(1)* %4, align 4
+ %add.ptr2 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
+ %6 = bitcast i8 addrspace(1)* %add.ptr2 to i32 addrspace(1)*
+ %7 = load i32 addrspace(1)* %6, align 4
+ %add.ptr3 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
+ %8 = bitcast i8 addrspace(1)* %add.ptr3 to i32 addrspace(1)*
+ %9 = load i32 addrspace(1)* %8, align 4
+ %add.ptr6 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 undef
+ br i1 undef, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+; COMMON-LABEL: @branch_false:
+; SI: .text
+; SI-NEXT: S_ENDPGM
+define void @branch_false(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
+entry:
+ br i1 false, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph: ; preds = %entry
+ %add.ptr.sum = shl i32 %main_stride, 1
+ %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride
+ %add.ptr4.sum = shl i32 %main_stride, 2
+ br label %for.body
+
+for.body: ; preds = %for.body, %for.body.lr.ph
+ %main.addr.011 = phi i8 addrspace(1)* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
+ %0 = bitcast i8 addrspace(1)* %main.addr.011 to i32 addrspace(1)*
+ %1 = load i32 addrspace(1)* %0, align 4
+ %add.ptr = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %main_stride
+ %2 = bitcast i8 addrspace(1)* %add.ptr to i32 addrspace(1)*
+ %3 = load i32 addrspace(1)* %2, align 4
+ %add.ptr1 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
+ %4 = bitcast i8 addrspace(1)* %add.ptr1 to i32 addrspace(1)*
+ %5 = load i32 addrspace(1)* %4, align 4
+ %add.ptr2 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
+ %6 = bitcast i8 addrspace(1)* %add.ptr2 to i32 addrspace(1)*
+ %7 = load i32 addrspace(1)* %6, align 4
+ %add.ptr3 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
+ %8 = bitcast i8 addrspace(1)* %add.ptr3 to i32 addrspace(1)*
+ %9 = load i32 addrspace(1)* %8, align 4
+ %add.ptr6 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 undef
+ br i1 undef, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+; COMMON-LABEL: @branch_undef:
+; SI: .text
+; SI-NEXT: S_ENDPGM
+define void @branch_undef(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
+entry:
+ br i1 undef, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph: ; preds = %entry
+ %add.ptr.sum = shl i32 %main_stride, 1
+ %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride
+ %add.ptr4.sum = shl i32 %main_stride, 2
+ br label %for.body
+
+for.body: ; preds = %for.body, %for.body.lr.ph
+ %main.addr.011 = phi i8 addrspace(1)* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
+ %0 = bitcast i8 addrspace(1)* %main.addr.011 to i32 addrspace(1)*
+ %1 = load i32 addrspace(1)* %0, align 4
+ %add.ptr = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %main_stride
+ %2 = bitcast i8 addrspace(1)* %add.ptr to i32 addrspace(1)*
+ %3 = load i32 addrspace(1)* %2, align 4
+ %add.ptr1 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
+ %4 = bitcast i8 addrspace(1)* %add.ptr1 to i32 addrspace(1)*
+ %5 = load i32 addrspace(1)* %4, align 4
+ %add.ptr2 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
+ %6 = bitcast i8 addrspace(1)* %add.ptr2 to i32 addrspace(1)*
+ %7 = load i32 addrspace(1)* %6, align 4
+ %add.ptr3 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
+ %8 = bitcast i8 addrspace(1)* %add.ptr3 to i32 addrspace(1)*
+ %9 = load i32 addrspace(1)* %8, align 4
+ %add.ptr6 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 undef
+ br i1 undef, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/R600/unroll.ll b/test/CodeGen/R600/unroll.ll
new file mode 100644
index 0000000..e0035ea
--- /dev/null
+++ b/test/CodeGen/R600/unroll.ll
@@ -0,0 +1,37 @@
+; RUN: opt -loop-unroll -simplifycfg -sroa %s -S -o - | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048-n32:64"
+target triple = "r600--"
+
+; This test contains a simple loop that initializes an array declared in
+; private memory. We want to make sure these kinds of loops are always
+; unrolled, because private memory is slow.
+
+; CHECK-LABEL: @test
+; CHECK-NOT: alloca
+; CHECK: store i32 5, i32 addrspace(1)* %out
+define void @test(i32 addrspace(1)* %out) {
+entry:
+ %0 = alloca [32 x i32]
+ br label %loop.header
+
+loop.header:
+ %counter = phi i32 [0, %entry], [%inc, %loop.inc]
+ br label %loop.body
+
+loop.body:
+ %ptr = getelementptr [32 x i32]* %0, i32 0, i32 %counter
+ store i32 %counter, i32* %ptr
+ br label %loop.inc
+
+loop.inc:
+ %inc = add i32 %counter, 1
+ %1 = icmp sge i32 %counter, 32
+ br i1 %1, label %exit, label %loop.header
+
+exit:
+ %2 = getelementptr [32 x i32]* %0, i32 0, i32 5
+ %3 = load i32* %2
+ store i32 %3, i32 addrspace(1)* %out
+ ret void
+}
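+
+; Why the checks above prove the unroll: each iteration stores %counter at
+; index %counter, so once the loop is fully unrolled SROA can scalarize the
+; private array, the alloca disappears, and the load of element 5 folds to
+; the constant 5 that reaches the final store.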
diff --git a/test/CodeGen/R600/v1i64-kernel-arg.ll b/test/CodeGen/R600/v1i64-kernel-arg.ll
new file mode 100644
index 0000000..2aa1221
--- /dev/null
+++ b/test/CodeGen/R600/v1i64-kernel-arg.ll
@@ -0,0 +1,17 @@
+; REQUIRES: asserts
+; XFAIL: *
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck %s
+
+; CHECK-LABEL: @kernel_arg_i64
+define void @kernel_arg_i64(i64 addrspace(1)* %out, i64 %a) nounwind {
+ store i64 %a, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; i64 arg works, v1i64 arg does not.
+; CHECK-LABEL: @kernel_arg_v1i64
+define void @kernel_arg_v1i64(<1 x i64> addrspace(1)* %out, <1 x i64> %a) nounwind {
+ store <1 x i64> %a, <1 x i64> addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/v_cndmask.ll b/test/CodeGen/R600/v_cndmask.ll
new file mode 100644
index 0000000..f8e9655
--- /dev/null
+++ b/test/CodeGen/R600/v_cndmask.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI %s
+
+; SI-LABEL: @v_cnd_nan
+; SI: V_CNDMASK_B32_e64 v{{[0-9]}},
+; SI-DAG: v{{[0-9]}}
+; SI-DAG: {{nan|#QNAN}}
+define void @v_cnd_nan(float addrspace(1)* %out, i32 %c, float %f) {
+entry:
+ %0 = icmp ne i32 %c, 0
+ %1 = select i1 %0, float 0xFFFFFFFFE0000000, float %f
+ store float %1, float addrspace(1)* %out
+ ret void
+}
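+
+; (0xFFFFFFFFE0000000 is the 64-bit hex spelling of a float NaN, which is
+; why the constant operand is checked as either "nan" or "#QNAN".)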
diff --git a/test/CodeGen/R600/vtx-fetch-branch.ll b/test/CodeGen/R600/vtx-fetch-branch.ll
new file mode 100644
index 0000000..0fc99de
--- /dev/null
+++ b/test/CodeGen/R600/vtx-fetch-branch.ll
@@ -0,0 +1,29 @@
+; RUN: llc -march=r600 -mcpu=redwood %s -o - | FileCheck %s
+
+; This tests for a bug where vertex fetch clauses right before an ENDIF
+; instruction were being emitted after the ENDIF. We were using ALU_POP_AFTER
+; for the ALU clause before the vertex fetch instead of emitting a POP
+; instruction after the fetch clause.
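+;
+; Roughly, the two orderings (a sketch, not verbatim output):
+;   buggy:                        fixed:
+;     ALU_POP_AFTER                 ALU
+;     TEX (fetch clause)            TEX (fetch clause)
+;     <pop already taken>           POP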
+
+; CHECK-LABEL: @test
+; CHECK-NOT: ALU_POP_AFTER
+; CHECK: TEX
+; CHECK-NEXT: POP
+define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %cond) {
+entry:
+ %0 = icmp eq i32 %cond, 0
+ br i1 %0, label %endif, label %if
+
+if:
+ %1 = load i32 addrspace(1)* %in
+ br label %endif
+
+endif:
+ %x = phi i32 [ %1, %if], [ 0, %entry]
+ store i32 %x, i32 addrspace(1)* %out
+ br label %done
+
+done:
+ ret void
+}
diff --git a/test/CodeGen/R600/vtx-schedule.ll b/test/CodeGen/R600/vtx-schedule.ll
index 97d37ed..ce852c5 100644
--- a/test/CodeGen/R600/vtx-schedule.ll
+++ b/test/CodeGen/R600/vtx-schedule.ll
@@ -6,9 +6,9 @@
; CHECK: @test
; CHECK: Fetch clause
-; CHECK_VTX_READ_32 [[IN0:T[0-9]+\.X]], [[IN0]], 0
+; CHECK: VTX_READ_32 [[IN0:T[0-9]+\.X]], [[IN0]], 0
; CHECK: Fetch clause
-; CHECK_VTX_READ_32 [[IN1:T[0-9]+\.X]], [[IN1]], 0
+; CHECK: VTX_READ_32 [[IN1:T[0-9]+\.X]], [[IN1]], 0
define void @test(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* addrspace(1)* nocapture %in0) {
entry:
%0 = load i32 addrspace(1)* addrspace(1)* %in0
diff --git a/test/CodeGen/R600/xor.ll b/test/CodeGen/R600/xor.ll
index c12b0c1..49ed12d 100644
--- a/test/CodeGen/R600/xor.ll
+++ b/test/CodeGen/R600/xor.ll
@@ -54,3 +54,21 @@ define void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float ad
store float %result, float addrspace(1)* %out
ret void
}
+
+; SI-CHECK-LABEL: @vector_xor_i32
+; SI-CHECK: V_XOR_B32_e32
+define void @vector_xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
+ %a = load i32 addrspace(1)* %in0
+ %b = load i32 addrspace(1)* %in1
+ %result = xor i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; SI-CHECK-LABEL: @scalar_xor_i32
+; SI-CHECK: S_XOR_B32
+define void @scalar_xor_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+ %result = xor i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
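+
+; The split follows from where the operands live: loaded values are
+; per-thread and sit in VGPRs, so the xor lowers to the vector ALU
+; (V_XOR_B32_e32), while scalar kernel arguments are uniform SGPR values and
+; can use the scalar unit (S_XOR_B32).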
diff --git a/test/CodeGen/R600/zero_extend.ll b/test/CodeGen/R600/zero_extend.ll
index 481b3b3..a114bfc 100644
--- a/test/CodeGen/R600/zero_extend.ll
+++ b/test/CodeGen/R600/zero_extend.ll
@@ -16,3 +16,13 @@ entry:
store i64 %2, i64 addrspace(1)* %out
ret void
}
+
+; SI-CHECK-LABEL: @testi1toi32
+; SI-CHECK: V_CNDMASK_B32
+define void @testi1toi32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+entry:
+ %0 = icmp eq i32 %a, %b
+ %1 = zext i1 %0 to i32
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
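+
+; The zext of the compare result becomes a conditional move: the comparison
+; produces a per-lane condition mask and V_CNDMASK_B32 selects 1 or 0 from
+; it.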