Diffstat (limited to 'test/CodeGen/R600/global_atomics.ll')
-rw-r--r-- | test/CodeGen/R600/global_atomics.ll | 160
1 file changed, 80 insertions, 80 deletions
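
Every hunk in this patch applies the same mechanical rewrite: the getelementptr instructions in the tests are updated from the old syntax, where the element type was implied by the pointer operand, to the explicit-element-type form that spells the pointee type out as a separate first argument. A minimal before/after sketch (using the same %out and %gep names as the tests below):

    ; old form: element type inferred from the i32 addrspace(1)* operand
    %gep = getelementptr i32 addrspace(1)* %out, i32 4
    ; new form: element type i32 stated explicitly before the pointer operand
    %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4

The atomicrmw operations and the SI CHECK patterns are untouched; only the getelementptr spelling changes.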
diff --git a/test/CodeGen/R600/global_atomics.ll b/test/CodeGen/R600/global_atomics.ll
index 5a07a02..847950f 100644
--- a/test/CodeGen/R600/global_atomics.ll
+++ b/test/CodeGen/R600/global_atomics.ll
@@ -4,7 +4,7 @@
 ; SI: buffer_atomic_add v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_add_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -14,7 +14,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_add_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -24,8 +24,8 @@ entry:
 ; SI: buffer_atomic_add v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
 define void @atomic_add_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -35,8 +35,8 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_add_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -64,7 +64,7 @@ entry:
 ; SI: buffer_atomic_add v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
 define void @atomic_add_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
@@ -74,7 +74,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_add_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -84,7 +84,7 @@ entry:
 ; SI: buffer_atomic_and v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_and_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -94,7 +94,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_and_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -104,8 +104,8 @@ entry:
 ; SI: buffer_atomic_and v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
 define void @atomic_and_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -115,8 +115,8 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_and_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -144,7 +144,7 @@ entry:
 ; SI: buffer_atomic_and v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
 define void @atomic_and_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
@@ -154,7 +154,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_and_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -164,7 +164,7 @@ entry:
 ; SI: buffer_atomic_sub v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_sub_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -174,7 +174,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_sub_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -184,8 +184,8 @@ entry:
 ; SI: buffer_atomic_sub v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
 define void @atomic_sub_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -195,8 +195,8 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -224,7 +224,7 @@ entry:
 ; SI: buffer_atomic_sub v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
 define void @atomic_sub_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
@@ -234,7 +234,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_sub_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -244,7 +244,7 @@ entry:
 ; SI: buffer_atomic_smax v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -254,7 +254,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_max_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -264,8 +264,8 @@ entry:
 ; SI: buffer_atomic_smax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
 define void @atomic_max_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -275,8 +275,8 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_max_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -304,7 +304,7 @@ entry:
 ; SI: buffer_atomic_smax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
 define void @atomic_max_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
@@ -314,7 +314,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_max_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -324,7 +324,7 @@ entry:
 ; SI: buffer_atomic_umax v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_umax_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -334,7 +334,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_umax_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -344,8 +344,8 @@ entry:
 ; SI: buffer_atomic_umax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
 define void @atomic_umax_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -355,8 +355,8 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -384,7 +384,7 @@ entry:
 ; SI: buffer_atomic_umax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
 define void @atomic_umax_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
@@ -394,7 +394,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_umax_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -404,7 +404,7 @@ entry:
 ; SI: buffer_atomic_smin v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_min_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -414,7 +414,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_min_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -424,8 +424,8 @@ entry:
 ; SI: buffer_atomic_smin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
 define void @atomic_min_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -435,8 +435,8 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_min_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -464,7 +464,7 @@ entry:
 ; SI: buffer_atomic_smin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
 define void @atomic_min_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
@@ -474,7 +474,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_min_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -484,7 +484,7 @@ entry:
 ; SI: buffer_atomic_umin v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_umin_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -494,7 +494,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_umin_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -504,8 +504,8 @@ entry:
 ; SI: buffer_atomic_umin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
 define void @atomic_umin_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -515,8 +515,8 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -544,7 +544,7 @@ entry:
 ; SI: buffer_atomic_umin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
 define void @atomic_umin_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
@@ -554,7 +554,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_umin_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -564,7 +564,7 @@ entry:
 ; SI: buffer_atomic_or v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_or_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -574,7 +574,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_or_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -584,8 +584,8 @@ entry:
 ; SI: buffer_atomic_or v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
 define void @atomic_or_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -595,8 +595,8 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_or_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -624,7 +624,7 @@ entry:
 ; SI: buffer_atomic_or v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
 define void @atomic_or_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
@@ -634,7 +634,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_or_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -644,7 +644,7 @@ entry:
 ; SI: buffer_atomic_swap v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_xchg_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -654,7 +654,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_xchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -664,8 +664,8 @@ entry:
 ; SI: buffer_atomic_swap v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
 define void @atomic_xchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -675,8 +675,8 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -704,7 +704,7 @@ entry:
 ; SI: buffer_atomic_swap v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
 define void @atomic_xchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
@@ -714,7 +714,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_xchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -724,7 +724,7 @@ entry:
 ; SI: buffer_atomic_xor v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_xor_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -734,7 +734,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_xor_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
   %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -744,8 +744,8 @@ entry:
 ; SI: buffer_atomic_xor v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
 define void @atomic_xor_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
@@ -755,8 +755,8 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
   %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void
@@ -784,7 +784,7 @@ entry:
 ; SI: buffer_atomic_xor v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
 define void @atomic_xor_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
@@ -794,7 +794,7 @@ entry:
 ; SI: buffer_store_dword [[RET]]
 define void @atomic_xor_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
   %0 = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
   store i32 %0, i32 addrspace(1)* %out2
   ret void