Diffstat (limited to 'test/CodeGen/R600/llvm.AMDGPU.div_scale.ll')
-rw-r--r--   test/CodeGen/R600/llvm.AMDGPU.div_scale.ll   88
1 file changed, 44 insertions(+), 44 deletions(-)
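
This diff updates the test to the explicit-pointee-type syntax for getelementptr and load (part of LLVM's move toward opaque pointer types): the type being indexed or loaded is now written as a separate first argument instead of being inferred from the pointer operand. A minimal before/after sketch of the syntax change follows; the value names %base, %idx, %p, and %v are illustrative placeholders, not taken from this test.

; old form: pointee type implied by the pointer operand
%p = getelementptr float addrspace(1)* %base, i32 %idx
%v = load float addrspace(1)* %p, align 4

; new form: pointee type spelled explicitly as the first argument
%p = getelementptr float, float addrspace(1)* %base, i32 %idx
%v = load float, float addrspace(1)* %p, align 4
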
diff --git a/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll b/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll
index 5773da0..de830de 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll
@@ -13,11 +13,11 @@ declare float @llvm.fabs.f32(float) nounwind readnone
; SI: s_endpgm
define void @test_div_scale_f32_1(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
- %a = load float addrspace(1)* %gep.0, align 4
- %b = load float addrspace(1)* %gep.1, align 4
+ %a = load float, float addrspace(1)* %gep.0, align 4
+ %b = load float, float addrspace(1)* %gep.1, align 4
%result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
%result0 = extractvalue { float, i1 } %result, 0
@@ -33,11 +33,11 @@ define void @test_div_scale_f32_1(float addrspace(1)* %out, float addrspace(1)*
; SI: s_endpgm
define void @test_div_scale_f32_2(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
- %a = load float addrspace(1)* %gep.0, align 4
- %b = load float addrspace(1)* %gep.1, align 4
+ %a = load float, float addrspace(1)* %gep.0, align 4
+ %b = load float, float addrspace(1)* %gep.1, align 4
%result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 true) nounwind readnone
%result0 = extractvalue { float, i1 } %result, 0
@@ -53,11 +53,11 @@ define void @test_div_scale_f32_2(float addrspace(1)* %out, float addrspace(1)*
; SI: s_endpgm
define void @test_div_scale_f64_1(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
- %a = load double addrspace(1)* %gep.0, align 8
- %b = load double addrspace(1)* %gep.1, align 8
+ %a = load double, double addrspace(1)* %gep.0, align 8
+ %b = load double, double addrspace(1)* %gep.1, align 8
%result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 false) nounwind readnone
%result0 = extractvalue { double, i1 } %result, 0
@@ -73,11 +73,11 @@ define void @test_div_scale_f64_1(double addrspace(1)* %out, double addrspace(1)
; SI: s_endpgm
define void @test_div_scale_f64_2(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
- %a = load double addrspace(1)* %gep.0, align 8
- %b = load double addrspace(1)* %gep.1, align 8
+ %a = load double, double addrspace(1)* %gep.0, align 8
+ %b = load double, double addrspace(1)* %gep.1, align 8
%result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 true) nounwind readnone
%result0 = extractvalue { double, i1 } %result, 0
@@ -93,9 +93,9 @@ define void @test_div_scale_f64_2(double addrspace(1)* %out, double addrspace(1)
; SI: s_endpgm
define void @test_div_scale_f32_scalar_num_1(float addrspace(1)* %out, float addrspace(1)* %in, float %a) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep = getelementptr float, float addrspace(1)* %in, i32 %tid
- %b = load float addrspace(1)* %gep, align 4
+ %b = load float, float addrspace(1)* %gep, align 4
%result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
%result0 = extractvalue { float, i1 } %result, 0
@@ -111,9 +111,9 @@ define void @test_div_scale_f32_scalar_num_1(float addrspace(1)* %out, float add
; SI: s_endpgm
define void @test_div_scale_f32_scalar_num_2(float addrspace(1)* %out, float addrspace(1)* %in, float %a) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep = getelementptr float, float addrspace(1)* %in, i32 %tid
- %b = load float addrspace(1)* %gep, align 4
+ %b = load float, float addrspace(1)* %gep, align 4
%result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 true) nounwind readnone
%result0 = extractvalue { float, i1 } %result, 0
@@ -129,9 +129,9 @@ define void @test_div_scale_f32_scalar_num_2(float addrspace(1)* %out, float add
; SI: s_endpgm
define void @test_div_scale_f32_scalar_den_1(float addrspace(1)* %out, float addrspace(1)* %in, float %b) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep = getelementptr float, float addrspace(1)* %in, i32 %tid
- %a = load float addrspace(1)* %gep, align 4
+ %a = load float, float addrspace(1)* %gep, align 4
%result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
%result0 = extractvalue { float, i1 } %result, 0
@@ -147,9 +147,9 @@ define void @test_div_scale_f32_scalar_den_1(float addrspace(1)* %out, float add
; SI: s_endpgm
define void @test_div_scale_f32_scalar_den_2(float addrspace(1)* %out, float addrspace(1)* %in, float %b) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep = getelementptr float, float addrspace(1)* %in, i32 %tid
- %a = load float addrspace(1)* %gep, align 4
+ %a = load float, float addrspace(1)* %gep, align 4
%result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 true) nounwind readnone
%result0 = extractvalue { float, i1 } %result, 0
@@ -165,9 +165,9 @@ define void @test_div_scale_f32_scalar_den_2(float addrspace(1)* %out, float add
; SI: s_endpgm
define void @test_div_scale_f64_scalar_num_1(double addrspace(1)* %out, double addrspace(1)* %in, double %a) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
- %b = load double addrspace(1)* %gep, align 8
+ %b = load double, double addrspace(1)* %gep, align 8
%result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 false) nounwind readnone
%result0 = extractvalue { double, i1 } %result, 0
@@ -183,9 +183,9 @@ define void @test_div_scale_f64_scalar_num_1(double addrspace(1)* %out, double a
; SI: s_endpgm
define void @test_div_scale_f64_scalar_num_2(double addrspace(1)* %out, double addrspace(1)* %in, double %a) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
- %b = load double addrspace(1)* %gep, align 8
+ %b = load double, double addrspace(1)* %gep, align 8
%result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 true) nounwind readnone
%result0 = extractvalue { double, i1 } %result, 0
@@ -201,9 +201,9 @@ define void @test_div_scale_f64_scalar_num_2(double addrspace(1)* %out, double a
; SI: s_endpgm
define void @test_div_scale_f64_scalar_den_1(double addrspace(1)* %out, double addrspace(1)* %in, double %b) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
- %a = load double addrspace(1)* %gep, align 8
+ %a = load double, double addrspace(1)* %gep, align 8
%result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 false) nounwind readnone
%result0 = extractvalue { double, i1 } %result, 0
@@ -219,9 +219,9 @@ define void @test_div_scale_f64_scalar_den_1(double addrspace(1)* %out, double a
; SI: s_endpgm
define void @test_div_scale_f64_scalar_den_2(double addrspace(1)* %out, double addrspace(1)* %in, double %b) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
- %a = load double addrspace(1)* %gep, align 8
+ %a = load double, double addrspace(1)* %gep, align 8
%result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 true) nounwind readnone
%result0 = extractvalue { double, i1 } %result, 0
@@ -294,8 +294,8 @@ define void @test_div_scale_f64_all_scalar_2(double addrspace(1)* %out, double %
; SI: s_endpgm
define void @test_div_scale_f32_inline_imm_num(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %a = load float addrspace(1)* %gep.0, align 4
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %a = load float, float addrspace(1)* %gep.0, align 4
%result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float 1.0, float %a, i1 false) nounwind readnone
%result0 = extractvalue { float, i1 } %result, 0
@@ -310,8 +310,8 @@ define void @test_div_scale_f32_inline_imm_num(float addrspace(1)* %out, float a
; SI: s_endpgm
define void @test_div_scale_f32_inline_imm_den(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %a = load float addrspace(1)* %gep.0, align 4
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %a = load float, float addrspace(1)* %gep.0, align 4
%result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float 2.0, i1 false) nounwind readnone
%result0 = extractvalue { float, i1 } %result, 0
@@ -327,11 +327,11 @@ define void @test_div_scale_f32_inline_imm_den(float addrspace(1)* %out, float a
; SI: s_endpgm
define void @test_div_scale_f32_fabs_num(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
- %a = load float addrspace(1)* %gep.0, align 4
- %b = load float addrspace(1)* %gep.1, align 4
+ %a = load float, float addrspace(1)* %gep.0, align 4
+ %b = load float, float addrspace(1)* %gep.1, align 4
%a.fabs = call float @llvm.fabs.f32(float %a) nounwind readnone
@@ -349,11 +349,11 @@ define void @test_div_scale_f32_fabs_num(float addrspace(1)* %out, float addrspa
; SI: s_endpgm
define void @test_div_scale_f32_fabs_den(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
- %a = load float addrspace(1)* %gep.0, align 4
- %b = load float addrspace(1)* %gep.1, align 4
+ %a = load float, float addrspace(1)* %gep.0, align 4
+ %b = load float, float addrspace(1)* %gep.1, align 4
%b.fabs = call float @llvm.fabs.f32(float %b) nounwind readnone