Diffstat (limited to 'test/CodeGen/R600/ds_read2st64.ll')
-rw-r--r-- | test/CodeGen/R600/ds_read2st64.ll | 130
1 file changed, 65 insertions, 65 deletions
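
The change below is a mechanical syntax update: each getelementptr and load gains an explicit pointee-type operand in front of the pointer operand, while the offsets, alignments, and CHECK lines being tested are left untouched. A minimal before/after sketch, taken from the first changed access in simple_read2st64_f32_1_2:

  ; old form: the pointee type is implied by the pointer operand
  %arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.0
  %val0 = load float addrspace(3)* %arrayidx0, align 4

  ; new form: the pointee type is spelled out explicitly
  %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.0
  %val0 = load float, float addrspace(3)* %arrayidx0, align 4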
diff --git a/test/CodeGen/R600/ds_read2st64.ll b/test/CodeGen/R600/ds_read2st64.ll
index efd875e..482debb 100644
--- a/test/CodeGen/R600/ds_read2st64.ll
+++ b/test/CodeGen/R600/ds_read2st64.ll
@@ -12,13 +12,13 @@
 ; SI: s_endpgm
 define void @simple_read2st64_f32_0_1(float addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
-  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 64
-  %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
-  %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+  %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
   ret void
 }
@@ -32,13 +32,13 @@ define void @simple_read2st64_f32_0_1(float addrspace(1)* %out) #0 {
 define void @simple_read2st64_f32_1_2(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 64
-  %arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.0
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.0
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x.1 = add nsw i32 %x.i, 128
-  %arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.1
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.1
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
-  %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+  %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
   ret void
 }
@@ -52,13 +52,13 @@ define void @simple_read2st64_f32_1_2(float addrspace(1)* %out, float addrspace(
 define void @simple_read2st64_f32_max_offset(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 64
-  %arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.0
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.0
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x.1 = add nsw i32 %x.i, 16320
-  %arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.1
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.1
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
-  %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+  %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
   ret void
 }
@@ -72,13 +72,13 @@ define void @simple_read2st64_f32_max_offset(float addrspace(1)* %out, float add
 define void @simple_read2st64_f32_over_max_offset(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 64
-  %arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.0
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.0
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x.1 = add nsw i32 %x.i, 16384
-  %arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.1
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.1
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
-  %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+  %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
   ret void
 }
@@ -88,13 +88,13 @@ define void @simple_read2st64_f32_over_max_offset(float addrspace(1)* %out, floa
 ; SI: s_endpgm
 define void @odd_invalid_read2st64_f32_0(float addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
-  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 63
-  %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
-  %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+  %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
   ret void
 }
@@ -105,13 +105,13 @@ define void @odd_invalid_read2st64_f32_0(float addrspace(1)* %out) #0 {
 define void @odd_invalid_read2st64_f32_1(float addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 64
-  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.0
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.0
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x.1 = add nsw i32 %x.i, 127
-  %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.1
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.1
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
-  %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+  %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
   ret void
 }
@@ -124,13 +124,13 @@ define void @odd_invalid_read2st64_f32_1(float addrspace(1)* %out) #0 {
 ; SI: s_endpgm
 define void @simple_read2st64_f64_0_1(double addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
-  %arrayidx0 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x = add nsw i32 %x.i, 64
-  %arrayidx1 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
-  %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+  %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 8
   ret void
 }
@@ -144,13 +144,13 @@ define void @simple_read2st64_f64_0_1(double addrspace(1)* %out) #0 {
 define void @simple_read2st64_f64_1_2(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 64
-  %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.0
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x.1 = add nsw i32 %x.i, 128
-  %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.1
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.1
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
-  %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+  %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 8
   ret void
 }
@@ -163,13 +163,13 @@ define void @simple_read2st64_f64_1_2(double addrspace(1)* %out, double addrspac
 ; SI: s_endpgm
 define void @misaligned_read2st64_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
-  %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %x.i
-  %val0 = load double addrspace(3)* %arrayidx0, align 4
+  %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 64
-  %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x
-  %val1 = load double addrspace(3)* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 4
   %sum = fadd double %val0, %val1
-  %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+  %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 4
   ret void
 }
@@ -184,13 +184,13 @@ define void @misaligned_read2st64_f64(double addrspace(1)* %out, double addrspac
 define void @simple_read2st64_f64_max_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 256
-  %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.0
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x.1 = add nsw i32 %x.i, 8128
-  %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.1
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.1
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
-  %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+  %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 8
   ret void
 }
@@ -204,13 +204,13 @@ define void @simple_read2st64_f64_max_offset(double addrspace(1)* %out, double a
 define void @simple_read2st64_f64_over_max_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 64
-  %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.0
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x.1 = add nsw i32 %x.i, 8192
-  %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.1
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.1
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
-  %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+  %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 8
   ret void
 }
@@ -221,13 +221,13 @@ define void @simple_read2st64_f64_over_max_offset(double addrspace(1)* %out, dou
 define void @invalid_read2st64_f64_odd_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 64
-  %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.0
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x.1 = add nsw i32 %x.i, 8129
-  %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.1
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.1
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
-  %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+  %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 8
   ret void
 }
@@ -241,13 +241,13 @@ define void @invalid_read2st64_f64_odd_offset(double addrspace(1)* %out, double
 ; SI: s_endpgm
 define void @byte_size_only_divisible_64_read2_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
-  %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %x.i
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x = add nsw i32 %x.i, 8
-  %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
-  %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+  %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 4
   ret void
 }