-rw-r--r--  lib/Target/R600/SIInstructions.td      7
-rw-r--r--  lib/Target/R600/SIIntrinsics.td        1
-rw-r--r--  test/CodeGen/R600/llvm.SI.sampled.ll 140
3 files changed, 147 insertions(+), 1 deletion(-)
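
In short, the patch enables the IMAGE_SAMPLE_D and IMAGE_SAMPLE_C_D MIMG opcodes (the _D variants sample with user-supplied derivatives) and routes them through a new llvm.SI.sampled intrinsic, mirroring the existing llvm.SI.sample/sampleb/samplel plumbing. A minimal usage sketch, following the signature the new test declares; %addr is a hypothetical address vector, the empty overload suffix and the undef resource/sampler operands copy the test's spelling, and the trailing i32 immediate is passed through exactly as the test does:

  ; sketch only: %addr stands in for an address vector built the way the test
  ; builds %v1..%v16; resource/sampler descriptors are left undef as in the test
  %tex = call <4 x float> @llvm.SI.sampled.(<4 x i32> %addr,
                                             <8 x i32> undef, <4 x i32> undef, i32 1)

  declare <4 x float> @llvm.SI.sampled.(<4 x i32>, <8 x i32>, <4 x i32>, i32) readnone
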
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index 9c96c08..c9eac7d 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -535,7 +535,7 @@ def IMAGE_SAMPLE_B : MIMG_Sampler_Helper <0x00000025, "IMAGE_SAMPLE_B">;
//def IMAGE_SAMPLE_LZ : MIMG_NoPattern_ <"IMAGE_SAMPLE_LZ", 0x00000027>;
def IMAGE_SAMPLE_C : MIMG_Sampler_Helper <0x00000028, "IMAGE_SAMPLE_C">;
//def IMAGE_SAMPLE_C_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CL", 0x00000029>;
-//def IMAGE_SAMPLE_C_D : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D", 0x0000002a>;
+def IMAGE_SAMPLE_C_D : MIMG_Sampler_Helper <0x0000002a, "IMAGE_SAMPLE_C_D">;
//def IMAGE_SAMPLE_C_D_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D_CL", 0x0000002b>;
def IMAGE_SAMPLE_C_L : MIMG_Sampler_Helper <0x0000002c, "IMAGE_SAMPLE_C_L">;
def IMAGE_SAMPLE_C_B : MIMG_Sampler_Helper <0x0000002d, "IMAGE_SAMPLE_C_B">;
@@ -1296,6 +1296,11 @@ multiclass SamplePatterns<ValueType addr_type> {
def : SampleArrayPattern <int_SI_sampleb, IMAGE_SAMPLE_B, addr_type>;
def : SampleShadowPattern <int_SI_sampleb, IMAGE_SAMPLE_C_B, addr_type>;
def : SampleShadowArrayPattern <int_SI_sampleb, IMAGE_SAMPLE_C_B, addr_type>;
+
+ def : SamplePattern <int_SI_sampled, IMAGE_SAMPLE_D, addr_type>;
+ def : SampleArrayPattern <int_SI_sampled, IMAGE_SAMPLE_D, addr_type>;
+ def : SampleShadowPattern <int_SI_sampled, IMAGE_SAMPLE_C_D, addr_type>;
+ def : SampleShadowArrayPattern <int_SI_sampled, IMAGE_SAMPLE_C_D, addr_type>;
}
defm : SamplePatterns<v2i32>;
diff --git a/lib/Target/R600/SIIntrinsics.td b/lib/Target/R600/SIIntrinsics.td
index 224cd2f..d2643e0 100644
--- a/lib/Target/R600/SIIntrinsics.td
+++ b/lib/Target/R600/SIIntrinsics.td
@@ -23,6 +23,7 @@ let TargetPrefix = "SI", isTarget = 1 in {
def int_SI_sample : Sample;
def int_SI_sampleb : Sample;
+ def int_SI_sampled : Sample;
def int_SI_samplel : Sample;
def int_SI_imageload : Intrinsic <[llvm_v4i32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
diff --git a/test/CodeGen/R600/llvm.SI.sampled.ll b/test/CodeGen/R600/llvm.SI.sampled.ll
new file mode 100644
index 0000000..71b8ef5
--- /dev/null
+++ b/test/CodeGen/R600/llvm.SI.sampled.ll
@@ -0,0 +1,140 @@
+;RUN: llc < %s -march=r600 -mcpu=verde | FileCheck %s
+
+;CHECK: IMAGE_SAMPLE_D {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 15
+;CHECK: IMAGE_SAMPLE_D {{VGPR[0-9]+_VGPR[0-9]+}}, 3
+;CHECK: IMAGE_SAMPLE_D {{VGPR[0-9]+}}, 2
+;CHECK: IMAGE_SAMPLE_D {{VGPR[0-9]+}}, 1
+;CHECK: IMAGE_SAMPLE_D {{VGPR[0-9]+}}, 4
+;CHECK: IMAGE_SAMPLE_D {{VGPR[0-9]+}}, 8
+;CHECK: IMAGE_SAMPLE_C_D {{VGPR[0-9]+_VGPR[0-9]+}}, 5
+;CHECK: IMAGE_SAMPLE_C_D {{VGPR[0-9]+_VGPR[0-9]+}}, 9
+;CHECK: IMAGE_SAMPLE_C_D {{VGPR[0-9]+_VGPR[0-9]+}}, 6
+;CHECK: IMAGE_SAMPLE_D {{VGPR[0-9]+_VGPR[0-9]+}}, 10
+;CHECK: IMAGE_SAMPLE_D {{VGPR[0-9]+_VGPR[0-9]+}}, 12
+;CHECK: IMAGE_SAMPLE_C_D {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 7
+;CHECK: IMAGE_SAMPLE_C_D {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 11
+;CHECK: IMAGE_SAMPLE_C_D {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 13
+;CHECK: IMAGE_SAMPLE_D {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 14
+;CHECK: IMAGE_SAMPLE_D {{VGPR[0-9]+}}, 8
+
+define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
+ %v1 = insertelement <4 x i32> undef, i32 %a1, i32 0
+ %v2 = insertelement <4 x i32> undef, i32 %a1, i32 1
+ %v3 = insertelement <4 x i32> undef, i32 %a1, i32 2
+ %v4 = insertelement <4 x i32> undef, i32 %a1, i32 3
+ %v5 = insertelement <4 x i32> undef, i32 %a2, i32 0
+ %v6 = insertelement <4 x i32> undef, i32 %a2, i32 1
+ %v7 = insertelement <4 x i32> undef, i32 %a2, i32 2
+ %v8 = insertelement <4 x i32> undef, i32 %a2, i32 3
+ %v9 = insertelement <4 x i32> undef, i32 %a3, i32 0
+ %v10 = insertelement <4 x i32> undef, i32 %a3, i32 1
+ %v11 = insertelement <4 x i32> undef, i32 %a3, i32 2
+ %v12 = insertelement <4 x i32> undef, i32 %a3, i32 3
+ %v13 = insertelement <4 x i32> undef, i32 %a4, i32 0
+ %v14 = insertelement <4 x i32> undef, i32 %a4, i32 1
+ %v15 = insertelement <4 x i32> undef, i32 %a4, i32 2
+ %v16 = insertelement <4 x i32> undef, i32 %a4, i32 3
+ %res1 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v1,
+ <8 x i32> undef, <4 x i32> undef, i32 1)
+ %res2 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v2,
+ <8 x i32> undef, <4 x i32> undef, i32 2)
+ %res3 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v3,
+ <8 x i32> undef, <4 x i32> undef, i32 3)
+ %res4 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v4,
+ <8 x i32> undef, <4 x i32> undef, i32 4)
+ %res5 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v5,
+ <8 x i32> undef, <4 x i32> undef, i32 5)
+ %res6 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v6,
+ <8 x i32> undef, <4 x i32> undef, i32 6)
+ %res7 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v7,
+ <8 x i32> undef, <4 x i32> undef, i32 7)
+ %res8 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v8,
+ <8 x i32> undef, <4 x i32> undef, i32 8)
+ %res9 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v9,
+ <8 x i32> undef, <4 x i32> undef, i32 9)
+ %res10 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v10,
+ <8 x i32> undef, <4 x i32> undef, i32 10)
+ %res11 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v11,
+ <8 x i32> undef, <4 x i32> undef, i32 11)
+ %res12 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v12,
+ <8 x i32> undef, <4 x i32> undef, i32 12)
+ %res13 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v13,
+ <8 x i32> undef, <4 x i32> undef, i32 13)
+ %res14 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v14,
+ <8 x i32> undef, <4 x i32> undef, i32 14)
+ %res15 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v15,
+ <8 x i32> undef, <4 x i32> undef, i32 15)
+ %res16 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v16,
+ <8 x i32> undef, <4 x i32> undef, i32 16)
+ %e1 = extractelement <4 x float> %res1, i32 0
+ %e2 = extractelement <4 x float> %res2, i32 1
+ %e3 = extractelement <4 x float> %res3, i32 2
+ %e4 = extractelement <4 x float> %res4, i32 3
+ %t0 = extractelement <4 x float> %res5, i32 0
+ %t1 = extractelement <4 x float> %res5, i32 1
+ %e5 = fadd float %t0, %t1
+ %t2 = extractelement <4 x float> %res6, i32 0
+ %t3 = extractelement <4 x float> %res6, i32 2
+ %e6 = fadd float %t2, %t3
+ %t4 = extractelement <4 x float> %res7, i32 0
+ %t5 = extractelement <4 x float> %res7, i32 3
+ %e7 = fadd float %t4, %t5
+ %t6 = extractelement <4 x float> %res8, i32 1
+ %t7 = extractelement <4 x float> %res8, i32 2
+ %e8 = fadd float %t6, %t7
+ %t8 = extractelement <4 x float> %res9, i32 1
+ %t9 = extractelement <4 x float> %res9, i32 3
+ %e9 = fadd float %t8, %t9
+ %t10 = extractelement <4 x float> %res10, i32 2
+ %t11 = extractelement <4 x float> %res10, i32 3
+ %e10 = fadd float %t10, %t11
+ %t12 = extractelement <4 x float> %res11, i32 0
+ %t13 = extractelement <4 x float> %res11, i32 1
+ %t14 = extractelement <4 x float> %res11, i32 2
+ %t15 = fadd float %t12, %t13
+ %e11 = fadd float %t14, %t15
+ %t16 = extractelement <4 x float> %res12, i32 0
+ %t17 = extractelement <4 x float> %res12, i32 1
+ %t18 = extractelement <4 x float> %res12, i32 3
+ %t19 = fadd float %t16, %t17
+ %e12 = fadd float %t18, %t19
+ %t20 = extractelement <4 x float> %res13, i32 0
+ %t21 = extractelement <4 x float> %res13, i32 2
+ %t22 = extractelement <4 x float> %res13, i32 3
+ %t23 = fadd float %t20, %t21
+ %e13 = fadd float %t22, %t23
+ %t24 = extractelement <4 x float> %res14, i32 1
+ %t25 = extractelement <4 x float> %res14, i32 2
+ %t26 = extractelement <4 x float> %res14, i32 3
+ %t27 = fadd float %t24, %t25
+ %e14 = fadd float %t26, %t27
+ %t28 = extractelement <4 x float> %res15, i32 0
+ %t29 = extractelement <4 x float> %res15, i32 1
+ %t30 = extractelement <4 x float> %res15, i32 2
+ %t31 = extractelement <4 x float> %res15, i32 3
+ %t32 = fadd float %t28, %t29
+ %t33 = fadd float %t30, %t31
+ %e15 = fadd float %t32, %t33
+ %e16 = extractelement <4 x float> %res16, i32 3
+ %s1 = fadd float %e1, %e2
+ %s2 = fadd float %s1, %e3
+ %s3 = fadd float %s2, %e4
+ %s4 = fadd float %s3, %e5
+ %s5 = fadd float %s4, %e6
+ %s6 = fadd float %s5, %e7
+ %s7 = fadd float %s6, %e8
+ %s8 = fadd float %s7, %e9
+ %s9 = fadd float %s8, %e10
+ %s10 = fadd float %s9, %e11
+ %s11 = fadd float %s10, %e12
+ %s12 = fadd float %s11, %e13
+ %s13 = fadd float %s12, %e14
+ %s14 = fadd float %s13, %e15
+ %s15 = fadd float %s14, %e16
+ call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %s15, float %s15, float %s15, float %s15)
+ ret void
+}
+
+declare <4 x float> @llvm.SI.sampled.(<4 x i32>, <8 x i32>, <4 x i32>, i32) readnone
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)