author | Tom Stellard <thomas.stellard@amd.com> | 2013-08-14 23:24:32 +0000 |
---|---|---|
committer | Tom Stellard <thomas.stellard@amd.com> | 2013-08-14 23:24:32 +0000 |
commit | 38d5e1c36d954f1ff6489f58efd1d4865217cf9b (patch) | |
tree | 451454dd8bf6ea5ec2f3ea021da2c7f6de4a928a /test/CodeGen/R600/si-lod-bias.ll | |
parent | 636298ba64fd07d4ddcae6005e7fc1db43eb5335 (diff) | |
R600/SI: Lower BUILD_VECTOR to REG_SEQUENCE v2
Using REG_SEQUENCE for BUILD_VECTOR rather than a series of INSERT_SUBREG
instructions should make it easier for the register allocator to coalesce
unnecessary copies.
v2:
- Use an SGPR register class if all the operands of BUILD_VECTOR are
SGPRs.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@188427 91177308-0d34-0410-b5e6-96231b3b80d8
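As an illustration of the change described above, here is a minimal sketch (not part of this commit): a hypothetical `@pack` function whose insertelement chain becomes a single BUILD_VECTOR node during SelectionDAG construction. With this patch the SI selector can emit that node as one REG_SEQUENCE instead of an IMPLICIT_DEF followed by a chain of INSERT_SUBREGs; the virtual register and sub-register names in the comments are illustrative only, not actual llc output.

```llvm
; Hypothetical reduced input (not from this test): four 32-bit values packed
; into a <4 x i32>, which SelectionDAG represents as a BUILD_VECTOR.
define <4 x i32> @pack(i32 %a, i32 %b, i32 %c, i32 %d) {
  %v0 = insertelement <4 x i32> undef, i32 %a, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %b, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %c, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %d, i32 3
  ret <4 x i32> %v3
}

; Machine-level sketch of the selection difference (illustrative names):
;   before: %t0 = IMPLICIT_DEF
;           %t1 = INSERT_SUBREG %t0, %a, sub0
;           %t2 = INSERT_SUBREG %t1, %b, sub1
;           %t3 = INSERT_SUBREG %t2, %c, sub2
;           %t4 = INSERT_SUBREG %t3, %d, sub3
;   after:  %t  = REG_SEQUENCE %a, sub0, %b, sub1, %c, sub2, %d, sub3
; A single full definition gives the coalescer one value to tie the operands
; to, instead of a chain of partial redefinitions, so the intermediate copies
; are easier to fold away.
```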
Diffstat (limited to 'test/CodeGen/R600/si-lod-bias.ll')
-rw-r--r-- | test/CodeGen/R600/si-lod-bias.ll | 50 |
1 file changed, 50 insertions, 0 deletions
```diff
diff --git a/test/CodeGen/R600/si-lod-bias.ll b/test/CodeGen/R600/si-lod-bias.ll
new file mode 100644
index 0000000..9b58f2a
--- /dev/null
+++ b/test/CodeGen/R600/si-lod-bias.ll
@@ -0,0 +1,50 @@
+;RUN: llc < %s -march=r600 -mcpu=verde | FileCheck %s
+
+; This shader has the potential to generate illegal VGPR to SGPR copies if
+; the wrong register class is used for the REG_SEQUENCE instructions.
+
+; CHECK: @main
+; CHECK: IMAGE_SAMPLE_B VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}, 15, 0, 0, 0, 0, 0, 0, 0, VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}
+
+define void @main(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
+main_body:
+  %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+  %21 = load <16 x i8> addrspace(2)* %20, !tbaa !0
+  %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
+  %23 = getelementptr <32 x i8> addrspace(2)* %2, i32 0
+  %24 = load <32 x i8> addrspace(2)* %23, !tbaa !0
+  %25 = getelementptr <16 x i8> addrspace(2)* %1, i32 0
+  %26 = load <16 x i8> addrspace(2)* %25, !tbaa !0
+  %27 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %3, <2 x i32> %5)
+  %28 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %3, <2 x i32> %5)
+  %29 = bitcast float %22 to i32
+  %30 = bitcast float %27 to i32
+  %31 = bitcast float %28 to i32
+  %32 = insertelement <4 x i32> undef, i32 %29, i32 0
+  %33 = insertelement <4 x i32> %32, i32 %30, i32 1
+  %34 = insertelement <4 x i32> %33, i32 %31, i32 2
+  %35 = insertelement <4 x i32> %34, i32 undef, i32 3
+  %36 = call <4 x float> @llvm.SI.sampleb.v4i32(<4 x i32> %35, <32 x i8> %24, <16 x i8> %26, i32 2)
+  %37 = extractelement <4 x float> %36, i32 0
+  %38 = extractelement <4 x float> %36, i32 1
+  %39 = extractelement <4 x float> %36, i32 2
+  %40 = extractelement <4 x float> %36, i32 3
+  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %37, float %38, float %39, float %40)
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <4 x float> @llvm.SI.sampleb.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32) #1
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
+
+!0 = metadata !{metadata !"const", null, i32 1}
```
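The test exercises the mixed case: %22 comes from a scalar constant-buffer read, while %27 and %28 are per-pixel interpolants, so the coordinate vector built by the insertelement chain must land in VGPRs for IMAGE_SAMPLE_B, which is what the CHECK line's VGPRn_VGPRn_VGPRn_VGPRn operands verify. For contrast, here is a rough sketch of the all-SGPR situation the v2 note is about: when every element of the vector is uniform, the REG_SEQUENCE can use an SGPR register class and the value never needs to be copied into vector registers. The function name @uniform_pair, its signature, and the <2 x i32> result are hypothetical and not part of the patch; the intrinsic declaration mirrors the one in the test.

```llvm
; Hypothetical all-uniform case (illustrative only, not part of this commit):
; both vector elements come from scalar constant-buffer reads, so the whole
; value is uniform and the selected REG_SEQUENCE should use an SGPR class.
define <2 x i32> @uniform_pair(<16 x i8> addrspace(2)* inreg %desc) #0 {
main_body:
  %rsrc = load <16 x i8> addrspace(2)* %desc
  %a = call float @llvm.SI.load.const(<16 x i8> %rsrc, i32 0)
  %b = call float @llvm.SI.load.const(<16 x i8> %rsrc, i32 4)
  %ai = bitcast float %a to i32
  %bi = bitcast float %b to i32
  %v0 = insertelement <2 x i32> undef, i32 %ai, i32 0
  %v1 = insertelement <2 x i32> %v0, i32 %bi, i32 1
  ret <2 x i32> %v1
}

; Function Attrs: nounwind readnone
declare float @llvm.SI.load.const(<16 x i8>, i32) #1

attributes #0 = { "ShaderType"="0" }
attributes #1 = { nounwind readnone }
```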