From 7eacad03efda36e09ebd96e95d7891cadaaa9087 Mon Sep 17 00:00:00 2001
From: Justin Holewinski
Date: Tue, 12 Feb 2013 14:18:49 +0000
Subject: [NVPTX] Disable vector registers

Vectors were being manually scalarized by the backend.  Instead, let the
target-independent code do all of the work.  The manual scalarization was
from a time before good target-independent support for scalarization in
LLVM.  However, this forces us to specially-handle vector loads and stores,
which we can turn into PTX instructions that produce/consume multiple
operands.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@174968 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/NVPTX/vector-loads.ll | 66 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 66 insertions(+)
 create mode 100644 test/CodeGen/NVPTX/vector-loads.ll

(limited to 'test/CodeGen/NVPTX/vector-loads.ll')

diff --git a/test/CodeGen/NVPTX/vector-loads.ll b/test/CodeGen/NVPTX/vector-loads.ll
new file mode 100644
index 0000000..f5a1795
--- /dev/null
+++ b/test/CodeGen/NVPTX/vector-loads.ll
@@ -0,0 +1,66 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; Even though general vector types are not supported in PTX, we can still
+; optimize loads/stores with pseudo-vector instructions of the form:
+;
+; ld.v2.f32 {%f0, %f1}, [%r0]
+;
+; which will load two floats at once into scalar registers.
+
+define void @foo(<2 x float>* %a) {
+; CHECK: .func foo
+; CHECK: ld.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}];
+  %t1 = load <2 x float>* %a
+  %t2 = fmul <2 x float> %t1, %t1
+  store <2 x float> %t2, <2 x float>* %a
+  ret void
+}
+
+define void @foo2(<4 x float>* %a) {
+; CHECK: .func foo2
+; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}];
+  %t1 = load <4 x float>* %a
+  %t2 = fmul <4 x float> %t1, %t1
+  store <4 x float> %t2, <4 x float>* %a
+  ret void
+}
+
+define void @foo3(<8 x float>* %a) {
+; CHECK: .func foo3
+; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}];
+; CHECK-NEXT: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}+16];
+  %t1 = load <8 x float>* %a
+  %t2 = fmul <8 x float> %t1, %t1
+  store <8 x float> %t2, <8 x float>* %a
+  ret void
+}
+
+
+
+define void @foo4(<2 x i32>* %a) {
+; CHECK: .func foo4
+; CHECK: ld.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}];
+  %t1 = load <2 x i32>* %a
+  %t2 = mul <2 x i32> %t1, %t1
+  store <2 x i32> %t2, <2 x i32>* %a
+  ret void
+}
+
+define void @foo5(<4 x i32>* %a) {
+; CHECK: .func foo5
+; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}];
+  %t1 = load <4 x i32>* %a
+  %t2 = mul <4 x i32> %t1, %t1
+  store <4 x i32> %t2, <4 x i32>* %a
+  ret void
+}
+
+define void @foo6(<8 x i32>* %a) {
+; CHECK: .func foo6
+; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}];
+; CHECK-NEXT: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}+16];
+  %t1 = load <8 x i32>* %a
+  %t2 = mul <8 x i32> %t1, %t1
+  store <8 x i32> %t2, <8 x i32>* %a
+  ret void
+}
--
cgit v1.1
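
Note: for @foo above, the kind of PTX these CHECK lines anticipate looks roughly
like the sketch below.  This is hand-written from the patterns in the test, not
captured llc output; register numbers, the exact multiply form (e.g. a rounding
modifier such as mul.rn.f32), and instruction ordering may differ, and the
matching vector store (st.v2/st.v4) is implied by the commit message rather than
checked by the test:

    // load both lanes of the <2 x float> at once into scalar registers
    ld.v2.f32   {%f1, %f2}, [%r1];
    // the fmul is scalarized over the unpacked lanes
    mul.f32     %f3, %f1, %f1;
    mul.f32     %f4, %f2, %f2;
    // store both lanes back with the vector-store counterpart
    st.v2.f32   [%r1], {%f3, %f4};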