aboutsummaryrefslogtreecommitdiffstats
path: root/test/CodeGen/X86/tailcallbyval64.ll
diff options
context:
space:
mode:
authorArnold Schwaighofer <arnold.schwaighofer@gmail.com>2008-04-12 18:11:06 +0000
committerArnold Schwaighofer <arnold.schwaighofer@gmail.com>2008-04-12 18:11:06 +0000
commit4b5324ad2cbf774c9c6ed02ea0fcc864f2f5f885 (patch)
treeb51fd2fb6c3ff45ccbbfb62170b614c9efd2489e /test/CodeGen/X86/tailcallbyval64.ll
parent460a14e09c2af630fc1e840dcb3e0f725663067b (diff)
downloadexternal_llvm-4b5324ad2cbf774c9c6ed02ea0fcc864f2f5f885.zip
external_llvm-4b5324ad2cbf774c9c6ed02ea0fcc864f2f5f885.tar.gz
external_llvm-4b5324ad2cbf774c9c6ed02ea0fcc864f2f5f885.tar.bz2
This patch corrects the handling of byval arguments for tailcall
optimized x86-64 (and x86) calls so that they work (at least for my test cases). Should fix the following problems: Problem 1: When I introduced the optimized handling of arguments for tail-called functions (using a sequence of copyto/copyfrom virtual registers instead of always lowering to the top of the stack), I did not handle byval arguments correctly — e.g. they did not work at all :). Problem 2: On x86-64, after the arguments of the tail-called function are moved to their registers (which include ESI/RSI etc.), tail call optimization performs byval lowering, which causes the xSI, xDI, and xCX registers to be overwritten. This is handled in this patch by moving the arguments to virtual registers first; after the byval lowering, the arguments are moved from those virtual registers back to RSI/RDI/RCX. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@49584 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/X86/tailcallbyval64.ll')
-rw-r--r--test/CodeGen/X86/tailcallbyval64.ll29
1 file changed, 29 insertions, 0 deletions
diff --git a/test/CodeGen/X86/tailcallbyval64.ll b/test/CodeGen/X86/tailcallbyval64.ll
new file mode 100644
index 0000000..7b65863
--- /dev/null
+++ b/test/CodeGen/X86/tailcallbyval64.ll
@@ -0,0 +1,29 @@
+; RUN: llvm-as < %s | llc -march=x86-64 -tailcallopt | grep TAILCALL
+; Expect 2 rep;movs because of tail call byval lowering.
+; RUN: llvm-as < %s | llc -march=x86-64 -tailcallopt | grep rep | wc -l | grep 2
+; A sequence of copyto/copyfrom virtual registers is used to deal with byval
+; lowering appearing after moving arguments to registers. The following two
+; checks verify that the register allocator changes those sequences to direct
+; moves to argument register where it can (for registers that are not used in
+; byval lowering - not rsi, not rdi, not rcx).
+; Expect argument 4 to be moved directly to register edx.
+; RUN: llvm-as < %s | llc -march=x86-64 -tailcallopt | grep movl | grep {7} | grep edx
+; Expect argument 6 to be moved directly to register r8.
+; RUN: llvm-as < %s | llc -march=x86-64 -tailcallopt | grep movl | grep {17} | grep r8
+
+%struct.s = type { i64, i64, i64, i64, i64, i64, i64, i64,
+ i64, i64, i64, i64, i64, i64, i64, i64,
+ i64, i64, i64, i64, i64, i64, i64, i64 }
+
+declare fastcc i64 @tailcallee(%struct.s* byval %a, i64 %val, i64 %val2, i64 %val3, i64 %val4, i64 %val5)
+
+
+define fastcc i64 @tailcaller(i64 %b, %struct.s* byval %a) {
+entry:
+ %tmp2 = getelementptr %struct.s* %a, i32 0, i32 1
+ %tmp3 = load i64* %tmp2, align 8
+ %tmp4 = tail call fastcc i64 @tailcallee(%struct.s* %a byval, i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
+ ret i64 %tmp4
+}
+
+