From f17a25c88b892d30c2b41ba7ecdfbdfb2b4be9cc Mon Sep 17 00:00:00 2001
From: Dan Gohman
Date: Wed, 18 Jul 2007 16:29:46 +0000
Subject: It's not necessary to do rounding for alloca operations when the
 requested alignment is equal to the stack alignment.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40004 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/ARM/vargs2.ll | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)
 create mode 100644 test/CodeGen/ARM/vargs2.ll

(limited to 'test/CodeGen/ARM/vargs2.ll')

diff --git a/test/CodeGen/ARM/vargs2.ll b/test/CodeGen/ARM/vargs2.ll
new file mode 100644
index 0000000..a58516f
--- /dev/null
+++ b/test/CodeGen/ARM/vargs2.ll
@@ -0,0 +1,36 @@
+; RUN: llvm-upgrade < %s | llvm-as | llc -march=thumb
+; RUN: llvm-upgrade < %s | llvm-as | llc -march=thumb | \
+; RUN:   grep pop | wc -l | grep 2
+
+%str = internal constant [4 x sbyte] c"%d\0A\00"		; <[4 x sbyte]*> [#uses=1]
+
+implementation   ; Functions:
+
+void %f(int %a, ...) {
+entry:
+	%va = alloca sbyte*, align 4		; <sbyte**> [#uses=4]
+	call void %llvm.va_start( sbyte** %va )
+	br label %bb
+
+bb:		; preds = %bb, %entry
+	%a_addr.0 = phi int [ %a, %entry ], [ %tmp5, %bb ]		; <int> [#uses=2]
+	%tmp = volatile load sbyte** %va		; <sbyte*> [#uses=2]
+	%tmp2 = getelementptr sbyte* %tmp, int 4		; <sbyte*> [#uses=1]
+	volatile store sbyte* %tmp2, sbyte** %va
+	%tmp5 = add int %a_addr.0, -1		; <int> [#uses=1]
+	%tmp = seteq int %a_addr.0, 1		; <bool> [#uses=1]
+	br bool %tmp, label %bb7, label %bb
+
+bb7:		; preds = %bb
+	%tmp3 = cast sbyte* %tmp to int*		; <int*> [#uses=1]
+	%tmp = load int* %tmp3		; <int> [#uses=1]
+	%tmp10 = call int (sbyte*, ...)* %printf( sbyte* getelementptr ([4 x sbyte]* %str, int 0, uint 0), int %tmp )		; <int> [#uses=0]
+	call void %llvm.va_end( sbyte** %va )
+	ret void
+}
+
+declare void %llvm.va_start(sbyte**)
+
+declare int %printf(sbyte*, ...)
+
+declare void %llvm.va_end(sbyte**)
--
cgit v1.1
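
Note: the subject line only states the rationale; this cgit view is limited to the new ARM varargs test, so the lowering change itself is not shown above. As a rough C++ sketch of the stated rule (not the code from this commit; the names roundUpToAlignment, allocaReservation, RequestedAlign, and StackAlign are hypothetical), an alloca whose requested alignment is no greater than the stack alignment needs no extra rounding, because the stack pointer is already kept at least that aligned:

    // Rough sketch of the rule in the subject line; not the code from this
    // commit. All names here are hypothetical.
    #include <cassert>
    #include <cstdint>

    // Round Size up to the next multiple of Align (a power of two).
    uint64_t roundUpToAlignment(uint64_t Size, uint64_t Align) {
      assert(Align != 0 && (Align & (Align - 1)) == 0 && "power-of-two alignment");
      return (Size + Align - 1) & ~(Align - 1);
    }

    // Bytes to reserve for a dynamic alloca of Size bytes with RequestedAlign,
    // on a target whose stack pointer is always kept StackAlign-aligned.
    uint64_t allocaReservation(uint64_t Size, uint64_t RequestedAlign,
                               uint64_t StackAlign) {
      if (RequestedAlign <= StackAlign) {
        // The stack pointer already satisfies the request, so no rounding
        // for alignment is needed; only keep the size a multiple of StackAlign.
        return roundUpToAlignment(Size, StackAlign);
      }
      // Over-aligned request: over-allocate so the result pointer can be
      // rounded up to RequestedAlign within the reserved block.
      return roundUpToAlignment(Size + RequestedAlign - 1, StackAlign);
    }

Only an over-aligned alloca has to over-allocate and round the resulting pointer up; when the requested alignment merely equals the stack alignment, that extra rounding is the work the commit message says can be skipped.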