| author | Tanya Lattner <tonic@nondot.org> | 2008-02-17 20:02:20 +0000 |
|---|---|---|
| committer | Tanya Lattner <tonic@nondot.org> | 2008-02-17 20:02:20 +0000 |
| commit | 246a1372c91555031a42b6bf85a97318095a1904 (patch) | |
| tree | 0a2fe45a96ee63e58e105f65957fc5b9ffe9dc0b /test/CodeGen/ARM | |
| parent | 7644ff38715946b21501da228a963a8e5da6889d (diff) | |
| download | external_llvm-246a1372c91555031a42b6bf85a97318095a1904.zip external_llvm-246a1372c91555031a42b6bf85a97318095a1904.tar.gz external_llvm-246a1372c91555031a42b6bf85a97318095a1904.tar.bz2 | |
Remove llvm-upgrade.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@47238 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/ARM')
50 files changed, 906 insertions, 906 deletions
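In short, every test drops the llvm-upgrade step from its RUN lines (llvm-upgrade was the tool that translated pre-2.0 LLVM assembly into the current syntax) and is rewritten directly in the new IR syntax so llvm-as can assemble it unaided. A minimal before/after, taken from test/CodeGen/ARM/ret0.ll in the diff below:

Before:

    ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm
    int %test() {
      ret int 0
    }

After:

    ; RUN: llvm-as < %s | llc -march=arm
    define i32 @test() {
      ret i32 0
    }

The same pattern repeats across all 50 files: old type names (bool, sbyte, ubyte, short, int, uint, long, ulong) become sized integers (i1, i8, i16, i32, i64), globals move from the % to the @ prefix, function definitions gain the define keyword, and the generic cast/setcc/div/shr operations are split into their typed forms (sext, zext, trunc, icmp, fcmp, sitofp, sdiv, lshr, and so on).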
diff --git a/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll b/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll index 49ebead..caa9a98 100644 --- a/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll +++ b/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll @@ -1,20 +1,20 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v6 +; RUN: llvm-as < %s | llc -march=arm -mattr=+v6 - %struct.layer_data = type { int, [2048 x ubyte], ubyte*, [16 x ubyte], uint, ubyte*, int, int, [64 x int], [64 x int], [64 x int], [64 x int], int, int, int, int, int, int, int, int, int, int, int, int, [12 x [64 x short]] } -%ld = external global %struct.layer_data* +%struct.layer_data = type { i32, [2048 x i8], i8*, [16 x i8], i32, i8*, i32, i32, [64 x i32], [64 x i32], [64 x i32], [64 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [12 x [64 x i16]] } +@ld = external global %struct.layer_data* ; <%struct.layer_data**> [#uses=1] -void %main() { +define void @main() { entry: - br bool false, label %bb169.i, label %cond_true11 + br i1 false, label %bb169.i, label %cond_true11 -bb169.i: +bb169.i: ; preds = %entry ret void -cond_true11: - %tmp.i32 = load %struct.layer_data** %ld - %tmp3.i35 = getelementptr %struct.layer_data* %tmp.i32, int 0, uint 1, int 2048 - %tmp.i36 = getelementptr %struct.layer_data* %tmp.i32, int 0, uint 2 - store ubyte* %tmp3.i35, ubyte** %tmp.i36 - store ubyte* %tmp3.i35, ubyte** null - ret void +cond_true11: ; preds = %entry + %tmp.i32 = load %struct.layer_data** @ld ; <%struct.layer_data*> [#uses=2] + %tmp3.i35 = getelementptr %struct.layer_data* %tmp.i32, i32 0, i32 1, i32 2048; <i8*> [#uses=2] + %tmp.i36 = getelementptr %struct.layer_data* %tmp.i32, i32 0, i32 2 ; <i8**> [#uses=1] + store i8* %tmp3.i35, i8** %tmp.i36 + store i8* %tmp3.i35, i8** null + ret void } diff --git a/test/CodeGen/ARM/align.ll b/test/CodeGen/ARM/align.ll index 263f6ab..bb336ce 100644 --- a/test/CodeGen/ARM/align.ll +++ b/test/CodeGen/ARM/align.ll @@ -1,16 +1,15 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ -; RUN: grep align.*1 | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=arm-linux-gnueabi | \ +; RUN: llvm-as < %s | llc -march=arm | grep align.*1 | count 1 +; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnueabi | \ ; RUN: grep align.*2 | count 2 -; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=arm-linux-gnueabi | \ +; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnueabi | \ ; RUN: grep align.*3 | count 2 -; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=arm-apple-darwin | \ +; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin | \ ; RUN: grep align.*2 | count 4 -%a = global bool true -%b = global sbyte 1 -%c = global short 2 -%d = global int 3 -%e = global long 4 -%f = global float 5.0 -%g = global double 6.0 +@a = global i1 true +@b = global i8 1 +@c = global i16 2 +@d = global i32 3 +@e = global i64 4 +@f = global float 5.0 +@g = global double 6.0 diff --git a/test/CodeGen/ARM/alloca.ll b/test/CodeGen/ARM/alloca.ll index b98a674..f7e450f 100644 --- a/test/CodeGen/ARM/alloca.ll +++ b/test/CodeGen/ARM/alloca.ll @@ -1,13 +1,13 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mtriple=arm-linux-gnu | \ +; RUN: llvm-as < %s | llc -march=arm -mtriple=arm-linux-gnu | \ ; RUN: grep {mov r11, sp} -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mtriple=arm-linux-gnu | \ +; RUN: llvm-as < %s | llc -march=arm -mtriple=arm-linux-gnu | \ ; RUN: grep {mov sp, r11} -void %f(uint %a) { +define void @f(i32 %a) { entry: - %tmp = alloca sbyte, uint %a - call void %g( sbyte* %tmp, uint %a, uint 1, 
uint 2, uint 3 ) - ret void + %tmp = alloca i8, i32 %a ; <i8*> [#uses=1] + call void @g( i8* %tmp, i32 %a, i32 1, i32 2, i32 3 ) + ret void } -declare void %g(sbyte*, uint, uint, uint, uint) +declare void @g(i8*, i32, i32, i32, i32) diff --git a/test/CodeGen/ARM/argaddr.ll b/test/CodeGen/ARM/argaddr.ll index a131721..080827d 100644 --- a/test/CodeGen/ARM/argaddr.ll +++ b/test/CodeGen/ARM/argaddr.ll @@ -1,18 +1,19 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -void %f(int %a, int %b, int %c, int %d, int %e) { +; RUN: llvm-as < %s | llc -march=arm + +define void @f(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) { entry: - %a_addr = alloca int ; <int*> [#uses=2] - %b_addr = alloca int ; <int*> [#uses=2] - %c_addr = alloca int ; <int*> [#uses=2] - %d_addr = alloca int ; <int*> [#uses=2] - %e_addr = alloca int ; <int*> [#uses=2] - store int %a, int* %a_addr - store int %b, int* %b_addr - store int %c, int* %c_addr - store int %d, int* %d_addr - store int %e, int* %e_addr - call void %g( int* %a_addr, int* %b_addr, int* %c_addr, int* %d_addr, int* %e_addr ) - ret void + %a_addr = alloca i32 ; <i32*> [#uses=2] + %b_addr = alloca i32 ; <i32*> [#uses=2] + %c_addr = alloca i32 ; <i32*> [#uses=2] + %d_addr = alloca i32 ; <i32*> [#uses=2] + %e_addr = alloca i32 ; <i32*> [#uses=2] + store i32 %a, i32* %a_addr + store i32 %b, i32* %b_addr + store i32 %c, i32* %c_addr + store i32 %d, i32* %d_addr + store i32 %e, i32* %e_addr + call void @g( i32* %a_addr, i32* %b_addr, i32* %c_addr, i32* %d_addr, i32* %e_addr ) + ret void } -declare void %g(int*, int*, int*, int*, int*) +declare void @g(i32*, i32*, i32*, i32*, i32*) diff --git a/test/CodeGen/ARM/arm-asm.ll b/test/CodeGen/ARM/arm-asm.ll index 6b8ce9a..b260b13 100644 --- a/test/CodeGen/ARM/arm-asm.ll +++ b/test/CodeGen/ARM/arm-asm.ll @@ -1,7 +1,7 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm +; RUN: llvm-as < %s | llc -march=arm -void %frame_dummy() { +define void @frame_dummy() { entry: - %tmp1 = tail call void (sbyte*)* (void (sbyte*)*)* asm "", "=r,0,~{dirflag},~{fpsr},~{flags}"( void (sbyte*)* null ) - ret void + %tmp1 = tail call void (i8*)* (void (i8*)*)* asm "", "=r,0,~{dirflag},~{fpsr},~{flags}"( void (i8*)* null ) ; <void (i8*)*> [#uses=0] + ret void } diff --git a/test/CodeGen/ARM/branch.ll b/test/CodeGen/ARM/branch.ll index 591beb2..7f6b183 100644 --- a/test/CodeGen/ARM/branch.ll +++ b/test/CodeGen/ARM/branch.ll @@ -1,57 +1,57 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -disable-arm-if-conversion > %t +; RUN: llvm-as < %s | llc -march=arm -disable-arm-if-conversion > %t ; RUN: grep bne %t ; RUN: grep bge %t ; RUN: grep bhs %t ; RUN: grep blo %t -void %f1(int %a, int %b, int* %v) { +define void @f1(i32 %a, i32 %b, i32* %v) { entry: - %tmp = seteq int %a, %b ; <bool> [#uses=1] - br bool %tmp, label %cond_true, label %return + %tmp = icmp eq i32 %a, %b ; <i1> [#uses=1] + br i1 %tmp, label %cond_true, label %return -cond_true: ; preds = %entry - store int 0, int* %v - ret void +cond_true: ; preds = %entry + store i32 0, i32* %v + ret void -return: ; preds = %entry - ret void +return: ; preds = %entry + ret void } -void %f2(int %a, int %b, int* %v) { +define void @f2(i32 %a, i32 %b, i32* %v) { entry: - %tmp = setlt int %a, %b ; <bool> [#uses=1] - br bool %tmp, label %cond_true, label %return + %tmp = icmp slt i32 %a, %b ; <i1> [#uses=1] + br i1 %tmp, label %cond_true, label %return -cond_true: ; preds = %entry - store int 0, int* %v - ret void +cond_true: ; preds = %entry + store i32 0, i32* %v + ret void -return: ; preds = 
%entry - ret void +return: ; preds = %entry + ret void } -void %f3(uint %a, uint %b, int* %v) { +define void @f3(i32 %a, i32 %b, i32* %v) { entry: - %tmp = setlt uint %a, %b ; <bool> [#uses=1] - br bool %tmp, label %cond_true, label %return + %tmp = icmp ult i32 %a, %b ; <i1> [#uses=1] + br i1 %tmp, label %cond_true, label %return -cond_true: ; preds = %entry - store int 0, int* %v - ret void +cond_true: ; preds = %entry + store i32 0, i32* %v + ret void -return: ; preds = %entry - ret void +return: ; preds = %entry + ret void } -void %f4(uint %a, uint %b, int* %v) { +define void @f4(i32 %a, i32 %b, i32* %v) { entry: - %tmp = setlt uint %a, %b ; <bool> [#uses=1] - br bool %tmp, label %return, label %cond_true + %tmp = icmp ult i32 %a, %b ; <i1> [#uses=1] + br i1 %tmp, label %return, label %cond_true -cond_true: ; preds = %entry - store int 0, int* %v - ret void +cond_true: ; preds = %entry + store i32 0, i32* %v + ret void -return: ; preds = %entry - ret void +return: ; preds = %entry + ret void } diff --git a/test/CodeGen/ARM/call.ll b/test/CodeGen/ARM/call.ll index c7e10b1..6b19665 100644 --- a/test/CodeGen/ARM/call.ll +++ b/test/CodeGen/ARM/call.ll @@ -1,18 +1,19 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep {mov lr, pc} -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v5t | grep blx -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mtriple=arm-linux-gnueabi\ +; RUN: llvm-as < %s | llc -march=arm | grep {mov lr, pc} +; RUN: llvm-as < %s | llc -march=arm -mattr=+v5t | grep blx +; RUN: llvm-as < %s | llc -march=arm -mtriple=arm-linux-gnueabi\ ; RUN: -relocation-model=pic | grep {PLT} -%t = weak global int ()* null -declare void %g(int, int, int, int) +@t = weak global i32 ()* null ; <i32 ()**> [#uses=1] -void %f() { - call void %g( int 1, int 2, int 3, int 4 ) - ret void +declare void @g(i32, i32, i32, i32) + +define void @f() { + call void @g( i32 1, i32 2, i32 3, i32 4 ) + ret void } -void %g() { - %tmp = load int ()** %t - %tmp = tail call int %tmp( ) - ret void +define void @g.upgrd.1() { + %tmp = load i32 ()** @t ; <i32 ()*> [#uses=1] + %tmp.upgrd.2 = tail call i32 %tmp( ) ; <i32> [#uses=0] + ret void } diff --git a/test/CodeGen/ARM/clz.ll b/test/CodeGen/ARM/clz.ll index cdde95a..389fb2c 100644 --- a/test/CodeGen/ARM/clz.ll +++ b/test/CodeGen/ARM/clz.ll @@ -1,8 +1,8 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v5t | grep clz +; RUN: llvm-as < %s | llc -march=arm -mattr=+v5t | grep clz -declare uint %llvm.ctlz.i32(uint) +declare i32 @llvm.ctlz.i32(i32) -uint %test(uint %x) { - %tmp.1 = call uint %llvm.ctlz.i32( uint %x ) - ret uint %tmp.1 +define i32 @test(i32 %x) { + %tmp.1 = call i32 @llvm.ctlz.i32( i32 %x ) ; <i32> [#uses=1] + ret i32 %tmp.1 } diff --git a/test/CodeGen/ARM/compare-call.ll b/test/CodeGen/ARM/compare-call.ll index 3fcded8..fcb8b17 100644 --- a/test/CodeGen/ARM/compare-call.ll +++ b/test/CodeGen/ARM/compare-call.ll @@ -1,20 +1,20 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v6,+vfp2 | \ +; RUN: llvm-as < %s | llc -march=arm -mattr=+v6,+vfp2 | \ ; RUN: grep fcmpes -void %test3(float* %glob, int %X) { +define void @test3(float* %glob, i32 %X) { entry: - %tmp = load float* %glob ; <float> [#uses=1] - %tmp2 = getelementptr float* %glob, int 2 ; <float*> [#uses=1] - %tmp3 = load float* %tmp2 ; <float> [#uses=1] - %tmp = setgt float %tmp, %tmp3 ; <bool> [#uses=1] - br bool %tmp, label %cond_true, label %UnifiedReturnBlock + %tmp = load float* %glob ; <float> [#uses=1] + %tmp2 = getelementptr float* 
%glob, i32 2 ; <float*> [#uses=1] + %tmp3 = load float* %tmp2 ; <float> [#uses=1] + %tmp.upgrd.1 = fcmp ogt float %tmp, %tmp3 ; <i1> [#uses=1] + br i1 %tmp.upgrd.1, label %cond_true, label %UnifiedReturnBlock -cond_true: ; preds = %entry - %tmp = tail call int (...)* %bar( ) ; <int> [#uses=0] - ret void +cond_true: ; preds = %entry + %tmp.upgrd.2 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0] + ret void -UnifiedReturnBlock: ; preds = %entry - ret void +UnifiedReturnBlock: ; preds = %entry + ret void } -declare int %bar(...) +declare i32 @bar(...) diff --git a/test/CodeGen/ARM/constants.ll b/test/CodeGen/ARM/constants.ll index 63cca24..2d86312 100644 --- a/test/CodeGen/ARM/constants.ll +++ b/test/CodeGen/ARM/constants.ll @@ -1,46 +1,42 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ +; RUN: llvm-as < %s | llc -march=arm | \ ; RUN: grep {mov r0, #0} | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ +; RUN: llvm-as < %s | llc -march=arm | \ ; RUN: grep {mov r0, #255$} | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ +; RUN: llvm-as < %s | llc -march=arm | \ ; RUN: grep {mov r0.*256} | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ -; RUN: grep {orr.*256} | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ -; RUN: grep {mov r0, .*-1073741761} | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ -; RUN: grep {mov r0, .*1008} | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ -; RUN: grep {cmp r0, #1, 16} | count 1 - -uint %f1() { - ret uint 0 +; RUN: llvm-as < %s | llc -march=arm | grep {orr.*256} | count 1 +; RUN: llvm-as < %s | llc -march=arm | grep {mov r0, .*-1073741761} | count 1 +; RUN: llvm-as < %s | llc -march=arm | grep {mov r0, .*1008} | count 1 +; RUN: llvm-as < %s | llc -march=arm | grep {cmp r0, #1, 16} | count 1 + +define i32 @f1() { + ret i32 0 } -uint %f2() { - ret uint 255 +define i32 @f2() { + ret i32 255 } -uint %f3() { - ret uint 256 +define i32 @f3() { + ret i32 256 } -uint %f4() { - ret uint 257 +define i32 @f4() { + ret i32 257 } -uint %f5() { - ret uint 3221225535 +define i32 @f5() { + ret i32 -1073741761 } -uint %f6() { - ret uint 1008 +define i32 @f6() { + ret i32 1008 } -void %f7(uint %a) { - %b = setgt uint %a, 65536 - br bool %b, label %r, label %r +define void @f7(i32 %a) { + %b = icmp ugt i32 %a, 65536 ; <i1> [#uses=1] + br i1 %b, label %r, label %r -r: - ret void +r: ; preds = %0, %0 + ret void } diff --git a/test/CodeGen/ARM/ctors_dtors.ll b/test/CodeGen/ARM/ctors_dtors.ll index cf58ca4..5caa5b1 100644 --- a/test/CodeGen/ARM/ctors_dtors.ll +++ b/test/CodeGen/ARM/ctors_dtors.ll @@ -1,25 +1,25 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=arm-apple-darwin | \ +; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin | \ ; RUN: grep {\\.mod_init_func} -; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=arm-apple-darwin | \ +; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin | \ ; RUN: grep {\\.mod_term_func} -; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=arm-linux-gnu | \ +; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnu | \ ; RUN: grep {\\.section \\.ctors,"aw",.progbits} -; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=arm-linux-gnu | \ +; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnu | \ ; RUN: grep {\\.section \\.dtors,"aw",.progbits} -; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=arm-linux-gnueabi | \ +; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnueabi | \ ; RUN: grep {\\.section \\.init_array,"aw",.init_array} -; RUN: llvm-upgrade < 
%s | llvm-as | llc -mtriple=arm-linux-gnueabi | \ +; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnueabi | \ ; RUN: grep {\\.section \\.fini_array,"aw",.fini_array} -%llvm.global_ctors = appending global [1 x { int, void ()* }] [ { int, void ()* } { int 65535, void ()* %__mf_init } ] ; <[1 x { int, void ()* }]*> [#uses=0] -%llvm.global_dtors = appending global [1 x { int, void ()* }] [ { int, void ()* } { int 65535, void ()* %__mf_fini } ] ; <[1 x { int, void ()* }]*> [#uses=0] +@llvm.global_ctors = appending global [1 x { i32, void ()* }] [ { i32, void ()* } { i32 65535, void ()* @__mf_init } ] ; <[1 x { i32, void ()* }]*> [#uses=0] +@llvm.global_dtors = appending global [1 x { i32, void ()* }] [ { i32, void ()* } { i32 65535, void ()* @__mf_fini } ] ; <[1 x { i32, void ()* }]*> [#uses=0] -void %__mf_init() { +define void @__mf_init() { entry: - ret void + ret void } -void %__mf_fini() { +define void @__mf_fini() { entry: - ret void + ret void } diff --git a/test/CodeGen/ARM/div.ll b/test/CodeGen/ARM/div.ll index 3f8a752..1085ec7 100644 --- a/test/CodeGen/ARM/div.ll +++ b/test/CodeGen/ARM/div.ll @@ -1,29 +1,30 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm > %t +; RUN: llvm-as < %s | llc -march=arm > %t ; RUN: grep __divsi3 %t ; RUN: grep __udivsi3 %t ; RUN: grep __modsi3 %t ; RUN: grep __umodsi3 %t -int %f1(int %a, int %b) { +define i32 @f1(i32 %a, i32 %b) { entry: - %tmp1 = div int %a, %b - ret int %tmp1 + %tmp1 = sdiv i32 %a, %b ; <i32> [#uses=1] + ret i32 %tmp1 } -uint %f2(uint %a, uint %b) { +define i32 @f2(i32 %a, i32 %b) { entry: - %tmp1 = div uint %a, %b - ret uint %tmp1 + %tmp1 = udiv i32 %a, %b ; <i32> [#uses=1] + ret i32 %tmp1 } -int %f3(int %a, int %b) { +define i32 @f3(i32 %a, i32 %b) { entry: - %tmp1 = rem int %a, %b - ret int %tmp1 + %tmp1 = srem i32 %a, %b ; <i32> [#uses=1] + ret i32 %tmp1 } -uint %f4(uint %a, uint %b) { +define i32 @f4(i32 %a, i32 %b) { entry: - %tmp1 = rem uint %a, %b - ret uint %tmp1 + %tmp1 = urem i32 %a, %b ; <i32> [#uses=1] + ret i32 %tmp1 } + diff --git a/test/CodeGen/ARM/extloadi1.ll b/test/CodeGen/ARM/extloadi1.ll index b4dcd7f..2e9041c 100644 --- a/test/CodeGen/ARM/extloadi1.ll +++ b/test/CodeGen/ARM/extloadi1.ll @@ -1,22 +1,20 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm +; RUN: llvm-as < %s | llc -march=arm +@handler_installed.6144.b = external global i1 ; <i1*> [#uses=1] -%handler_installed.6144.b = external global bool ; <bool*> [#uses=1] - - -void %__mf_sigusr1_respond() { +define void @__mf_sigusr1_respond() { entry: - %tmp8.b = load bool* %handler_installed.6144.b ; <bool> [#uses=1] - br bool false, label %cond_true7, label %cond_next + %tmp8.b = load i1* @handler_installed.6144.b ; <i1> [#uses=1] + br i1 false, label %cond_true7, label %cond_next -cond_next: ; preds = %entry - br bool %tmp8.b, label %bb, label %cond_next3 +cond_next: ; preds = %entry + br i1 %tmp8.b, label %bb, label %cond_next3 -cond_next3: ; preds = %cond_next - ret void +cond_next3: ; preds = %cond_next + ret void -bb: ; preds = %cond_next - ret void +bb: ; preds = %cond_next + ret void -cond_true7: ; preds = %entry - ret void +cond_true7: ; preds = %entry + ret void } diff --git a/test/CodeGen/ARM/fp.ll b/test/CodeGen/ARM/fp.ll index ec4890f..ba199db 100644 --- a/test/CodeGen/ARM/fp.ll +++ b/test/CodeGen/ARM/fp.ll @@ -1,4 +1,4 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 > %t +; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 > %t ; RUN: grep fmsr %t | count 4 ; RUN: grep fsitos %t ; RUN: grep fmrs %t | count 2 @@ -10,52 
+10,53 @@ ; RUN: grep fuitos %t ; RUN: grep 1065353216 %t -float %f(int %a) { +define float @f(i32 %a) { entry: - %tmp = cast int %a to float ; <float> [#uses=1] - ret float %tmp + %tmp = sitofp i32 %a to float ; <float> [#uses=1] + ret float %tmp } -double %g(int %a) { +define double @g(i32 %a) { entry: - %tmp = cast int %a to double ; <double> [#uses=1] + %tmp = sitofp i32 %a to double ; <double> [#uses=1] ret double %tmp } -double %uint_to_double(uint %a) { +define double @uint_to_double(i32 %a) { entry: - %tmp = cast uint %a to double - ret double %tmp + %tmp = uitofp i32 %a to double ; <double> [#uses=1] + ret double %tmp } -float %uint_to_float(uint %a) { +define float @uint_to_float(i32 %a) { entry: - %tmp = cast uint %a to float - ret float %tmp + %tmp = uitofp i32 %a to float ; <float> [#uses=1] + ret float %tmp } - -double %h(double* %v) { +define double @h(double* %v) { entry: - %tmp = load double* %v ; <double> [#uses=1] - ret double %tmp + %tmp = load double* %v ; <double> [#uses=1] + ret double %tmp } -float %h2() { +define float @h2() { entry: ret float 1.000000e+00 } -double %f2(double %a) { +define double @f2(double %a) { ret double %a } -void %f3() { +define void @f3() { entry: - %tmp = call double %f5() ; <double> [#uses=1] - call void %f4(double %tmp ) - ret void + %tmp = call double @f5( ) ; <double> [#uses=1] + call void @f4( double %tmp ) + ret void } -declare void %f4(double) -declare double %f5() +declare void @f4(double) + +declare double @f5() + diff --git a/test/CodeGen/ARM/fparith.ll b/test/CodeGen/ARM/fparith.ll index 6dfe860..11933d5 100644 --- a/test/CodeGen/ARM/fparith.ll +++ b/test/CodeGen/ARM/fparith.ll @@ -1,4 +1,4 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 > %t +; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 > %t ; RUN: grep fadds %t ; RUN: grep faddd %t ; RUN: grep fmuls %t @@ -8,79 +8,78 @@ ; RUN: grep fdivs %t ; RUN: grep fdivd %t - -float %f1(float %a, float %b) { +define float @f1(float %a, float %b) { entry: - %tmp = add float %a, %b + %tmp = add float %a, %b ; <float> [#uses=1] ret float %tmp } -double %f2(double %a, double %b) { +define double @f2(double %a, double %b) { entry: - %tmp = add double %a, %b + %tmp = add double %a, %b ; <double> [#uses=1] ret double %tmp } -float %f3(float %a, float %b) { +define float @f3(float %a, float %b) { entry: - %tmp = mul float %a, %b + %tmp = mul float %a, %b ; <float> [#uses=1] ret float %tmp } -double %f4(double %a, double %b) { +define double @f4(double %a, double %b) { entry: - %tmp = mul double %a, %b + %tmp = mul double %a, %b ; <double> [#uses=1] ret double %tmp } -float %f5(float %a, float %b) { +define float @f5(float %a, float %b) { entry: - %tmp = sub float %a, %b + %tmp = sub float %a, %b ; <float> [#uses=1] ret float %tmp } -double %f6(double %a, double %b) { +define double @f6(double %a, double %b) { entry: - %tmp = sub double %a, %b + %tmp = sub double %a, %b ; <double> [#uses=1] ret double %tmp } -float %f7(float %a) { +define float @f7(float %a) { entry: - %tmp1 = sub float -0.000000e+00, %a + %tmp1 = sub float -0.000000e+00, %a ; <float> [#uses=1] ret float %tmp1 } -double %f8(double %a) { +define double @f8(double %a) { entry: - %tmp1 = sub double -0.000000e+00, %a + %tmp1 = sub double -0.000000e+00, %a ; <double> [#uses=1] ret double %tmp1 } -float %f9(float %a, float %b) { +define float @f9(float %a, float %b) { entry: - %tmp1 = div float %a, %b + %tmp1 = fdiv float %a, %b ; <float> [#uses=1] ret float %tmp1 } -double %f10(double %a, double %b) { +define 
double @f10(double %a, double %b) { entry: - %tmp1 = div double %a, %b + %tmp1 = fdiv double %a, %b ; <double> [#uses=1] ret double %tmp1 } -float %f11(float %a) { +define float @f11(float %a) { entry: - %tmp1 = call float %fabsf(float %a) + %tmp1 = call float @fabsf( float %a ) ; <float> [#uses=1] ret float %tmp1 } -declare float %fabsf(float) +declare float @fabsf(float) -double %f12(double %a) { +define double @f12(double %a) { entry: - %tmp1 = call double %fabs(double %a) + %tmp1 = call double @fabs( double %a ) ; <double> [#uses=1] ret double %tmp1 } -declare double %fabs(double) +declare double @fabs(double) diff --git a/test/CodeGen/ARM/fpcmp.ll b/test/CodeGen/ARM/fpcmp.ll index f16c9d4..ce0f402 100644 --- a/test/CodeGen/ARM/fpcmp.ll +++ b/test/CodeGen/ARM/fpcmp.ll @@ -1,4 +1,4 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 > %t +; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 > %t ; RUN: grep movmi %t ; RUN: grep moveq %t ; RUN: grep movgt %t @@ -7,51 +7,51 @@ ; RUN: grep fcmped %t | count 1 ; RUN: grep fcmpes %t | count 6 -int %f1(float %a) { +define i32 @f1(float %a) { entry: - %tmp = setlt float %a, 1.000000e+00 ; <bool> [#uses=1] - %tmp = cast bool %tmp to int ; <int> [#uses=1] - ret int %tmp + %tmp = fcmp olt float %a, 1.000000e+00 ; <i1> [#uses=1] + %tmp1 = zext i1 %tmp to i32 ; <i32> [#uses=1] + ret i32 %tmp1 } -int %f2(float %a) { +define i32 @f2(float %a) { entry: - %tmp = seteq float %a, 1.000000e+00 ; <bool> [#uses=1] - %tmp = cast bool %tmp to int ; <int> [#uses=1] - ret int %tmp + %tmp = fcmp oeq float %a, 1.000000e+00 ; <i1> [#uses=1] + %tmp2 = zext i1 %tmp to i32 ; <i32> [#uses=1] + ret i32 %tmp2 } -int %f3(float %a) { +define i32 @f3(float %a) { entry: - %tmp = setgt float %a, 1.000000e+00 ; <bool> [#uses=1] - %tmp = cast bool %tmp to int ; <int> [#uses=1] - ret int %tmp + %tmp = fcmp ogt float %a, 1.000000e+00 ; <i1> [#uses=1] + %tmp3 = zext i1 %tmp to i32 ; <i32> [#uses=1] + ret i32 %tmp3 } -int %f4(float %a) { +define i32 @f4(float %a) { entry: - %tmp = setge float %a, 1.000000e+00 ; <bool> [#uses=1] - %tmp = cast bool %tmp to int ; <int> [#uses=1] - ret int %tmp + %tmp = fcmp oge float %a, 1.000000e+00 ; <i1> [#uses=1] + %tmp4 = zext i1 %tmp to i32 ; <i32> [#uses=1] + ret i32 %tmp4 } -int %f5(float %a) { +define i32 @f5(float %a) { entry: - %tmp = setle float %a, 1.000000e+00 ; <bool> [#uses=1] - %tmp = cast bool %tmp to int ; <int> [#uses=1] - ret int %tmp + %tmp = fcmp ole float %a, 1.000000e+00 ; <i1> [#uses=1] + %tmp5 = zext i1 %tmp to i32 ; <i32> [#uses=1] + ret i32 %tmp5 } -int %f6(float %a) { +define i32 @f6(float %a) { entry: - %tmp = setne float %a, 1.000000e+00 ; <bool> [#uses=1] - %tmp = cast bool %tmp to int ; <int> [#uses=1] - ret int %tmp + %tmp = fcmp une float %a, 1.000000e+00 ; <i1> [#uses=1] + %tmp6 = zext i1 %tmp to i32 ; <i32> [#uses=1] + ret i32 %tmp6 } -int %g1(double %a) { +define i32 @g1(double %a) { entry: - %tmp = setlt double %a, 1.000000e+00 ; <bool> [#uses=1] - %tmp = cast bool %tmp to int ; <int> [#uses=1] - ret int %tmp + %tmp = fcmp olt double %a, 1.000000e+00 ; <i1> [#uses=1] + %tmp7 = zext i1 %tmp to i32 ; <i32> [#uses=1] + ret i32 %tmp7 } diff --git a/test/CodeGen/ARM/fpconv.ll b/test/CodeGen/ARM/fpconv.ll index 06e8069..fd13227 100644 --- a/test/CodeGen/ARM/fpconv.ll +++ b/test/CodeGen/ARM/fpconv.ll @@ -1,4 +1,4 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 > %t +; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 > %t ; RUN: grep fcvtsd %t ; RUN: grep fcvtds %t ; RUN: grep ftosizs 
%t @@ -10,62 +10,62 @@ ; RUN: grep fuitos %t ; RUN: grep fuitod %t -float %f1(double %x) { +define float @f1(double %x) { entry: - %tmp1 = cast double %x to float + %tmp1 = fptrunc double %x to float ; <float> [#uses=1] ret float %tmp1 } -double %f2(float %x) { +define double @f2(float %x) { entry: - %tmp1 = cast float %x to double + %tmp1 = fpext float %x to double ; <double> [#uses=1] ret double %tmp1 } -int %f3(float %x) { +define i32 @f3(float %x) { entry: - %tmp = cast float %x to int - ret int %tmp + %tmp = fptosi float %x to i32 ; <i32> [#uses=1] + ret i32 %tmp } -uint %f4(float %x) { +define i32 @f4(float %x) { entry: - %tmp = cast float %x to uint - ret uint %tmp + %tmp = fptoui float %x to i32 ; <i32> [#uses=1] + ret i32 %tmp } -int %f5(double %x) { +define i32 @f5(double %x) { entry: - %tmp = cast double %x to int - ret int %tmp + %tmp = fptosi double %x to i32 ; <i32> [#uses=1] + ret i32 %tmp } -uint %f6(double %x) { +define i32 @f6(double %x) { entry: - %tmp = cast double %x to uint - ret uint %tmp + %tmp = fptoui double %x to i32 ; <i32> [#uses=1] + ret i32 %tmp } -float %f7(int %a) { +define float @f7(i32 %a) { entry: - %tmp = cast int %a to float + %tmp = sitofp i32 %a to float ; <float> [#uses=1] ret float %tmp } -double %f8(int %a) { +define double @f8(i32 %a) { entry: - %tmp = cast int %a to double - ret double %tmp + %tmp = sitofp i32 %a to double ; <double> [#uses=1] + ret double %tmp } -float %f9(uint %a) { +define float @f9(i32 %a) { entry: - %tmp = cast uint %a to float + %tmp = uitofp i32 %a to float ; <float> [#uses=1] ret float %tmp } -double %f10(uint %a) { +define double @f10(i32 %a) { entry: - %tmp = cast uint %a to double + %tmp = uitofp i32 %a to double ; <double> [#uses=1] ret double %tmp } diff --git a/test/CodeGen/ARM/fpmem.ll b/test/CodeGen/ARM/fpmem.ll index 936d6fd..48204ec 100644 --- a/test/CodeGen/ARM/fpmem.ll +++ b/test/CodeGen/ARM/fpmem.ll @@ -1,22 +1,22 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ +; RUN: llvm-as < %s | llc -march=arm | \ ; RUN: grep {mov r0, #0} | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \ +; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \ ; RUN: grep {flds.*\\\[} | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \ +; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \ ; RUN: grep {fsts.*\\\[} | count 1 -float %f1(float %a) { - ret float 0.000000e+00 +define float @f1(float %a) { + ret float 0.000000e+00 } -float %f2(float* %v, float %u) { - %tmp = load float* %v - %tmp1 = add float %tmp, %u - ret float %tmp1 +define float @f2(float* %v, float %u) { + %tmp = load float* %v ; <float> [#uses=1] + %tmp1 = add float %tmp, %u ; <float> [#uses=1] + ret float %tmp1 } -void %f3(float %a, float %b, float* %v) { - %tmp = add float %a, %b - store float %tmp, float* %v - ret void +define void @f3(float %a, float %b, float* %v) { + %tmp = add float %a, %b ; <float> [#uses=1] + store float %tmp, float* %v + ret void } diff --git a/test/CodeGen/ARM/imm.ll b/test/CodeGen/ARM/imm.ll index 31db7a3..998adba 100644 --- a/test/CodeGen/ARM/imm.ll +++ b/test/CodeGen/ARM/imm.ll @@ -1,17 +1,16 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | not grep CPI +; RUN: llvm-as < %s | llc -march=arm | not grep CPI -int %test1(int %A) { - %B = add int %A, -268435441 ; 0xF000000F - ret int %B +define i32 @test1(i32 %A) { + %B = add i32 %A, -268435441 ; <i32> [#uses=1] + ret i32 %B } -int %test2() { - ret int 65533 +define i32 @test2() { + ret i32 65533 } -int %test3(int %A) 
{ - %B = or int %A, 65533 - ret int %B +define i32 @test3(i32 %A) { + %B = or i32 %A, 65533 ; <i32> [#uses=1] + ret i32 %B } - diff --git a/test/CodeGen/ARM/insn-sched1.ll b/test/CodeGen/ARM/insn-sched1.ll index a993e65..f203443 100644 --- a/test/CodeGen/ARM/insn-sched1.ll +++ b/test/CodeGen/ARM/insn-sched1.ll @@ -1,11 +1,11 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v6 -; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=arm-apple-darwin -mattr=+v6 |\ +; RUN: llvm-as < %s | llc -march=arm -mattr=+v6 +; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin -mattr=+v6 |\ ; RUN: grep mov | count 3 -int %test(int %x) { - %tmp = cast int %x to short - %tmp2 = tail call int %f( int 1, short %tmp ) - ret int %tmp2 +define i32 @test(i32 %x) { + %tmp = trunc i32 %x to i16 ; <i16> [#uses=1] + %tmp2 = tail call i32 @f( i32 1, i16 %tmp ) ; <i32> [#uses=1] + ret i32 %tmp2 } -declare int %f(int, short) +declare i32 @f(i32, i16) diff --git a/test/CodeGen/ARM/ldm.ll b/test/CodeGen/ARM/ldm.ll index 4691725..6a05457 100644 --- a/test/CodeGen/ARM/ldm.ll +++ b/test/CodeGen/ARM/ldm.ll @@ -1,34 +1,35 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ +; RUN: llvm-as < %s | llc -march=arm | \ ; RUN: grep ldmia | count 2 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ +; RUN: llvm-as < %s | llc -march=arm | \ ; RUN: grep ldmib | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=arm-apple-darwin | \ +; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin | \ ; RUN: grep {ldmfd sp\!} | count 3 -%X = external global [0 x int] +@X = external global [0 x i32] ; <[0 x i32]*> [#uses=5] -int %t1() { - %tmp = load int* getelementptr ([0 x int]* %X, int 0, int 0) - %tmp3 = load int* getelementptr ([0 x int]* %X, int 0, int 1) - %tmp4 = tail call int %f1( int %tmp, int %tmp3 ) - ret int %tmp4 +define i32 @t1() { + %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 0) ; <i32> [#uses=1] + %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1) ; <i32> [#uses=1] + %tmp4 = tail call i32 @f1( i32 %tmp, i32 %tmp3 ) ; <i32> [#uses=1] + ret i32 %tmp4 } -int %t2() { - %tmp = load int* getelementptr ([0 x int]* %X, int 0, int 2) - %tmp3 = load int* getelementptr ([0 x int]* %X, int 0, int 3) - %tmp5 = load int* getelementptr ([0 x int]* %X, int 0, int 4) - %tmp6 = tail call int %f2( int %tmp, int %tmp3, int %tmp5 ) - ret int %tmp6 +define i32 @t2() { + %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2) ; <i32> [#uses=1] + %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 3) ; <i32> [#uses=1] + %tmp5 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 4) ; <i32> [#uses=1] + %tmp6 = tail call i32 @f2( i32 %tmp, i32 %tmp3, i32 %tmp5 ) ; <i32> [#uses=1] + ret i32 %tmp6 } -int %t3() { - %tmp = load int* getelementptr ([0 x int]* %X, int 0, int 1) - %tmp3 = load int* getelementptr ([0 x int]* %X, int 0, int 2) - %tmp5 = load int* getelementptr ([0 x int]* %X, int 0, int 3) - %tmp6 = tail call int %f2( int %tmp, int %tmp3, int %tmp5 ) - ret int %tmp6 +define i32 @t3() { + %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1) ; <i32> [#uses=1] + %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2) ; <i32> [#uses=1] + %tmp5 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 3) ; <i32> [#uses=1] + %tmp6 = tail call i32 @f2( i32 %tmp, i32 %tmp3, i32 %tmp5 ) ; <i32> [#uses=1] + ret i32 %tmp6 } -declare int %f1(int, int) -declare int %f2(int, int, int) +declare i32 @f1(i32, i32) + +declare i32 @f2(i32, i32, i32) diff --git a/test/CodeGen/ARM/ldr.ll 
b/test/CodeGen/ARM/ldr.ll index 833668c..23c0b99 100644 --- a/test/CodeGen/ARM/ldr.ll +++ b/test/CodeGen/ARM/ldr.ll @@ -1,22 +1,23 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ +; RUN: llvm-as < %s | llc -march=arm | \ ; RUN: grep {ldr r0} | count 3 -int %f1(int* %v) { +define i32 @f1(i32* %v) { entry: - %tmp = load int* %v ; <int> [#uses=1] - ret int %tmp + %tmp = load i32* %v ; <i32> [#uses=1] + ret i32 %tmp } -int %f2(int* %v) { +define i32 @f2(i32* %v) { entry: - %tmp2 = getelementptr int* %v, int 1023 ; <int*> [#uses=1] - %tmp = load int* %tmp2 ; <int> [#uses=1] - ret int %tmp + %tmp2 = getelementptr i32* %v, i32 1023 ; <i32*> [#uses=1] + %tmp = load i32* %tmp2 ; <i32> [#uses=1] + ret i32 %tmp } -int %f3(int* %v) { +define i32 @f3(i32* %v) { entry: - %tmp2 = getelementptr int* %v, int 1024 ; <int*> [#uses=1] - %tmp = load int* %tmp2 ; <int> [#uses=1] - ret int %tmp + %tmp2 = getelementptr i32* %v, i32 1024 ; <i32*> [#uses=1] + %tmp = load i32* %tmp2 ; <i32> [#uses=1] + ret i32 %tmp } + diff --git a/test/CodeGen/ARM/ldr_post.ll b/test/CodeGen/ARM/ldr_post.ll index a1b31ec..0491563 100644 --- a/test/CodeGen/ARM/ldr_post.ll +++ b/test/CodeGen/ARM/ldr_post.ll @@ -1,11 +1,12 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ +; RUN: llvm-as < %s | llc -march=arm | \ ; RUN: grep {ldr.*\\\[.*\],} | count 1 -int %test(int %a, int %b, int %c) { - %tmp1 = mul int %a, %b - %tmp2 = cast int %tmp1 to int* - %tmp3 = load int* %tmp2 - %tmp4 = sub int %tmp1, %c - %tmp5 = mul int %tmp4, %tmp3 - ret int %tmp5 +define i32 @test(i32 %a, i32 %b, i32 %c) { + %tmp1 = mul i32 %a, %b ; <i32> [#uses=2] + %tmp2 = inttoptr i32 %tmp1 to i32* ; <i32*> [#uses=1] + %tmp3 = load i32* %tmp2 ; <i32> [#uses=1] + %tmp4 = sub i32 %tmp1, %c ; <i32> [#uses=1] + %tmp5 = mul i32 %tmp4, %tmp3 ; <i32> [#uses=1] + ret i32 %tmp5 } + diff --git a/test/CodeGen/ARM/ldr_pre.ll b/test/CodeGen/ARM/ldr_pre.ll index 4bc1fcc..7e44742 100644 --- a/test/CodeGen/ARM/ldr_pre.ll +++ b/test/CodeGen/ARM/ldr_pre.ll @@ -1,18 +1,19 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ +; RUN: llvm-as < %s | llc -march=arm | \ ; RUN: grep {ldr.*\\!} | count 2 -int *%test1(int *%X, int *%dest) { - %Y = getelementptr int* %X, int 4 - %A = load int* %Y - store int %A, int* %dest - ret int* %Y +define i32* @test1(i32* %X, i32* %dest) { + %Y = getelementptr i32* %X, i32 4 ; <i32*> [#uses=2] + %A = load i32* %Y ; <i32> [#uses=1] + store i32 %A, i32* %dest + ret i32* %Y } -int %test2(int %a, int %b, int %c) { - %tmp1 = sub int %a, %b - %tmp2 = cast int %tmp1 to int* - %tmp3 = load int* %tmp2 - %tmp4 = sub int %tmp1, %c - %tmp5 = add int %tmp4, %tmp3 - ret int %tmp5 +define i32 @test2(i32 %a, i32 %b, i32 %c) { + %tmp1 = sub i32 %a, %b ; <i32> [#uses=2] + %tmp2 = inttoptr i32 %tmp1 to i32* ; <i32*> [#uses=1] + %tmp3 = load i32* %tmp2 ; <i32> [#uses=1] + %tmp4 = sub i32 %tmp1, %c ; <i32> [#uses=1] + %tmp5 = add i32 %tmp4, %tmp3 ; <i32> [#uses=1] + ret i32 %tmp5 } + diff --git a/test/CodeGen/ARM/load.ll b/test/CodeGen/ARM/load.ll index f3d6cf6..0509732 100644 --- a/test/CodeGen/ARM/load.ll +++ b/test/CodeGen/ARM/load.ll @@ -1,33 +1,34 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm > %t +; RUN: llvm-as < %s | llc -march=arm > %t ; RUN: grep ldrsb %t ; RUN: grep ldrb %t ; RUN: grep ldrsh %t ; RUN: grep ldrh %t -int %f1(sbyte* %p) { + +define i32 @f1(i8* %p) { entry: - %tmp = load sbyte* %p ; <sbyte> [#uses=1] - %tmp = cast sbyte %tmp to int ; <int> [#uses=1] - ret int %tmp + %tmp = load i8* %p ; <i8> [#uses=1] + %tmp1 = sext 
i8 %tmp to i32 ; <i32> [#uses=1] + ret i32 %tmp1 } -int %f2(ubyte* %p) { +define i32 @f2(i8* %p) { entry: - %tmp = load ubyte* %p ; <sbyte> [#uses=1] - %tmp = cast ubyte %tmp to int ; <int> [#uses=1] - ret int %tmp + %tmp = load i8* %p ; <i8> [#uses=1] + %tmp2 = zext i8 %tmp to i32 ; <i32> [#uses=1] + ret i32 %tmp2 } -int %f3(short* %p) { +define i32 @f3(i16* %p) { entry: - %tmp = load short* %p ; <sbyte> [#uses=1] - %tmp = cast short %tmp to int ; <int> [#uses=1] - ret int %tmp + %tmp = load i16* %p ; <i16> [#uses=1] + %tmp3 = sext i16 %tmp to i32 ; <i32> [#uses=1] + ret i32 %tmp3 } -int %f4(ushort* %p) { +define i32 @f4(i16* %p) { entry: - %tmp = load ushort* %p ; <sbyte> [#uses=1] - %tmp = cast ushort %tmp to int ; <int> [#uses=1] - ret int %tmp + %tmp = load i16* %p ; <i16> [#uses=1] + %tmp4 = zext i16 %tmp to i32 ; <i32> [#uses=1] + ret i32 %tmp4 } diff --git a/test/CodeGen/ARM/long.ll b/test/CodeGen/ARM/long.ll index fc05d1e..53798ed 100644 --- a/test/CodeGen/ARM/long.ll +++ b/test/CodeGen/ARM/long.ll @@ -1,86 +1,87 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ +; RUN: llvm-as < %s | llc -march=arm | \ ; RUN: grep -- {-2147483648} | count 3 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep mvn | count 3 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep adds | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep adc | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep {subs } | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep sbc | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ +; RUN: llvm-as < %s | llc -march=arm | grep mvn | count 3 +; RUN: llvm-as < %s | llc -march=arm | grep adds | count 1 +; RUN: llvm-as < %s | llc -march=arm | grep adc | count 1 +; RUN: llvm-as < %s | llc -march=arm | grep {subs } | count 1 +; RUN: llvm-as < %s | llc -march=arm | grep sbc | count 1 +; RUN: llvm-as < %s | llc -march=arm | \ ; RUN: grep smull | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \ +; RUN: llvm-as < %s | llc -march=arm | \ ; RUN: grep umull | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=thumb | \ +; RUN: llvm-as < %s | llc -march=thumb | \ ; RUN: grep mvn | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=thumb | \ +; RUN: llvm-as < %s | llc -march=thumb | \ ; RUN: grep adc | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=thumb | \ +; RUN: llvm-as < %s | llc -march=thumb | \ ; RUN: grep sbc | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=thumb | grep __muldi3 -; END. 
+; RUN: llvm-as < %s | llc -march=thumb | grep __muldi3 -long %f1() { +define i64 @f1() { entry: - ret long 0 + ret i64 0 } -long %f2() { +define i64 @f2() { entry: - ret long 1 + ret i64 1 } -long %f3() { +define i64 @f3() { entry: - ret long 2147483647 + ret i64 2147483647 } -long %f4() { +define i64 @f4() { entry: - ret long 2147483648 + ret i64 2147483648 } -long %f5() { +define i64 @f5() { entry: - ret long 9223372036854775807 + ret i64 9223372036854775807 } -ulong %f6(ulong %x, ulong %y) { +define i64 @f6(i64 %x, i64 %y) { entry: - %tmp1 = add ulong %y, 1 - ret ulong %tmp1 + %tmp1 = add i64 %y, 1 ; <i64> [#uses=1] + ret i64 %tmp1 } -void %f7() { +define void @f7() { entry: - %tmp = call long %f8() - ret void + %tmp = call i64 @f8( ) ; <i64> [#uses=0] + ret void } -declare long %f8() -long %f9(long %a, long %b) { +declare i64 @f8() + +define i64 @f9(i64 %a, i64 %b) { entry: - %tmp = sub long %a, %b - ret long %tmp + %tmp = sub i64 %a, %b ; <i64> [#uses=1] + ret i64 %tmp } -long %f(int %a, int %b) { +define i64 @f(i32 %a, i32 %b) { entry: - %tmp = cast int %a to long - %tmp1 = cast int %b to long - %tmp2 = mul long %tmp1, %tmp - ret long %tmp2 + %tmp = sext i32 %a to i64 ; <i64> [#uses=1] + %tmp1 = sext i32 %b to i64 ; <i64> [#uses=1] + %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1] + ret i64 %tmp2 } -ulong %g(uint %a, uint %b) { +define i64 @g(i32 %a, i32 %b) { entry: - %tmp = cast uint %a to ulong - %tmp1 = cast uint %b to ulong - %tmp2 = mul ulong %tmp1, %tmp - ret ulong %tmp2 + %tmp = zext i32 %a to i64 ; <i64> [#uses=1] + %tmp1 = zext i32 %b to i64 ; <i64> [#uses=1] + %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1] + ret i64 %tmp2 } -ulong %f10() { +define i64 @f10() { entry: - %a = alloca ulong, align 8 - %retval = load ulong* %a - ret ulong %retval + %a = alloca i64, align 8 ; <i64*> [#uses=1] + %retval = load i64* %a ; <i64> [#uses=1] + ret i64 %retval } + diff --git a/test/CodeGen/ARM/mem.ll b/test/CodeGen/ARM/mem.ll index d598d47..e983165 100644 --- a/test/CodeGen/ARM/mem.ll +++ b/test/CodeGen/ARM/mem.ll @@ -1,14 +1,14 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep strb -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep strh +; RUN: llvm-as < %s | llc -march=arm | grep strb +; RUN: llvm-as < %s | llc -march=arm | grep strh -void %f1() { +define void @f1() { entry: - store ubyte 0, ubyte* null - ret void + store i8 0, i8* null + ret void } -void %f2() { +define void @f2() { entry: - store short 0, short* null - ret void + store i16 0, i16* null + ret void } diff --git a/test/CodeGen/ARM/memfunc.ll b/test/CodeGen/ARM/memfunc.ll index 1b41010..0b58bf6 100644 --- a/test/CodeGen/ARM/memfunc.ll +++ b/test/CodeGen/ARM/memfunc.ll @@ -1,13 +1,16 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm +; RUN: llvm-as < %s | llc -march=arm -void %f() { +define void @f() { entry: - call void %llvm.memmove.i32( sbyte* null, sbyte* null, uint 64, uint 0 ) - call void %llvm.memcpy.i32( sbyte* null, sbyte* null, uint 64, uint 0 ) - call void %llvm.memset.i32( sbyte* null, ubyte 64, uint 0, uint 0 ) - unreachable + call void @llvm.memmove.i32( i8* null, i8* null, i32 64, i32 0 ) + call void @llvm.memcpy.i32( i8* null, i8* null, i32 64, i32 0 ) + call void @llvm.memset.i32( i8* null, i8 64, i32 0, i32 0 ) + unreachable } -declare void %llvm.memmove.i32(sbyte*, sbyte*, uint, uint) -declare void %llvm.memcpy.i32(sbyte*, sbyte*, uint, uint) -declare void %llvm.memset.i32(sbyte*, ubyte, uint, uint) +declare void @llvm.memmove.i32(i8*, i8*, i32, i32) + +declare void 
@llvm.memcpy.i32(i8*, i8*, i32, i32) + +declare void @llvm.memset.i32(i8*, i8, i32, i32) + diff --git a/test/CodeGen/ARM/mulhi.ll b/test/CodeGen/ARM/mulhi.ll index bed3dba2..de75e96 100644 --- a/test/CodeGen/ARM/mulhi.ll +++ b/test/CodeGen/ARM/mulhi.ll @@ -1,23 +1,22 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v6 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v6 | \ +; RUN: llvm-as < %s | llc -march=arm -mattr=+v6 +; RUN: llvm-as < %s | llc -march=arm -mattr=+v6 | \ ; RUN: grep smmul | count 1 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep umull | count 1 +; RUN: llvm-as < %s | llc -march=arm | grep umull | count 1 -int %smulhi(int %x, int %y) { - %tmp = cast int %x to ulong ; <ulong> [#uses=1] - %tmp1 = cast int %y to ulong ; <ulong> [#uses=1] - %tmp2 = mul ulong %tmp1, %tmp ; <ulong> [#uses=1] - %tmp3 = shr ulong %tmp2, ubyte 32 ; <ulong> [#uses=1] - %tmp3 = cast ulong %tmp3 to int ; <int> [#uses=1] - ret int %tmp3 +define i32 @smulhi(i32 %x, i32 %y) { + %tmp = sext i32 %x to i64 ; <i64> [#uses=1] + %tmp1 = sext i32 %y to i64 ; <i64> [#uses=1] + %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1] + %tmp3 = lshr i64 %tmp2, 32 ; <i64> [#uses=1] + %tmp3.upgrd.1 = trunc i64 %tmp3 to i32 ; <i32> [#uses=1] + ret i32 %tmp3.upgrd.1 } -int %umulhi(uint %x, uint %y) { - %tmp = cast uint %x to ulong ; <ulong> [#uses=1] - %tmp1 = cast uint %y to ulong ; <ulong> [#uses=1] - %tmp2 = mul ulong %tmp1, %tmp ; <ulong> [#uses=1] - %tmp3 = shr ulong %tmp2, ubyte 32 ; <ulong> [#uses=1] - %tmp3 = cast ulong %tmp3 to int ; <int> [#uses=1] - ret int %tmp3 +define i32 @umulhi(i32 %x, i32 %y) { + %tmp = zext i32 %x to i64 ; <i64> [#uses=1] + %tmp1 = zext i32 %y to i64 ; <i64> [#uses=1] + %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1] + %tmp3 = lshr i64 %tmp2, 32 ; <i64> [#uses=1] + %tmp3.upgrd.2 = trunc i64 %tmp3 to i32 ; <i32> [#uses=1] + ret i32 %tmp3.upgrd.2 } - diff --git a/test/CodeGen/ARM/mvn.ll b/test/CodeGen/ARM/mvn.ll index 0672a3b..a7ef907 100644 --- a/test/CodeGen/ARM/mvn.ll +++ b/test/CodeGen/ARM/mvn.ll @@ -1,72 +1,74 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep mvn | count 8 -; END. 
+; RUN: llvm-as < %s | llc -march=arm | grep mvn | count 8 -int %f1() { +define i32 @f1() { entry: - ret int -1 + ret i32 -1 } -int %f2(int %a) { +define i32 @f2(i32 %a) { entry: - %tmpnot = xor int %a, -1 ; <int> [#uses=1] - ret int %tmpnot + %tmpnot = xor i32 %a, -1 ; <i32> [#uses=1] + ret i32 %tmpnot } -int %f3(int %a) { +define i32 @f3(i32 %a) { entry: - %tmp1 = shl int %a, ubyte 2 ; <int> [#uses=1] - %tmp1not = xor int %tmp1, -1 ; <int> [#uses=1] - ret int %tmp1not + %tmp1 = shl i32 %a, 2 ; <i32> [#uses=1] + %tmp1not = xor i32 %tmp1, -1 ; <i32> [#uses=1] + ret i32 %tmp1not } -int %f4(int %a, ubyte %b) { +define i32 @f4(i32 %a, i8 %b) { entry: - %tmp3 = shl int %a, ubyte %b ; <int> [#uses=1] - %tmp3not = xor int %tmp3, -1 ; <int> [#uses=1] - ret int %tmp3not + %shift.upgrd.1 = zext i8 %b to i32 ; <i32> [#uses=1] + %tmp3 = shl i32 %a, %shift.upgrd.1 ; <i32> [#uses=1] + %tmp3not = xor i32 %tmp3, -1 ; <i32> [#uses=1] + ret i32 %tmp3not } -uint %f5(uint %a) { +define i32 @f5(i32 %a) { entry: - %tmp1 = lshr uint %a, ubyte 2 ; <uint> [#uses=1] - %tmp1not = xor uint %tmp1, 4294967295 ; <uint> [#uses=1] - ret uint %tmp1not + %tmp1 = lshr i32 %a, 2 ; <i32> [#uses=1] + %tmp1not = xor i32 %tmp1, -1 ; <i32> [#uses=1] + ret i32 %tmp1not } -uint %f6(uint %a, ubyte %b) { +define i32 @f6(i32 %a, i8 %b) { entry: - %tmp2 = lshr uint %a, ubyte %b ; <uint> [#uses=1] - %tmp2not = xor uint %tmp2, 4294967295 ; <uint> [#uses=1] - ret uint %tmp2not + %shift.upgrd.2 = zext i8 %b to i32 ; <i32> [#uses=1] + %tmp2 = lshr i32 %a, %shift.upgrd.2 ; <i32> [#uses=1] + %tmp2not = xor i32 %tmp2, -1 ; <i32> [#uses=1] + ret i32 %tmp2not } -int %f7(int %a) { +define i32 @f7(i32 %a) { entry: - %tmp1 = ashr int %a, ubyte 2 ; <int> [#uses=1] - %tmp1not = xor int %tmp1, -1 ; <int> [#uses=1] - ret int %tmp1not + %tmp1 = ashr i32 %a, 2 ; <i32> [#uses=1] + %tmp1not = xor i32 %tmp1, -1 ; <i32> [#uses=1] + ret i32 %tmp1not } -int %f8(int %a, ubyte %b) { +define i32 @f8(i32 %a, i8 %b) { entry: - %tmp3 = ashr int %a, ubyte %b ; <int> [#uses=1] - %tmp3not = xor int %tmp3, -1 ; <int> [#uses=1] - ret int %tmp3not + %shift.upgrd.3 = zext i8 %b to i32 ; <i32> [#uses=1] + %tmp3 = ashr i32 %a, %shift.upgrd.3 ; <i32> [#uses=1] + %tmp3not = xor i32 %tmp3, -1 ; <i32> [#uses=1] + ret i32 %tmp3not } -int %f9() { +define i32 @f9() { entry: - %tmp4845 = add int 0, 0 - br label %cond_true4848 + %tmp4845 = add i32 0, 0 ; <i32> [#uses=1] + br label %cond_true4848 -cond_true4848: ; preds = %bb4835 - %tmp4851 = sub int -3, 0 ; <int> [#uses=1] - %abc = add int %tmp4851, %tmp4845 - ret int %abc +cond_true4848: ; preds = %entry + %tmp4851 = sub i32 -3, 0 ; <i32> [#uses=1] + %abc = add i32 %tmp4851, %tmp4845 ; <i32> [#uses=1] + ret i32 %abc } -bool %f10(int %a) { +define i1 @f10(i32 %a) { entry: - %tmp102 = seteq int -2, %a ; <bool> [#uses=1] - ret bool %tmp102 + %tmp102 = icmp eq i32 -2, %a ; <i1> [#uses=1] + ret i1 %tmp102 } diff --git a/test/CodeGen/ARM/pack.ll b/test/CodeGen/ARM/pack.ll index feb49d0..151beac 100644 --- a/test/CodeGen/ARM/pack.ll +++ b/test/CodeGen/ARM/pack.ll @@ -1,80 +1,73 @@ -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v6 | \ +; RUN: llvm-as < %s | llc -march=arm -mattr=+v6 | \ ; RUN: grep pkhbt | count 5 -; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v6 | \ +; RUN: llvm-as < %s | llc -march=arm -mattr=+v6 | \ ; RUN: grep pkhtb | count 4 -; END. 
-implementation ; Functions: - -int %test1(int %X, int %Y) { - %tmp1 = and int %X, 65535 ; <int> [#uses=1] - %tmp4 = shl int %Y, ubyte 16 ; <int> [#uses=1] - %tmp5 = or int %tmp4, %tmp1 ; <int> [#uses=1] - ret int %tmp5 +define i32 @test1(i32 %X, i32 %Y) { + %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1] + %tmp4 = shl i32 %Y, 16 ; <i32> [#uses=1] + %tmp5 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1] + ret i32 %tmp5 } -int %test1a(int %X, int %Y) { - %tmp19 = and int %X, 65535 ; <int> [#uses=1] - %tmp37 = shl int %Y, ubyte 16 ; <int> [#uses=1] - %tmp5 = or int %tmp37, %tmp19 ; <int> [#uses=1] - ret int %tmp5 +define i32 @test1a(i32 %X, i32 %Y) { + %tmp19 = and i32 %X, 65535 ; <i32> [#uses=1] + %tmp37 = shl i32 %Y, 16 ; <i32> [#uses=1] + %tmp5 = or i32 %tmp37, %tmp19 ; <i32> [#uses=1] + ret i32 %tmp5 } -int %test2(int %X, int %Y) { - %tmp1 = and int %X, 65535 ; <int> [#uses=1] - %tmp3 = shl int %Y, ubyte 12 ; <int> [#uses=1] - %tmp4 = and int %tmp3, -65536 ; <int> [#uses=1] - %tmp57 = or int %tmp4, %tmp1 ; <int> [#uses=1] - ret int %tmp57 +define i32 @test2(i32 %X, i32 %Y) { + %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1] + %tmp3 = shl i32 %Y, 12 ; <i32> [#uses=1] + %tmp4 = and i32 %tmp3, -65536 ; <i32> [#uses=1] + %tmp57 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1] + ret i32 %tmp57 } -int %test3(int %X, int %Y) { - %tmp19 = and int %X, 65535 ; <int> [#uses=1] - %tmp37 = shl int %Y, ubyte 18 ; <int> [#uses=1] - %tmp5 = or int %tmp37, %tmp19 ; <int> [#uses=1] - ret int %tmp5 +define i32 @test3(i32 %X, i32 %Y) { + %tmp19 = and i32 %X, 65535 ; <i32> [#uses=1] + %tmp37 = shl i32 %Y, 18 ; <i32> [#uses=1] + %tmp5 = or i32 %tmp37, %tmp19 ; <i32> [#uses=1] + ret i32 %tmp5 } -int %test4(int %X, int %Y) { - %tmp1 = and int %X, 65535 ; <int> [#uses=1] - %tmp3 = and int %Y, -65536 ; <int> [#uses=1] - %tmp46 = or int %tmp3, %tmp1 ; <int> [#uses=1] - ret int %tmp46 +define i32 @test4(i32 %X, i32 %Y) { + %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1] + %tmp3 = and i32 %Y, -65536 ; <i32> [#uses=1] + %tmp46 = or i32 %tmp3, %tmp1 ; <i32> [#uses=1] + ret i32 %tmp46 } -int %test5(int %X, int %Y) { - %tmp17 = and int %X, -65536 ; <int> [#uses=1] - %tmp2 = cast int %Y to uint ; <uint> [#uses=1] - %tmp4 = shr uint %tmp2, ubyte 16 ; <uint> [#uses=1] - %tmp4 = cast uint %tmp4 to int ; <int> [#uses=1] - %tmp5 = or int %tmp4, %tmp17 ; <int> [#uses=1] - ret int %tmp5 +define i32 @test5(i32 %X, i32 %Y) { + %tmp17 = and i32 %X, -65536 ; <i32> [#uses=1] + %tmp2 = bitcast i32 %Y to i32 ; <i32> [#uses=1] + %tmp4 = lshr i32 %tmp2, 16 ; <i32> [#uses=2] + %tmp5 = or i32 %tmp4, %tmp17 ; <i32> [#uses=1] + ret i32 %tmp5 } -int %test5a(int %X, int %Y) { - %tmp110 = and int %X, -65536 ; <int> [#uses=1] - %Y = cast int %Y to uint ; <uint> [#uses=1] - %tmp37 = shr uint %Y, ubyte 16 ; <uint> [#uses=1] - %tmp39 = cast uint %tmp37 to int ; <int> [#uses=1] - %tmp5 = or int %tmp39, %tmp110 ; <int> [#uses=1] - ret int %tmp5 +define i32 @test5a(i32 %X, i32 %Y) { + %tmp110 = and i32 %X, -65536 ; <i32> [#uses=1] + %tmp37 = lshr i32 %Y, 16 ; <i32> [#uses=1] + %tmp39 = bitcast i32 %tmp37 to i32 ; <i32> [#uses=1] + %tmp5 = or i32 %tmp39, %tmp110 ; <i32> [#uses=1] + ret i32 %tmp5 } -int %test6(int %X, int %Y) { - %tmp1 = and int %X, -65536 ; <int> [#uses=1] - %Y = cast int %Y to uint ; <uint> [#uses=1] - %tmp37 = shr uint %Y, ubyte 12 ; <uint> [#uses=1] - %tmp38 = cast uint %tmp37 to int ; <int> [#uses=1] - %tmp4 = and int %tmp38, 65535 ; <int> [#uses=1] - %tmp59 = or int %tmp4, %tmp1 ; <int> [#uses=1] - ret int %tmp59 +define i32 @test6(i32 %X, i32 %Y) { + %tmp1 = 
and i32 %X, -65536 ; <i32> [#uses=1]
+ %tmp37 = lshr i32 %Y, 12 ; <i32> [#uses=1]
+ %tmp38 = bitcast i32 %tmp37 to i32 ; <i32> [#uses=1]
+ %tmp4 = and i32 %tmp38, 65535 ; <i32> [#uses=1]
+ %tmp59 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
+ ret i32 %tmp59
 }
-int %test7(int %X, int %Y) {
- %tmp1 = and int %X, -65536 ; <int> [#uses=1]
- %tmp3 = shr int %Y, ubyte 18 ; <int> [#uses=1]
- %tmp4 = and int %tmp3, 65535 ; <int> [#uses=1]
- %tmp57 = or int %tmp4, %tmp1 ; <int> [#uses=1]
- ret int %tmp57
+define i32 @test7(i32 %X, i32 %Y) {
+ %tmp1 = and i32 %X, -65536 ; <i32> [#uses=1]
+ %tmp3 = ashr i32 %Y, 18 ; <i32> [#uses=1]
+ %tmp4 = and i32 %tmp3, 65535 ; <i32> [#uses=1]
+ %tmp57 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
+ ret i32 %tmp57
 }
-
diff --git a/test/CodeGen/ARM/ret0.ll b/test/CodeGen/ARM/ret0.ll
index 176b2e0..792b169 100644
--- a/test/CodeGen/ARM/ret0.ll
+++ b/test/CodeGen/ARM/ret0.ll
@@ -1,4 +1,5 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm
-int %test() {
- ret int 0
+; RUN: llvm-as < %s | llc -march=arm
+
+define i32 @test() {
+ ret i32 0
 }
diff --git a/test/CodeGen/ARM/ret_arg1.ll b/test/CodeGen/ARM/ret_arg1.ll
index d490cb3..48a1fda 100644
--- a/test/CodeGen/ARM/ret_arg1.ll
+++ b/test/CodeGen/ARM/ret_arg1.ll
@@ -1,4 +1,5 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm
-int %test(int %a1) {
- ret int %a1
+; RUN: llvm-as < %s | llc -march=arm
+
+define i32 @test(i32 %a1) {
+ ret i32 %a1
 }
diff --git a/test/CodeGen/ARM/ret_arg2.ll b/test/CodeGen/ARM/ret_arg2.ll
index eb155da..a74870f 100644
--- a/test/CodeGen/ARM/ret_arg2.ll
+++ b/test/CodeGen/ARM/ret_arg2.ll
@@ -1,4 +1,6 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm
-int %test(int %a1, int %a2) {
- ret int %a2
+; RUN: llvm-as < %s | llc -march=arm
+
+define i32 @test(i32 %a1, i32 %a2) {
+ ret i32 %a2
 }
+
diff --git a/test/CodeGen/ARM/ret_arg3.ll b/test/CodeGen/ARM/ret_arg3.ll
index 41fc930..9210e7b 100644
--- a/test/CodeGen/ARM/ret_arg3.ll
+++ b/test/CodeGen/ARM/ret_arg3.ll
@@ -1,4 +1,5 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm
-int %test(int %a1, int %a2, int %a3) {
- ret int %a3
+; RUN: llvm-as < %s | llc -march=arm
+define i32 @test(i32 %a1, i32 %a2, i32 %a3) {
+ ret i32 %a3
 }
+
diff --git a/test/CodeGen/ARM/ret_arg4.ll b/test/CodeGen/ARM/ret_arg4.ll
index e04f296..a9c66e9 100644
--- a/test/CodeGen/ARM/ret_arg4.ll
+++ b/test/CodeGen/ARM/ret_arg4.ll
@@ -1,4 +1,5 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm
-int %test(int %a1, int %a2, int %a3, int %a4) {
- ret int %a4
+; RUN: llvm-as < %s | llc -march=arm
+
+define i32 @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
+ ret i32 %a4
 }
diff --git a/test/CodeGen/ARM/ret_arg5.ll b/test/CodeGen/ARM/ret_arg5.ll
index a49929b..620a017 100644
--- a/test/CodeGen/ARM/ret_arg5.ll
+++ b/test/CodeGen/ARM/ret_arg5.ll
@@ -1,4 +1,5 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm
-int %test(int %a1, int %a2, int %a3, int %a4, int %a5) {
- ret int %a5
+; RUN: llvm-as < %s | llc -march=arm
+
+define i32 @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5) {
+ ret i32 %a5
 }
diff --git a/test/CodeGen/ARM/ret_void.ll b/test/CodeGen/ARM/ret_void.ll
index 5cd82e3..68db8c4 100644
--- a/test/CodeGen/ARM/ret_void.ll
+++ b/test/CodeGen/ARM/ret_void.ll
@@ -1,4 +1,6 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm
-void %test() {
- ret void
+; RUN: llvm-as < %s | llc -march=arm
+
+define void @test() {
+ ret void
 }
+
diff --git a/test/CodeGen/ARM/rev.ll b/test/CodeGen/ARM/rev.ll
index 0072dae..68f6264 100644
--- a/test/CodeGen/ARM/rev.ll
+++ b/test/CodeGen/ARM/rev.ll
@@ -1,29 +1,27 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v6 | grep rev16
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v6 | grep revsh
+; RUN: llvm-as < %s | llc -march=arm -mattr=+v6 | grep rev16
+; RUN: llvm-as < %s | llc -march=arm -mattr=+v6 | grep revsh
-int %test1(uint %X) {
- %tmp1 = shr uint %X, ubyte 8 ; <uint> [#uses=1]
- %tmp1 = cast uint %tmp1 to int ; <int> [#uses=2]
- %X15 = cast uint %X to int ; <int> [#uses=1]
- %tmp4 = shl int %X15, ubyte 8 ; <int> [#uses=2]
- %tmp2 = and int %tmp1, 16711680 ; <int> [#uses=1]
- %tmp5 = and int %tmp4, -16777216 ; <int> [#uses=1]
- %tmp9 = and int %tmp1, 255 ; <int> [#uses=1]
- %tmp13 = and int %tmp4, 65280 ; <int> [#uses=1]
- %tmp6 = or int %tmp5, %tmp2 ; <int> [#uses=1]
- %tmp10 = or int %tmp6, %tmp13 ; <int> [#uses=1]
- %tmp14 = or int %tmp10, %tmp9 ; <int> [#uses=1]
- ret int %tmp14
+define i32 @test1(i32 %X) {
+ %tmp1 = lshr i32 %X, 8 ; <i32> [#uses=3]
+ %X15 = bitcast i32 %X to i32 ; <i32> [#uses=1]
+ %tmp4 = shl i32 %X15, 8 ; <i32> [#uses=2]
+ %tmp2 = and i32 %tmp1, 16711680 ; <i32> [#uses=1]
+ %tmp5 = and i32 %tmp4, -16777216 ; <i32> [#uses=1]
+ %tmp9 = and i32 %tmp1, 255 ; <i32> [#uses=1]
+ %tmp13 = and i32 %tmp4, 65280 ; <i32> [#uses=1]
+ %tmp6 = or i32 %tmp5, %tmp2 ; <i32> [#uses=1]
+ %tmp10 = or i32 %tmp6, %tmp13 ; <i32> [#uses=1]
+ %tmp14 = or i32 %tmp10, %tmp9 ; <i32> [#uses=1]
+ ret i32 %tmp14
 }
-int %test2(uint %X) { ; revsh
- %tmp1 = shr uint %X, ubyte 8 ; <uint> [#uses=1]
- %tmp1 = cast uint %tmp1 to short ; <short> [#uses=1]
- %tmp3 = cast uint %X to short ; <short> [#uses=1]
- %tmp2 = and short %tmp1, 255 ; <short> [#uses=1]
- %tmp4 = shl short %tmp3, ubyte 8 ; <short> [#uses=1]
- %tmp5 = or short %tmp2, %tmp4 ; <short> [#uses=1]
- %tmp5 = cast short %tmp5 to int ; <int> [#uses=1]
- ret int %tmp5
+define i32 @test2(i32 %X) {
+ %tmp1 = lshr i32 %X, 8 ; <i32> [#uses=1]
+ %tmp1.upgrd.1 = trunc i32 %tmp1 to i16 ; <i16> [#uses=1]
+ %tmp3 = trunc i32 %X to i16 ; <i16> [#uses=1]
+ %tmp2 = and i16 %tmp1.upgrd.1, 255 ; <i16> [#uses=1]
+ %tmp4 = shl i16 %tmp3, 8 ; <i16> [#uses=1]
+ %tmp5 = or i16 %tmp2, %tmp4 ; <i16> [#uses=1]
+ %tmp5.upgrd.2 = sext i16 %tmp5 to i32 ; <i32> [#uses=1]
+ ret i32 %tmp5.upgrd.2
 }
-
diff --git a/test/CodeGen/ARM/section.ll b/test/CodeGen/ARM/section.ll
index fcb86c5..0397bcc 100644
--- a/test/CodeGen/ARM/section.ll
+++ b/test/CodeGen/ARM/section.ll
@@ -1,6 +1,7 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=arm-linux | \
+; RUN: llvm-as < %s | llc -mtriple=arm-linux | \
 ; RUN: grep {__DTOR_END__:}
-; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=arm-linux | \
+; RUN: llvm-as < %s | llc -mtriple=arm-linux | \
 ; RUN: grep {.section .dtors,"aw",.progbits}
-%__DTOR_END__ = internal global [1 x int] zeroinitializer, section ".dtors"
+@__DTOR_END__ = internal global [1 x i32] zeroinitializer, section ".dtors" ; <[1 x i32]*> [#uses=0]
+
diff --git a/test/CodeGen/ARM/shifter_operand.ll b/test/CodeGen/ARM/shifter_operand.ll
index 313caed..cae1c44 100644
--- a/test/CodeGen/ARM/shifter_operand.ll
+++ b/test/CodeGen/ARM/shifter_operand.ll
@@ -1,15 +1,18 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep add | grep lsl
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep bic | grep asr
+; RUN: llvm-as < %s | llc -march=arm | grep add | grep lsl
+; RUN: llvm-as < %s | llc -march=arm | grep bic | grep asr
-int %test1(int %X, int %Y, ubyte %sh) {
- %A = shl int %Y, ubyte %sh
- %B = add int %X, %A
- ret int %B
+
+define i32 @test1(i32 %X, i32 %Y, i8 %sh) {
+ %shift.upgrd.1 = zext i8 %sh to i32 ; <i32> [#uses=1]
+ %A = shl i32 %Y, %shift.upgrd.1 ; <i32> [#uses=1]
+ %B = add i32 %X, %A ; <i32> [#uses=1]
+ ret i32 %B
 }
-int %test2(int %X, int %Y, ubyte %sh) {
- %A = shr int %Y, ubyte %sh
- %B = xor int %A, -1
- %C = and int %X, %B
- ret int %C
+define i32 @test2(i32 %X, i32 %Y, i8 %sh) {
+ %shift.upgrd.2 = zext i8 %sh to i32 ; <i32> [#uses=1]
+ %A = ashr i32 %Y, %shift.upgrd.2 ; <i32> [#uses=1]
+ %B = xor i32 %A, -1 ; <i32> [#uses=1]
+ %C = and i32 %X, %B ; <i32> [#uses=1]
+ ret i32 %C
 }
diff --git a/test/CodeGen/ARM/smul.ll b/test/CodeGen/ARM/smul.ll
index 6458ac8..7a4e488 100644
--- a/test/CodeGen/ARM/smul.ll
+++ b/test/CodeGen/ARM/smul.ll
@@ -1,35 +1,36 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v5TE
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v5TE | \
+; RUN: llvm-as < %s | llc -march=arm
+; RUN: llvm-as < %s | llc -march=arm -mattr=+v5TE
+; RUN: llvm-as < %s | llc -march=arm -mattr=+v5TE | \
 ; RUN: grep smulbt | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v5TE | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+v5TE | \
 ; RUN: grep smultt | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v5TE | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+v5TE | \
 ; RUN: grep smlabt | count 1
-%x = weak global short 0
-%y = weak global short 0
+@x = weak global i16 0 ; <i16*> [#uses=1]
+@y = weak global i16 0 ; <i16*> [#uses=0]
-int %f1(int %y) {
- %tmp = load short* %x
- %tmp1 = add short %tmp, 2
- %tmp2 = cast short %tmp1 to int
- %tmp3 = shr int %y, ubyte 16
- %tmp4 = mul int %tmp2, %tmp3
- ret int %tmp4
+define i32 @f1(i32 %y) {
+ %tmp = load i16* @x ; <i16> [#uses=1]
+ %tmp1 = add i16 %tmp, 2 ; <i16> [#uses=1]
+ %tmp2 = sext i16 %tmp1 to i32 ; <i32> [#uses=1]
+ %tmp3 = ashr i32 %y, 16 ; <i32> [#uses=1]
+ %tmp4 = mul i32 %tmp2, %tmp3 ; <i32> [#uses=1]
+ ret i32 %tmp4
 }
-int %f2(int %x, int %y) {
- %tmp1 = shr int %x, ubyte 16
- %tmp3 = shr int %y, ubyte 16
- %tmp4 = mul int %tmp3, %tmp1
- ret int %tmp4
+define i32 @f2(i32 %x, i32 %y) {
+ %tmp1 = ashr i32 %x, 16 ; <i32> [#uses=1]
+ %tmp3 = ashr i32 %y, 16 ; <i32> [#uses=1]
+ %tmp4 = mul i32 %tmp3, %tmp1 ; <i32> [#uses=1]
+ ret i32 %tmp4
 }
-int %f3(int %a, short %x, int %y) {
- %tmp = cast short %x to int
- %tmp2 = shr int %y, ubyte 16
- %tmp3 = mul int %tmp2, %tmp
- %tmp5 = add int %tmp3, %a
- ret int %tmp5
+define i32 @f3(i32 %a, i16 %x, i32 %y) {
+ %tmp = sext i16 %x to i32 ; <i32> [#uses=1]
+ %tmp2 = ashr i32 %y, 16 ; <i32> [#uses=1]
+ %tmp3 = mul i32 %tmp2, %tmp ; <i32> [#uses=1]
+ %tmp5 = add i32 %tmp3, %a ; <i32> [#uses=1]
+ ret i32 %tmp5
 }
+
diff --git a/test/CodeGen/ARM/str_post.ll b/test/CodeGen/ARM/str_post.ll
index 1dc4155..ba81380 100644
--- a/test/CodeGen/ARM/str_post.ll
+++ b/test/CodeGen/ARM/str_post.ll
@@ -1,21 +1,21 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \
+; RUN: llvm-as < %s | llc -march=arm | \
 ; RUN: grep {strh .*\\\[.*\], #-4} | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \
+; RUN: llvm-as < %s | llc -march=arm | \
 ; RUN: grep {str .*\\\[.*\],} | count 1
-short %test1(int *%X, short *%A) {
- %Y = load int* %X
- %tmp1 = cast int %Y to short
- store short %tmp1, short* %A
- %tmp2 = cast short* %A to short
- %tmp3 = sub short %tmp2, 4
- ret short %tmp3
+define i16 @test1(i32* %X, i16* %A) {
+ %Y = load i32* %X ; <i32> [#uses=1]
+ %tmp1 = trunc i32 %Y to i16 ; <i16> [#uses=1]
+ store i16 %tmp1, i16* %A
+ %tmp2 = ptrtoint i16* %A to i16 ; <i16> [#uses=1]
+ %tmp3 = sub i16 %tmp2, 4 ; <i16> [#uses=1]
+ ret i16 %tmp3
 }
-int %test2(int *%X, int *%A) {
- %Y = load int* %X
- store int %Y, int* %A
- %tmp1 = cast int* %A to int
- %tmp2 = sub int %tmp1, 4
- ret int %tmp2
+define i32 @test2(i32* %X, i32* %A) {
+ %Y = load i32* %X ; <i32> [#uses=1]
+ store i32 %Y, i32* %A
+ %tmp1 = ptrtoint i32* %A to i32 ; <i32> [#uses=1]
+ %tmp2 = sub i32 %tmp1, 4 ; <i32> [#uses=1]
+ ret i32 %tmp2
 }
diff --git a/test/CodeGen/ARM/str_pre.ll b/test/CodeGen/ARM/str_pre.ll
index c948f69..c02663f 100644
--- a/test/CodeGen/ARM/str_pre.ll
+++ b/test/CodeGen/ARM/str_pre.ll
@@ -1,18 +1,18 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \
+; RUN: llvm-as < %s | llc -march=arm | \
 ; RUN: grep {str.*\\!} | count 2
-void %test1(int *%X, int *%A, int **%dest) {
- %B = load int* %A
- %Y = getelementptr int* %X, int 4
- store int %B, int* %Y
- store int* %Y, int** %dest
+define void @test1(i32* %X, i32* %A, i32** %dest) {
+ %B = load i32* %A ; <i32> [#uses=1]
+ %Y = getelementptr i32* %X, i32 4 ; <i32*> [#uses=2]
+ store i32 %B, i32* %Y
+ store i32* %Y, i32** %dest
 ret void
 }
-short *%test2(short *%X, int *%A) {
- %B = load int* %A
- %Y = getelementptr short* %X, int 4
- %tmp = cast int %B to short
- store short %tmp, short* %Y
- ret short* %Y
+define i16* @test2(i16* %X, i32* %A) {
+ %B = load i32* %A ; <i32> [#uses=1]
+ %Y = getelementptr i16* %X, i32 4 ; <i16*> [#uses=2]
+ %tmp = trunc i32 %B to i16 ; <i16> [#uses=1]
+ store i16 %tmp, i16* %Y
+ ret i16* %Y
 }
diff --git a/test/CodeGen/ARM/str_trunc.ll b/test/CodeGen/ARM/str_trunc.ll
index 391e774..77c66ec 100644
--- a/test/CodeGen/ARM/str_trunc.ll
+++ b/test/CodeGen/ARM/str_trunc.ll
@@ -1,16 +1,16 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \
+; RUN: llvm-as < %s | llc -march=arm | \
 ; RUN: grep strb | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | \
+; RUN: llvm-as < %s | llc -march=arm | \
 ; RUN: grep strh | count 1
-void %test1(int %v, short* %ptr) {
- %tmp = cast int %v to short
- store short %tmp, short* %ptr
- ret void
+define void @test1(i32 %v, i16* %ptr) {
+ %tmp = trunc i32 %v to i16 ; <i16> [#uses=1]
+ store i16 %tmp, i16* %ptr
+ ret void
 }
-void %test2(int %v, ubyte* %ptr) {
- %tmp = cast int %v to ubyte
- store ubyte %tmp, ubyte* %ptr
- ret void
+define void @test2(i32 %v, i8* %ptr) {
+ %tmp = trunc i32 %v to i8 ; <i8> [#uses=1]
+ store i8 %tmp, i8* %ptr
+ ret void
 }
diff --git a/test/CodeGen/ARM/uxtb.ll b/test/CodeGen/ARM/uxtb.ll
index 1787d6f..85659a7 100644
--- a/test/CodeGen/ARM/uxtb.ll
+++ b/test/CodeGen/ARM/uxtb.ll
@@ -1,76 +1,74 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+v6 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+v6 | \
 ; RUN: grep uxt | count 10
-; END.
-uint %test1(uint %x) {
- %tmp1 = and uint %x, 16711935 ; <uint> [#uses=1]
- ret uint %tmp1
+define i32 @test1(i32 %x) {
+ %tmp1 = and i32 %x, 16711935 ; <i32> [#uses=1]
+ ret i32 %tmp1
 }
-uint %test2(uint %x) {
- %tmp1 = shr uint %x, ubyte 8 ; <uint> [#uses=1]
- %tmp2 = and uint %tmp1, 16711935 ; <uint> [#uses=1]
- ret uint %tmp2
+define i32 @test2(i32 %x) {
+ %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
+ %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
+ ret i32 %tmp2
 }
-uint %test3(uint %x) {
- %tmp1 = shr uint %x, ubyte 8 ; <uint> [#uses=1]
- %tmp2 = and uint %tmp1, 16711935 ; <uint> [#uses=1]
- ret uint %tmp2
+define i32 @test3(i32 %x) {
+ %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
+ %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
+ ret i32 %tmp2
 }
-uint %test4(uint %x) {
- %tmp1 = shr uint %x, ubyte 8 ; <uint> [#uses=1]
- %tmp6 = and uint %tmp1, 16711935 ; <uint> [#uses=1]
- ret uint %tmp6
+define i32 @test4(i32 %x) {
+ %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
+ %tmp6 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
+ ret i32 %tmp6
 }
-uint %test5(uint %x) {
- %tmp1 = shr uint %x, ubyte 8 ; <uint> [#uses=1]
- %tmp2 = and uint %tmp1, 16711935 ; <uint> [#uses=1]
- ret uint %tmp2
+define i32 @test5(i32 %x) {
+ %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
+ %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
+ ret i32 %tmp2
 }
-uint %test6(uint %x) {
- %tmp1 = shr uint %x, ubyte 16 ; <uint> [#uses=1]
- %tmp2 = and uint %tmp1, 255 ; <uint> [#uses=1]
- %tmp4 = shl uint %x, ubyte 16 ; <uint> [#uses=1]
- %tmp5 = and uint %tmp4, 16711680 ; <uint> [#uses=1]
- %tmp6 = or uint %tmp2, %tmp5 ; <uint> [#uses=1]
- ret uint %tmp6
+define i32 @test6(i32 %x) {
+ %tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
+ %tmp2 = and i32 %tmp1, 255 ; <i32> [#uses=1]
+ %tmp4 = shl i32 %x, 16 ; <i32> [#uses=1]
+ %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
+ %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
+ ret i32 %tmp6
 }
-uint %test7(uint %x) {
- %tmp1 = shr uint %x, ubyte 16 ; <uint> [#uses=1]
- %tmp2 = and uint %tmp1, 255 ; <uint> [#uses=1]
- %tmp4 = shl uint %x, ubyte 16 ; <uint> [#uses=1]
- %tmp5 = and uint %tmp4, 16711680 ; <uint> [#uses=1]
- %tmp6 = or uint %tmp2, %tmp5 ; <uint> [#uses=1]
- ret uint %tmp6
+define i32 @test7(i32 %x) {
+ %tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
+ %tmp2 = and i32 %tmp1, 255 ; <i32> [#uses=1]
+ %tmp4 = shl i32 %x, 16 ; <i32> [#uses=1]
+ %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
+ %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
+ ret i32 %tmp6
 }
-uint %test8(uint %x) {
- %tmp1 = shl uint %x, ubyte 8 ; <uint> [#uses=1]
- %tmp2 = and uint %tmp1, 16711680 ; <uint> [#uses=1]
- %tmp5 = shr uint %x, ubyte 24 ; <uint> [#uses=1]
- %tmp6 = or uint %tmp2, %tmp5 ; <uint> [#uses=1]
- ret uint %tmp6
+define i32 @test8(i32 %x) {
+ %tmp1 = shl i32 %x, 8 ; <i32> [#uses=1]
+ %tmp2 = and i32 %tmp1, 16711680 ; <i32> [#uses=1]
+ %tmp5 = lshr i32 %x, 24 ; <i32> [#uses=1]
+ %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
+ ret i32 %tmp6
 }
-uint %test9(uint %x) {
- %tmp1 = shr uint %x, ubyte 24 ; <uint> [#uses=1]
- %tmp4 = shl uint %x, ubyte 8 ; <uint> [#uses=1]
- %tmp5 = and uint %tmp4, 16711680 ; <uint> [#uses=1]
- %tmp6 = or uint %tmp5, %tmp1 ; <uint> [#uses=1]
- ret uint %tmp6
+define i32 @test9(i32 %x) {
+ %tmp1 = lshr i32 %x, 24 ; <i32> [#uses=1]
+ %tmp4 = shl i32 %x, 8 ; <i32> [#uses=1]
+ %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
+ %tmp6 = or i32 %tmp5, %tmp1 ; <i32> [#uses=1]
+ ret i32 %tmp6
 }
-uint %test10(uint %p0) {
- %tmp1 = shr uint %p0, ubyte 7 ; <uint> [#uses=1]
- %tmp2 = and uint %tmp1, 16253176 ; <uint> [#uses=2]
- %tmp4 = shr uint %tmp2, ubyte 5 ; <uint> [#uses=1]
- %tmp5 = and uint %tmp4, 458759 ; <uint> [#uses=1]
- %tmp7 = or uint %tmp5, %tmp2 ; <uint> [#uses=1]
- ret uint %tmp7
+define i32 @test10(i32 %p0) {
+ %tmp1 = lshr i32 %p0, 7 ; <i32> [#uses=1]
+ %tmp2 = and i32 %tmp1, 16253176 ; <i32> [#uses=2]
+ %tmp4 = lshr i32 %tmp2, 5 ; <i32> [#uses=1]
+ %tmp5 = and i32 %tmp4, 458759 ; <i32> [#uses=1]
+ %tmp7 = or i32 %tmp5, %tmp2 ; <i32> [#uses=1]
+ ret i32 %tmp7
 }
-
diff --git a/test/CodeGen/ARM/vargs.ll b/test/CodeGen/ARM/vargs.ll
index aa5e8e6..4bf79c0 100644
--- a/test/CodeGen/ARM/vargs.ll
+++ b/test/CodeGen/ARM/vargs.ll
@@ -1,13 +1,12 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm
-%str = internal constant [43 x sbyte] c"Hello World %d %d %d %d %d %d %d %d %d %d\0A\00" ; <[43 x sbyte]*> [#uses=1]
+; RUN: llvm-as < %s | llc -march=arm
+@str = internal constant [43 x i8] c"Hello World %d %d %d %d %d %d %d %d %d %d\0A\00" ; <[43 x i8]*> [#uses=1]
-implementation ; Functions:
-
-int %main() {
+define i32 @main() {
 entry:
- %tmp = call int (sbyte*, ...)* %printf( sbyte* getelementptr ([43 x sbyte]* %str, int 0, uint 0), int 1, int 2, int 3, int 4, int 5, int 6, int 7, int 8, int 9, int 10 ) ; <int> [#uses=0]
- %tmp2 = call int (sbyte*, ...)* %printf( sbyte* getelementptr ([43 x sbyte]* %str, int 0, uint 0), int 10, int 9, int 8, int 7, int 6, int 5, int 4, int 3, int 2, int 1 ) ; <int> [#uses=0]
- ret int 11
+ %tmp = call i32 (i8*, ...)* @printf( i8* getelementptr ([43 x i8]* @str, i32 0, i64 0), i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10 ) ; <i32> [#uses=0]
+ %tmp2 = call i32 (i8*, ...)* @printf( i8* getelementptr ([43 x i8]* @str, i32 0, i64 0), i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1 ) ; <i32> [#uses=0]
+ ret i32 11
 }
-declare int %printf(sbyte*, ...)
+declare i32 @printf(i8*, ...)
+
diff --git a/test/CodeGen/ARM/vargs2.ll b/test/CodeGen/ARM/vargs2.ll
index ae4ca5b..fb0b8d8 100644
--- a/test/CodeGen/ARM/vargs2.ll
+++ b/test/CodeGen/ARM/vargs2.ll
@@ -1,36 +1,36 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=thumb
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=thumb | \
+; RUN: llvm-as < %s | llc -march=thumb
+; RUN: llvm-as < %s | llc -march=thumb | \
 ; RUN: grep pop | count 2
-%str = internal constant [4 x sbyte] c"%d\0A\00" ; <[4 x sbyte]*> [#uses=1]
+@str = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
-implementation ; Functions:
-
-void %f(int %a, ...) {
+define void @f(i32 %a, ...) {
 entry:
- %va = alloca sbyte*, align 4 ; <sbyte**> [#uses=4]
- call void %llvm.va_start( sbyte** %va )
- br label %bb
-
-bb: ; preds = %bb, %entry
- %a_addr.0 = phi int [ %a, %entry ], [ %tmp5, %bb ] ; <int> [#uses=2]
- %tmp = volatile load sbyte** %va ; <sbyte*> [#uses=2]
- %tmp2 = getelementptr sbyte* %tmp, int 4 ; <sbyte*> [#uses=1]
- volatile store sbyte* %tmp2, sbyte** %va
- %tmp5 = add int %a_addr.0, -1 ; <int> [#uses=1]
- %tmp = seteq int %a_addr.0, 1 ; <bool> [#uses=1]
- br bool %tmp, label %bb7, label %bb
-
-bb7: ; preds = %bb
- %tmp3 = cast sbyte* %tmp to int* ; <int*> [#uses=1]
- %tmp = load int* %tmp3 ; <int> [#uses=1]
- %tmp10 = call int (sbyte*, ...)* %printf( sbyte* getelementptr ([4 x sbyte]* %str, int 0, uint 0), int %tmp ) ; <int> [#uses=0]
- call void %llvm.va_end( sbyte** %va )
- ret void
+ %va = alloca i8*, align 4 ; <i8**> [#uses=4]
+ %va.upgrd.1 = bitcast i8** %va to i8* ; <i8*> [#uses=1]
+ call void @llvm.va_start( i8* %va.upgrd.1 )
+ br label %bb
+
+bb: ; preds = %bb, %entry
+ %a_addr.0 = phi i32 [ %a, %entry ], [ %tmp5, %bb ] ; <i32> [#uses=2]
+ %tmp = volatile load i8** %va ; <i8*> [#uses=2]
+ %tmp2 = getelementptr i8* %tmp, i32 4 ; <i8*> [#uses=1]
+ volatile store i8* %tmp2, i8** %va
+ %tmp5 = add i32 %a_addr.0, -1 ; <i32> [#uses=1]
+ %tmp.upgrd.2 = icmp eq i32 %a_addr.0, 1 ; <i1> [#uses=1]
+ br i1 %tmp.upgrd.2, label %bb7, label %bb
+
+bb7: ; preds = %bb
+ %tmp3 = bitcast i8* %tmp to i32* ; <i32*> [#uses=1]
+ %tmp.upgrd.3 = load i32* %tmp3 ; <i32> [#uses=1]
+ %tmp10 = call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @str, i32 0, i64 0), i32 %tmp.upgrd.3 ) ; <i32> [#uses=0]
+ %va.upgrd.4 = bitcast i8** %va to i8* ; <i8*> [#uses=1]
+ call void @llvm.va_end( i8* %va.upgrd.4 )
+ ret void
 }
-declare void %llvm.va_start(sbyte**)
+declare void @llvm.va_start(i8*)
-declare int %printf(sbyte*, ...)
+declare i32 @printf(i8*, ...)
-declare void %llvm.va_end(sbyte**)
+declare void @llvm.va_end(i8*)
diff --git a/test/CodeGen/ARM/vfp.ll b/test/CodeGen/ARM/vfp.ll
index 11f668e..2acb33f 100644
--- a/test/CodeGen/ARM/vfp.ll
+++ b/test/CodeGen/ARM/vfp.ll
@@ -1,150 +1,144 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep fabs | count 2
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep fmscs | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep fcvt | count 2
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep fuito | count 2
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep fto.i | count 4
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep bmi | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep bgt | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep fcmpezs | count 1
-void %test(float *%P, double* %D) {
- %A = load float* %P
- %B = load double* %D
- store float %A, float* %P
- store double %B, double* %D
- ret void
+define void @test(float* %P, double* %D) {
+ %A = load float* %P ; <float> [#uses=1]
+ %B = load double* %D ; <double> [#uses=1]
+ store float %A, float* %P
+ store double %B, double* %D
+ ret void
 }
-declare float %fabsf(float)
-declare double %fabs(double)
+declare float @fabsf(float)
-void %test_abs(float *%P, double* %D) {
- %a = load float* %P
- %b = call float %fabsf(float %a)
- store float %b, float* %P
+declare double @fabs(double)
- %A = load double* %D
- %B = call double %fabs(double %A)
- store double %B, double* %D
- ret void
+define void @test_abs(float* %P, double* %D) {
+ %a = load float* %P ; <float> [#uses=1]
+ %b = call float @fabsf( float %a ) ; <float> [#uses=1]
+ store float %b, float* %P
+ %A = load double* %D ; <double> [#uses=1]
+ %B = call double @fabs( double %A ) ; <double> [#uses=1]
+ store double %B, double* %D
+ ret void
 }
-void %test_add(float *%P, double* %D) {
- %a = load float* %P
- %b = add float %a, %a
- store float %b, float* %P
-
- %A = load double* %D
- %B = add double %A, %A
- store double %B, double* %D
- ret void
+define void @test_add(float* %P, double* %D) {
+ %a = load float* %P ; <float> [#uses=2]
+ %b = add float %a, %a ; <float> [#uses=1]
+ store float %b, float* %P
+ %A = load double* %D ; <double> [#uses=2]
+ %B = add double %A, %A ; <double> [#uses=1]
+ store double %B, double* %D
+ ret void
 }
-void %test_ext_round(float *%P, double* %D) {
- %a = load float* %P
- %b = cast float %a to double
-
- %A = load double* %D
- %B = cast double %A to float
-
- store double %b, double* %D
- store float %B, float* %P
- ret void
+define void @test_ext_round(float* %P, double* %D) {
+ %a = load float* %P ; <float> [#uses=1]
+ %b = fpext float %a to double ; <double> [#uses=1]
+ %A = load double* %D ; <double> [#uses=1]
+ %B = fptrunc double %A to float ; <float> [#uses=1]
+ store double %b, double* %D
+ store float %B, float* %P
+ ret void
 }
-void %test_fma(float *%P1, float* %P2, float *%P3) {
- %a1 = load float* %P1
- %a2 = load float* %P2
- %a3 = load float* %P3
-
- %X = mul float %a1, %a2
- %Y = sub float %X, %a3
-
- store float %Y, float* %P1
- ret void
+define void @test_fma(float* %P1, float* %P2, float* %P3) {
+ %a1 = load float* %P1 ; <float> [#uses=1]
+ %a2 = load float* %P2 ; <float> [#uses=1]
+ %a3 = load float* %P3 ; <float> [#uses=1]
+ %X = mul float %a1, %a2 ; <float> [#uses=1]
+ %Y = sub float %X, %a3 ; <float> [#uses=1]
+ store float %Y, float* %P1
+ ret void
 }
-int %test_ftoi(float *%P1) {
- %a1 = load float* %P1
- %b1 = cast float %a1 to int
- ret int %b1
+define i32 @test_ftoi(float* %P1) {
+ %a1 = load float* %P1 ; <float> [#uses=1]
+ %b1 = fptosi float %a1 to i32 ; <i32> [#uses=1]
+ ret i32 %b1
 }
-uint %test_ftou(float *%P1) {
- %a1 = load float* %P1
- %b1 = cast float %a1 to uint
- ret uint %b1
+define i32 @test_ftou(float* %P1) {
- %a1 = load float* %P1 ; <float> [#uses=1]
+ %a1 = load float* %P1 ; <float> [#uses=1]
+ %b1 = fptoui float %a1 to i32 ; <i32> [#uses=1]
+ ret i32 %b1
 }
-int %test_dtoi(double *%P1) {
- %a1 = load double* %P1
- %b1 = cast double %a1 to int
- ret int %b1
+define i32 @test_dtoi(double* %P1) {
+ %a1 = load double* %P1 ; <double> [#uses=1]
+ %b1 = fptosi double %a1 to i32 ; <i32> [#uses=1]
+ ret i32 %b1
 }
-uint %test_dtou(double *%P1) {
- %a1 = load double* %P1
- %b1 = cast double %a1 to uint
- ret uint %b1
+define i32 @test_dtou(double* %P1) {
+ %a1 = load double* %P1 ; <double> [#uses=1]
+ %b1 = fptoui double %a1 to i32 ; <i32> [#uses=1]
+ ret i32 %b1
 }
-void %test_utod(double *%P1, uint %X) {
- %b1 = cast uint %X to double
- store double %b1, double* %P1
- ret void
+define void @test_utod(double* %P1, i32 %X) {
+ %b1 = uitofp i32 %X to double ; <double> [#uses=1]
+ store double %b1, double* %P1
+ ret void
 }
-void %test_utod2(double *%P1, ubyte %X) {
- %b1 = cast ubyte %X to double
- store double %b1, double* %P1
- ret void
+define void @test_utod2(double* %P1, i8 %X) {
+ %b1 = uitofp i8 %X to double ; <double> [#uses=1]
+ store double %b1, double* %P1
+ ret void
 }
-void %test_cmp(float* %glob, int %X) {
+define void @test_cmp(float* %glob, i32 %X) {
 entry:
- %tmp = load float* %glob ; <float> [#uses=2]
- %tmp3 = getelementptr float* %glob, int 2 ; <float*> [#uses=1]
- %tmp4 = load float* %tmp3 ; <float> [#uses=2]
- %tmp = seteq float %tmp, %tmp4 ; <bool> [#uses=1]
- %tmp5 = tail call bool %llvm.isunordered.f32( float %tmp, float %tmp4 ) ; <bool> [#uses=1]
- %tmp6 = or bool %tmp, %tmp5 ; <bool> [#uses=1]
- br bool %tmp6, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- %tmp = tail call int (...)* %bar( ) ; <int> [#uses=0]
- ret void
-
-cond_false: ; preds = %entry
- %tmp7 = tail call int (...)* %baz( ) ; <int> [#uses=0]
- ret void
+ %tmp = load float* %glob ; <float> [#uses=2]
+ %tmp3 = getelementptr float* %glob, i32 2 ; <float*> [#uses=1]
+ %tmp4 = load float* %tmp3 ; <float> [#uses=2]
+ %tmp.upgrd.1 = fcmp oeq float %tmp, %tmp4 ; <i1> [#uses=1]
+ %tmp5 = fcmp uno float %tmp, %tmp4 ; <i1> [#uses=1]
+ %tmp6 = or i1 %tmp.upgrd.1, %tmp5 ; <i1> [#uses=1]
+ br i1 %tmp6, label %cond_true, label %cond_false
+
+cond_true: ; preds = %entry
+ %tmp.upgrd.2 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
+ ret void
+
+cond_false: ; preds = %entry
+ %tmp7 = tail call i32 (...)* @baz( ) ; <i32> [#uses=0]
+ ret void
 }
-declare bool %llvm.isunordered.f32(float, float)
+declare i1 @llvm.isunordered.f32(float, float)
-declare int %bar(...)
+declare i32 @bar(...)
-declare int %baz(...)
+declare i32 @baz(...)
-void %test_cmpfp0(float* %glob, int %X) {
+define void @test_cmpfp0(float* %glob, i32 %X) {
 entry:
- %tmp = load float* %glob ; <float> [#uses=1]
- %tmp = setgt float %tmp, 0.000000e+00 ; <bool> [#uses=1]
- br bool %tmp, label %cond_true, label %cond_false
+ %tmp = load float* %glob ; <float> [#uses=1]
+ %tmp.upgrd.3 = fcmp ogt float %tmp, 0.000000e+00 ; <i1> [#uses=1]
+ br i1 %tmp.upgrd.3, label %cond_true, label %cond_false
-cond_true: ; preds = %entry
- %tmp = tail call int (...)* %bar( ) ; <int> [#uses=0]
- ret void
+cond_true: ; preds = %entry
+ %tmp.upgrd.4 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
+ ret void
-cond_false: ; preds = %entry
- %tmp1 = tail call int (...)* %baz( ) ; <int> [#uses=0]
- ret void
+cond_false: ; preds = %entry
+ %tmp1 = tail call i32 (...)* @baz( ) ; <i32> [#uses=0]
+ ret void
 }
-
diff --git a/test/CodeGen/ARM/weak.ll b/test/CodeGen/ARM/weak.ll
index f1294d8..dadd1b9 100644
--- a/test/CodeGen/ARM/weak.ll
+++ b/test/CodeGen/ARM/weak.ll
@@ -1,17 +1,16 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep .weak.*f
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep .weak.*h
+; RUN: llvm-as < %s | llc -march=arm | grep .weak.*f
+; RUN: llvm-as < %s | llc -march=arm | grep .weak.*h
-implementation ; Functions:
-
-weak uint %f() {
+define weak i32 @f() {
 entry:
- unreachable
+ unreachable
 }
-void %g() {
+define void @g() {
 entry:
- tail call void %h( )
+ tail call void @h( )
 ret void
 }
-declare extern_weak void %h()
+declare extern_weak void @h()
+