author     Scott Michel <scottm@aero.org>   2009-01-26 03:31:40 +0000
committer  Scott Michel <scottm@aero.org>   2009-01-26 03:31:40 +0000
commit     c9c8b2a804b2cd3d33a6a965e06a21ff93968f97 (patch)
tree       6141f9f0ec12fefbdd984667613aaf33da6068af /test/CodeGen
parent     5bf4b7556f025587a8d1a14bd0fb39c12fc9c170 (diff)
CellSPU:
- Rename the fcmp.ll test to fcmp32.ll and start adding new double-precision tests in fcmp64.ll
- Fix select_bits.ll test
- Capitulate to the DAGCombiner and move i64 constant loads to instruction
selection (SPUISelDAGtoDAG.cpp).
<rant>DAGCombiner will insert all kinds of 64-bit optimizations after
operation legalization occurs, so now most of the work that instruction
selection should do only once has to be done twice: once to determine
whether a v2i64 build_vector can be handled by SelectCode(), which then
runs all of the predicates a second time to select the necessary
instructions. But CellSPU is a good citizen.</rant>
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@62990 91177308-0d34-0410-b5e6-96231b3b80d8
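For illustration only (not part of this commit), a minimal sketch of the kind of
i64 constant-materialization case the SPUISelDAGtoDAG.cpp change is aimed at.
The function name and constant below are made up; only the general pattern (a
64-bit constant that the SPU backend has to build through its 128-bit vector
registers, i.e. a v2i64 build_vector-style node) reflects the commit:

; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
; Hypothetical test: materialize a 64-bit constant that does not fit any
; 32-bit immediate form. With this commit, the matching is assumed to happen
; during instruction selection rather than during DAG combining.
target triple = "spu"

define i64 @i64_const() {
    ret i64 123456789012345
}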
Diffstat (limited to 'test/CodeGen')

-rw-r--r--  test/CodeGen/CellSPU/fcmp32.ll (renamed from test/CodeGen/CellSPU/fcmp.ll)   15
-rw-r--r--  test/CodeGen/CellSPU/fcmp64.ll                                                 7
-rw-r--r--  test/CodeGen/CellSPU/fneg-fabs.ll                                             11
-rw-r--r--  test/CodeGen/CellSPU/select_bits.ll                                          114
-rw-r--r--  test/CodeGen/CellSPU/shift_ops.ll                                              6
5 files changed, 84 insertions, 69 deletions
diff --git a/test/CodeGen/CellSPU/fcmp.ll b/test/CodeGen/CellSPU/fcmp32.ll
index aad7717..27a659e 100644
--- a/test/CodeGen/CellSPU/fcmp.ll
+++ b/test/CodeGen/CellSPU/fcmp32.ll
@@ -1,22 +1,23 @@
 ; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
 ; RUN: grep fceq %t1.s | count 1
 ; RUN: grep fcmeq %t1.s | count 1
-;
-; This file includes standard floating point arithmetic instructions
+
 target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
 target triple = "spu"
 
+; Exercise the floating point comparison operators for f32:
+
 declare double @fabs(double)
 declare float @fabsf(float)
 
 define i1 @fcmp_eq(float %arg1, float %arg2) {
-    %A = fcmp oeq float %arg1, %arg2        ; <float> [#uses=1]
+    %A = fcmp oeq float %arg1, %arg2
     ret i1 %A
 }
 
 define i1 @fcmp_mag_eq(float %arg1, float %arg2) {
-    %A = call float @fabsf(float %arg1)     ; <float> [#uses=1]
-    %B = call float @fabsf(float %arg2)     ; <float> [#uses=1]
-    %C = fcmp oeq float %A, %B              ; <float> [#uses=1]
-    ret i1 %C
+    %1 = call float @fabsf(float %arg1)
+    %2 = call float @fabsf(float %arg2)
+    %3 = fcmp oeq float %1, %2
+    ret i1 %3
 }
diff --git a/test/CodeGen/CellSPU/fcmp64.ll b/test/CodeGen/CellSPU/fcmp64.ll
new file mode 100644
index 0000000..1906bfe
--- /dev/null
+++ b/test/CodeGen/CellSPU/fcmp64.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
+
+define i1 @fcmp_eq_setcc_f64(double %arg1, double %arg2) nounwind {
+entry:
+    %A = fcmp oeq double %arg1, %arg2
+    ret i1 %A
+}
diff --git a/test/CodeGen/CellSPU/fneg-fabs.ll b/test/CodeGen/CellSPU/fneg-fabs.ll
index 70220a5..b6eca10 100644
--- a/test/CodeGen/CellSPU/fneg-fabs.ll
+++ b/test/CodeGen/CellSPU/fneg-fabs.ll
@@ -1,9 +1,10 @@
 ; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
-; RUN: grep fsmbi %t1.s | count 2
+; RUN: grep fsmbi %t1.s | count 3
 ; RUN: grep 32768 %t1.s | count 2
 ; RUN: grep xor %t1.s | count 4
-; RUN: grep and %t1.s | count 4
-; RUN: grep andbi %t1.s | count 2
+; RUN: grep and %t1.s | count 5
+; RUN: grep andbi %t1.s | count 3
+
 target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
 target triple = "spu"
 
@@ -33,11 +34,11 @@ declare double @fabs(double)
 declare float @fabsf(float)
 
 define double @fabs_dp(double %X) {
-    %Y = call double @fabs( double %X )     ; <double> [#uses=1]
+    %Y = call double @fabs( double %X )
     ret double %Y
 }
 
 define float @fabs_sp(float %X) {
-    %Y = call float @fabsf( float %X )      ; <float> [#uses=1]
+    %Y = call float @fabsf( float %X )
     ret float %Y
 }
diff --git a/test/CodeGen/CellSPU/select_bits.ll b/test/CodeGen/CellSPU/select_bits.ll
index 3a7334d..e83e476 100644
--- a/test/CodeGen/CellSPU/select_bits.ll
+++ b/test/CodeGen/CellSPU/select_bits.ll
@@ -1,5 +1,5 @@
 ; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
-; RUN: grep selb %t1.s | count 280
+; RUN: grep selb %t1.s | count 56
 
 target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
 target triple = "spu"
@@ -9,7 +9,7 @@
 ;-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
 
 ; (or (and rC, rB), (and (not rC), rA))
-define <2 x i64> @selb_v2i64_01(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
+define <2 x i64> @selectbits_v2i64_01(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
     %C = and <2 x i64> %rC, %rB
     %A = xor <2 x i64> %rC, < i64 -1, i64 -1 >
     %B = and <2 x i64> %A, %rA
@@ -18,7 +18,7 @@
define <2 x i64> @selb_v2i64_01(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { } ; (or (and rB, rC), (and (not rC), rA)) -define <2 x i64> @selb_v2i64_02(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { +define <2 x i64> @selectbits_v2i64_02(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { %C = and <2 x i64> %rB, %rC %A = xor <2 x i64> %rC, < i64 -1, i64 -1 > %B = and <2 x i64> %A, %rA @@ -27,7 +27,7 @@ define <2 x i64> @selb_v2i64_02(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { } ; (or (and (not rC), rA), (and rB, rC)) -define <2 x i64> @selb_v2i64_03(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { +define <2 x i64> @selectbits_v2i64_03(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { %A = xor <2 x i64> %rC, < i64 -1, i64 -1 > %B = and <2 x i64> %A, %rA %C = and <2 x i64> %rB, %rC @@ -36,7 +36,7 @@ define <2 x i64> @selb_v2i64_03(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { } ; (or (and (not rC), rA), (and rC, rB)) -define <2 x i64> @selb_v2i64_04(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { +define <2 x i64> @selectbits_v2i64_04(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { %A = xor <2 x i64> %rC, < i64 -1, i64 -1 > %B = and <2 x i64> %A, %rA %C = and <2 x i64> %rC, %rB @@ -45,7 +45,7 @@ define <2 x i64> @selb_v2i64_04(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { } ; (or (and rC, rB), (and rA, (not rC))) -define <2 x i64> @selb_v2i64_05(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { +define <2 x i64> @selectbits_v2i64_05(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { %C = and <2 x i64> %rC, %rB %A = xor <2 x i64> %rC, < i64 -1, i64 -1 > %B = and <2 x i64> %rA, %A @@ -54,7 +54,7 @@ define <2 x i64> @selb_v2i64_05(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { } ; (or (and rB, rC), (and rA, (not rC))) -define <2 x i64> @selb_v2i64_06(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { +define <2 x i64> @selectbits_v2i64_06(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { %C = and <2 x i64> %rB, %rC %A = xor <2 x i64> %rC, < i64 -1, i64 -1 > %B = and <2 x i64> %rA, %A @@ -63,7 +63,7 @@ define <2 x i64> @selb_v2i64_06(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { } ; (or (and rA, (not rC)), (and rB, rC)) -define <2 x i64> @selb_v2i64_07(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { +define <2 x i64> @selectbits_v2i64_07(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { %A = xor <2 x i64> %rC, < i64 -1, i64 -1 > %B = and <2 x i64> %rA, %A %C = and <2 x i64> %rB, %rC @@ -72,7 +72,7 @@ define <2 x i64> @selb_v2i64_07(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { } ; (or (and rA, (not rC)), (and rC, rB)) -define <2 x i64> @selb_v2i64_08(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { +define <2 x i64> @selectbits_v2i64_08(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { %A = xor <2 x i64> %rC, < i64 -1, i64 -1 > %B = and <2 x i64> %rA, %A %C = and <2 x i64> %rC, %rB @@ -85,7 +85,7 @@ define <2 x i64> @selb_v2i64_08(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) { ;-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ ; (or (and rC, rB), (and (not rC), rA)) -define <4 x i32> @selb_v4i32_01(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { +define <4 x i32> @selectbits_v4i32_01(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { %C = and <4 x i32> %rC, %rB %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1 > %B = and <4 x i32> %A, %rA @@ -94,7 +94,7 @@ define <4 x i32> @selb_v4i32_01(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { } ; (or (and rB, rC), (and (not rC), rA)) -define <4 x i32> @selb_v4i32_02(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { +define <4 x i32> 
@selectbits_v4i32_02(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { %C = and <4 x i32> %rB, %rC %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1 > %B = and <4 x i32> %A, %rA @@ -103,7 +103,7 @@ define <4 x i32> @selb_v4i32_02(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { } ; (or (and (not rC), rA), (and rB, rC)) -define <4 x i32> @selb_v4i32_03(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { +define <4 x i32> @selectbits_v4i32_03(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1 > %B = and <4 x i32> %A, %rA %C = and <4 x i32> %rB, %rC @@ -112,7 +112,7 @@ define <4 x i32> @selb_v4i32_03(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { } ; (or (and (not rC), rA), (and rC, rB)) -define <4 x i32> @selb_v4i32_04(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { +define <4 x i32> @selectbits_v4i32_04(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1> %B = and <4 x i32> %A, %rA %C = and <4 x i32> %rC, %rB @@ -121,7 +121,7 @@ define <4 x i32> @selb_v4i32_04(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { } ; (or (and rC, rB), (and rA, (not rC))) -define <4 x i32> @selb_v4i32_05(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { +define <4 x i32> @selectbits_v4i32_05(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { %C = and <4 x i32> %rC, %rB %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1> %B = and <4 x i32> %rA, %A @@ -130,7 +130,7 @@ define <4 x i32> @selb_v4i32_05(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { } ; (or (and rB, rC), (and rA, (not rC))) -define <4 x i32> @selb_v4i32_06(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { +define <4 x i32> @selectbits_v4i32_06(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { %C = and <4 x i32> %rB, %rC %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1> %B = and <4 x i32> %rA, %A @@ -139,7 +139,7 @@ define <4 x i32> @selb_v4i32_06(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { } ; (or (and rA, (not rC)), (and rB, rC)) -define <4 x i32> @selb_v4i32_07(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { +define <4 x i32> @selectbits_v4i32_07(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1> %B = and <4 x i32> %rA, %A %C = and <4 x i32> %rB, %rC @@ -148,7 +148,7 @@ define <4 x i32> @selb_v4i32_07(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { } ; (or (and rA, (not rC)), (and rC, rB)) -define <4 x i32> @selb_v4i32_08(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { +define <4 x i32> @selectbits_v4i32_08(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1> %B = and <4 x i32> %rA, %A %C = and <4 x i32> %rC, %rB @@ -161,7 +161,7 @@ define <4 x i32> @selb_v4i32_08(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) { ;-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ ; (or (and rC, rB), (and (not rC), rA)) -define <8 x i16> @selb_v8i16_01(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { +define <8 x i16> @selectbits_v8i16_01(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { %C = and <8 x i16> %rC, %rB %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 > @@ -171,7 +171,7 @@ define <8 x i16> @selb_v8i16_01(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { } ; (or (and rB, rC), (and (not rC), rA)) -define <8 x i16> @selb_v8i16_02(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { +define <8 x i16> @selectbits_v8i16_02(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { %C = and <8 x i16> %rB, %rC %A = xor <8 x 
i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 > @@ -181,7 +181,7 @@ define <8 x i16> @selb_v8i16_02(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { } ; (or (and (not rC), rA), (and rB, rC)) -define <8 x i16> @selb_v8i16_03(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { +define <8 x i16> @selectbits_v8i16_03(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 > %B = and <8 x i16> %A, %rA @@ -191,7 +191,7 @@ define <8 x i16> @selb_v8i16_03(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { } ; (or (and (not rC), rA), (and rC, rB)) -define <8 x i16> @selb_v8i16_04(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { +define <8 x i16> @selectbits_v8i16_04(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 > %B = and <8 x i16> %A, %rA @@ -201,7 +201,7 @@ define <8 x i16> @selb_v8i16_04(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { } ; (or (and rC, rB), (and rA, (not rC))) -define <8 x i16> @selb_v8i16_05(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { +define <8 x i16> @selectbits_v8i16_05(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { %C = and <8 x i16> %rC, %rB %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 > @@ -211,7 +211,7 @@ define <8 x i16> @selb_v8i16_05(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { } ; (or (and rB, rC), (and rA, (not rC))) -define <8 x i16> @selb_v8i16_06(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { +define <8 x i16> @selectbits_v8i16_06(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { %C = and <8 x i16> %rB, %rC %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 > @@ -221,7 +221,7 @@ define <8 x i16> @selb_v8i16_06(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { } ; (or (and rA, (not rC)), (and rB, rC)) -define <8 x i16> @selb_v8i16_07(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { +define <8 x i16> @selectbits_v8i16_07(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 > %B = and <8 x i16> %rA, %A @@ -231,7 +231,7 @@ define <8 x i16> @selb_v8i16_07(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { } ; (or (and rA, (not rC)), (and rC, rB)) -define <8 x i16> @selb_v8i16_08(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { +define <8 x i16> @selectbits_v8i16_08(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 > %B = and <8 x i16> %rA, %A @@ -245,7 +245,7 @@ define <8 x i16> @selb_v8i16_08(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) { ;-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ ; (or (and rC, rB), (and (not rC), rA)) -define <16 x i8> @selb_v16i8_01(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { +define <16 x i8> @selectbits_v16i8_01(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { %C = and <16 x i8> %rC, %rB %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, @@ -257,7 +257,7 @@ define <16 x i8> @selb_v16i8_01(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { } ; (or (and rB, rC), (and (not rC), rA)) -define <16 x i8> @selb_v16i8_02(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { +define <16 x i8> @selectbits_v16i8_02(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { %C = and <16 x i8> %rB, %rC %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, @@ -269,7 
+269,7 @@ define <16 x i8> @selb_v16i8_02(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { } ; (or (and (not rC), rA), (and rB, rC)) -define <16 x i8> @selb_v16i8_03(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { +define <16 x i8> @selectbits_v16i8_03(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, @@ -281,7 +281,7 @@ define <16 x i8> @selb_v16i8_03(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { } ; (or (and (not rC), rA), (and rC, rB)) -define <16 x i8> @selb_v16i8_04(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { +define <16 x i8> @selectbits_v16i8_04(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, @@ -293,7 +293,7 @@ define <16 x i8> @selb_v16i8_04(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { } ; (or (and rC, rB), (and rA, (not rC))) -define <16 x i8> @selb_v16i8_05(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { +define <16 x i8> @selectbits_v16i8_05(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { %C = and <16 x i8> %rC, %rB %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, @@ -305,7 +305,7 @@ define <16 x i8> @selb_v16i8_05(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { } ; (or (and rB, rC), (and rA, (not rC))) -define <16 x i8> @selb_v16i8_06(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { +define <16 x i8> @selectbits_v16i8_06(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { %C = and <16 x i8> %rB, %rC %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, @@ -317,7 +317,7 @@ define <16 x i8> @selb_v16i8_06(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { } ; (or (and rA, (not rC)), (and rB, rC)) -define <16 x i8> @selb_v16i8_07(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { +define <16 x i8> @selectbits_v16i8_07(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, @@ -329,7 +329,7 @@ define <16 x i8> @selb_v16i8_07(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { } ; (or (and rA, (not rC)), (and rC, rB)) -define <16 x i8> @selb_v16i8_08(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { +define <16 x i8> @selectbits_v16i8_08(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, @@ -345,7 +345,7 @@ define <16 x i8> @selb_v16i8_08(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) { ;-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ ; (or (and rC, rB), (and (not rC), rA)) -define i32 @selb_i32_01(i32 %rA, i32 %rB, i32 %rC) { +define i32 @selectbits_i32_01(i32 %rA, i32 %rB, i32 %rC) { %C = and i32 %rC, %rB %A = xor i32 %rC, -1 %B = and i32 %A, %rA @@ -354,7 +354,7 @@ define i32 @selb_i32_01(i32 %rA, i32 %rB, i32 %rC) { } ; (or (and rB, rC), (and (not rC), rA)) -define i32 @selb_i32_02(i32 %rA, i32 %rB, i32 %rC) { +define i32 @selectbits_i32_02(i32 %rA, i32 %rB, i32 %rC) { %C = and i32 %rB, %rC %A = xor i32 %rC, -1 %B = and i32 %A, %rA @@ -363,7 +363,7 @@ define i32 @selb_i32_02(i32 %rA, i32 %rB, i32 %rC) { } ; (or (and (not rC), rA), (and rB, rC)) -define i32 @selb_i32_03(i32 %rA, i32 %rB, i32 %rC) { +define i32 @selectbits_i32_03(i32 %rA, i32 %rB, i32 %rC) { %A = xor i32 %rC, -1 %B = and i32 %A, %rA %C = and i32 %rB, %rC @@ -372,7 +372,7 @@ define i32 @selb_i32_03(i32 %rA, i32 %rB, i32 %rC) { } ; (or 
(and (not rC), rA), (and rC, rB)) -define i32 @selb_i32_04(i32 %rA, i32 %rB, i32 %rC) { +define i32 @selectbits_i32_04(i32 %rA, i32 %rB, i32 %rC) { %A = xor i32 %rC, -1 %B = and i32 %A, %rA %C = and i32 %rC, %rB @@ -381,7 +381,7 @@ define i32 @selb_i32_04(i32 %rA, i32 %rB, i32 %rC) { } ; (or (and rC, rB), (and rA, (not rC))) -define i32 @selb_i32_05(i32 %rA, i32 %rB, i32 %rC) { +define i32 @selectbits_i32_05(i32 %rA, i32 %rB, i32 %rC) { %C = and i32 %rC, %rB %A = xor i32 %rC, -1 %B = and i32 %rA, %A @@ -390,7 +390,7 @@ define i32 @selb_i32_05(i32 %rA, i32 %rB, i32 %rC) { } ; (or (and rB, rC), (and rA, (not rC))) -define i32 @selb_i32_06(i32 %rA, i32 %rB, i32 %rC) { +define i32 @selectbits_i32_06(i32 %rA, i32 %rB, i32 %rC) { %C = and i32 %rB, %rC %A = xor i32 %rC, -1 %B = and i32 %rA, %A @@ -399,7 +399,7 @@ define i32 @selb_i32_06(i32 %rA, i32 %rB, i32 %rC) { } ; (or (and rA, (not rC)), (and rB, rC)) -define i32 @selb_i32_07(i32 %rA, i32 %rB, i32 %rC) { +define i32 @selectbits_i32_07(i32 %rA, i32 %rB, i32 %rC) { %A = xor i32 %rC, -1 %B = and i32 %rA, %A %C = and i32 %rB, %rC @@ -408,7 +408,7 @@ define i32 @selb_i32_07(i32 %rA, i32 %rB, i32 %rC) { } ; (or (and rA, (not rC)), (and rC, rB)) -define i32 @selb_i32_08(i32 %rA, i32 %rB, i32 %rC) { +define i32 @selectbits_i32_08(i32 %rA, i32 %rB, i32 %rC) { %A = xor i32 %rC, -1 %B = and i32 %rA, %A %C = and i32 %rC, %rB @@ -421,7 +421,7 @@ define i32 @selb_i32_08(i32 %rA, i32 %rB, i32 %rC) { ;-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ ; (or (and rC, rB), (and (not rC), rA)) -define i16 @selb_i16_01(i16 %rA, i16 %rB, i16 %rC) { +define i16 @selectbits_i16_01(i16 %rA, i16 %rB, i16 %rC) { %C = and i16 %rC, %rB %A = xor i16 %rC, -1 %B = and i16 %A, %rA @@ -430,7 +430,7 @@ define i16 @selb_i16_01(i16 %rA, i16 %rB, i16 %rC) { } ; (or (and rB, rC), (and (not rC), rA)) -define i16 @selb_i16_02(i16 %rA, i16 %rB, i16 %rC) { +define i16 @selectbits_i16_02(i16 %rA, i16 %rB, i16 %rC) { %C = and i16 %rB, %rC %A = xor i16 %rC, -1 %B = and i16 %A, %rA @@ -439,7 +439,7 @@ define i16 @selb_i16_02(i16 %rA, i16 %rB, i16 %rC) { } ; (or (and (not rC), rA), (and rB, rC)) -define i16 @selb_i16_03(i16 %rA, i16 %rB, i16 %rC) { +define i16 @selectbits_i16_03(i16 %rA, i16 %rB, i16 %rC) { %A = xor i16 %rC, -1 %B = and i16 %A, %rA %C = and i16 %rB, %rC @@ -448,7 +448,7 @@ define i16 @selb_i16_03(i16 %rA, i16 %rB, i16 %rC) { } ; (or (and (not rC), rA), (and rC, rB)) -define i16 @selb_i16_04(i16 %rA, i16 %rB, i16 %rC) { +define i16 @selectbits_i16_04(i16 %rA, i16 %rB, i16 %rC) { %A = xor i16 %rC, -1 %B = and i16 %A, %rA %C = and i16 %rC, %rB @@ -457,7 +457,7 @@ define i16 @selb_i16_04(i16 %rA, i16 %rB, i16 %rC) { } ; (or (and rC, rB), (and rA, (not rC))) -define i16 @selb_i16_05(i16 %rA, i16 %rB, i16 %rC) { +define i16 @selectbits_i16_05(i16 %rA, i16 %rB, i16 %rC) { %C = and i16 %rC, %rB %A = xor i16 %rC, -1 %B = and i16 %rA, %A @@ -466,7 +466,7 @@ define i16 @selb_i16_05(i16 %rA, i16 %rB, i16 %rC) { } ; (or (and rB, rC), (and rA, (not rC))) -define i16 @selb_i16_06(i16 %rA, i16 %rB, i16 %rC) { +define i16 @selectbits_i16_06(i16 %rA, i16 %rB, i16 %rC) { %C = and i16 %rB, %rC %A = xor i16 %rC, -1 %B = and i16 %rA, %A @@ -475,7 +475,7 @@ define i16 @selb_i16_06(i16 %rA, i16 %rB, i16 %rC) { } ; (or (and rA, (not rC)), (and rB, rC)) -define i16 @selb_i16_07(i16 %rA, i16 %rB, i16 %rC) { +define i16 @selectbits_i16_07(i16 %rA, i16 %rB, i16 %rC) { %A = xor i16 %rC, -1 %B = and i16 %rA, %A %C = and i16 %rB, %rC @@ -484,7 +484,7 @@ define i16 
@selb_i16_07(i16 %rA, i16 %rB, i16 %rC) { } ; (or (and rA, (not rC)), (and rC, rB)) -define i16 @selb_i16_08(i16 %rA, i16 %rB, i16 %rC) { +define i16 @selectbits_i16_08(i16 %rA, i16 %rB, i16 %rC) { %A = xor i16 %rC, -1 %B = and i16 %rA, %A %C = and i16 %rC, %rB @@ -497,7 +497,7 @@ define i16 @selb_i16_08(i16 %rA, i16 %rB, i16 %rC) { ;-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ ; (or (and rC, rB), (and (not rC), rA)) -define i8 @selb_i8_01(i8 %rA, i8 %rB, i8 %rC) { +define i8 @selectbits_i8_01(i8 %rA, i8 %rB, i8 %rC) { %C = and i8 %rC, %rB %A = xor i8 %rC, -1 %B = and i8 %A, %rA @@ -506,7 +506,7 @@ define i8 @selb_i8_01(i8 %rA, i8 %rB, i8 %rC) { } ; (or (and rB, rC), (and (not rC), rA)) -define i8 @selb_i8_02(i8 %rA, i8 %rB, i8 %rC) { +define i8 @selectbits_i8_02(i8 %rA, i8 %rB, i8 %rC) { %C = and i8 %rB, %rC %A = xor i8 %rC, -1 %B = and i8 %A, %rA @@ -515,7 +515,7 @@ define i8 @selb_i8_02(i8 %rA, i8 %rB, i8 %rC) { } ; (or (and (not rC), rA), (and rB, rC)) -define i8 @selb_i8_03(i8 %rA, i8 %rB, i8 %rC) { +define i8 @selectbits_i8_03(i8 %rA, i8 %rB, i8 %rC) { %A = xor i8 %rC, -1 %B = and i8 %A, %rA %C = and i8 %rB, %rC @@ -524,7 +524,7 @@ define i8 @selb_i8_03(i8 %rA, i8 %rB, i8 %rC) { } ; (or (and (not rC), rA), (and rC, rB)) -define i8 @selb_i8_04(i8 %rA, i8 %rB, i8 %rC) { +define i8 @selectbits_i8_04(i8 %rA, i8 %rB, i8 %rC) { %A = xor i8 %rC, -1 %B = and i8 %A, %rA %C = and i8 %rC, %rB @@ -533,7 +533,7 @@ define i8 @selb_i8_04(i8 %rA, i8 %rB, i8 %rC) { } ; (or (and rC, rB), (and rA, (not rC))) -define i8 @selb_i8_05(i8 %rA, i8 %rB, i8 %rC) { +define i8 @selectbits_i8_05(i8 %rA, i8 %rB, i8 %rC) { %C = and i8 %rC, %rB %A = xor i8 %rC, -1 %B = and i8 %rA, %A @@ -542,7 +542,7 @@ define i8 @selb_i8_05(i8 %rA, i8 %rB, i8 %rC) { } ; (or (and rB, rC), (and rA, (not rC))) -define i8 @selb_i8_06(i8 %rA, i8 %rB, i8 %rC) { +define i8 @selectbits_i8_06(i8 %rA, i8 %rB, i8 %rC) { %C = and i8 %rB, %rC %A = xor i8 %rC, -1 %B = and i8 %rA, %A @@ -551,7 +551,7 @@ define i8 @selb_i8_06(i8 %rA, i8 %rB, i8 %rC) { } ; (or (and rA, (not rC)), (and rB, rC)) -define i8 @selb_i8_07(i8 %rA, i8 %rB, i8 %rC) { +define i8 @selectbits_i8_07(i8 %rA, i8 %rB, i8 %rC) { %A = xor i8 %rC, -1 %B = and i8 %rA, %A %C = and i8 %rB, %rC @@ -560,7 +560,7 @@ define i8 @selb_i8_07(i8 %rA, i8 %rB, i8 %rC) { } ; (or (and rA, (not rC)), (and rC, rB)) -define i8 @selb_i8_08(i8 %rA, i8 %rB, i8 %rC) { +define i8 @selectbits_i8_08(i8 %rA, i8 %rB, i8 %rC) { %A = xor i8 %rC, -1 %B = and i8 %rA, %A %C = and i8 %rC, %rB diff --git a/test/CodeGen/CellSPU/shift_ops.ll b/test/CodeGen/CellSPU/shift_ops.ll index 2df2f96..3c26baa 100644 --- a/test/CodeGen/CellSPU/shift_ops.ll +++ b/test/CodeGen/CellSPU/shift_ops.ll @@ -275,3 +275,9 @@ define i64 @ashr_i64_3(i64 %arg1, i32 %shift) { %2 = ashr i64 %arg1, %1 ret i64 %2 } + +define i32 @hi32_i64(i64 %arg) { + %1 = lshr i64 %arg, 32 + %2 = trunc i64 %1 to i32 + ret i32 %2 +} |