Diffstat (limited to 'test/CodeGen/PowerPC')
-rw-r--r--  test/CodeGen/PowerPC/vec_shuffle.ll | 16 ++++++++++++++++
-rw-r--r--  test/CodeGen/PowerPC/vec_spat.ll    | 12 +++++++++++-
2 files changed, 27 insertions(+), 1 deletion(-)
diff --git a/test/CodeGen/PowerPC/vec_shuffle.ll b/test/CodeGen/PowerPC/vec_shuffle.ll
index efea8f2..44562fe1 100644
--- a/test/CodeGen/PowerPC/vec_shuffle.ll
+++ b/test/CodeGen/PowerPC/vec_shuffle.ll
@@ -253,6 +253,21 @@ void %tw_h(<4 x int>* %A, <4 x int>* %B) {
entry:
%tmp = load <4 x int>* %A ; <<4 x int>> [#uses=2]
%tmp2 = load <4 x int>* %B ; <<4 x int>> [#uses=2]
+ %tmp = extractelement <4 x int> %tmp2, uint 0 ; <int> [#uses=1]
+ %tmp3 = extractelement <4 x int> %tmp, uint 0 ; <int> [#uses=1]
+ %tmp4 = extractelement <4 x int> %tmp2, uint 1 ; <int> [#uses=1]
+ %tmp5 = extractelement <4 x int> %tmp, uint 1 ; <int> [#uses=1]
+ %tmp6 = insertelement <4 x int> undef, int %tmp, uint 0 ; <<4 x int>> [#uses=1]
+ %tmp7 = insertelement <4 x int> %tmp6, int %tmp3, uint 1 ; <<4 x int>> [#uses=1]
+ %tmp8 = insertelement <4 x int> %tmp7, int %tmp4, uint 2 ; <<4 x int>> [#uses=1]
+ %tmp9 = insertelement <4 x int> %tmp8, int %tmp5, uint 3 ; <<4 x int>> [#uses=1]
+ store <4 x int> %tmp9, <4 x int>* %A
+ ret void
+}
+
+void %tw_h_flop(<4 x int>* %A, <4 x int>* %B) {
+ %tmp = load <4 x int>* %A ; <<4 x int>> [#uses=2]
+ %tmp2 = load <4 x int>* %B ; <<4 x int>> [#uses=2]
%tmp = extractelement <4 x int> %tmp, uint 0 ; <int> [#uses=1]
%tmp3 = extractelement <4 x int> %tmp2, uint 0 ; <int> [#uses=1]
%tmp4 = extractelement <4 x int> %tmp, uint 1 ; <int> [#uses=1]
@@ -265,6 +280,7 @@ entry:
ret void
}
+
void %VMRG_UNARY_tb_l(<16 x sbyte>* %A, <16 x sbyte>* %B) {
entry:
%tmp = load <16 x sbyte>* %A ; <<16 x sbyte>> [#uses=16]
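
(Aside, not part of the diff: the hunks above duplicate tw_h's body with the extractelement operands swapped, so tw_h now builds <B[0], A[0], B[1], A[1]> while the new tw_h_flop builds <A[0], B[0], A[1], B[1]>; the PowerPC shuffle matcher must recognize vmrghw with its inputs in either order. A minimal C/AltiVec sketch of the two patterns, assuming altivec.h on a VMX-capable target; the function names merely mirror the tests:

    #include <altivec.h>

    /* vec_mergeh interleaves the high halves of its operands and
       lowers to a single vmrghw for 4 x int vectors. */
    vector signed int tw_h(vector signed int a, vector signed int b) {
        return vec_mergeh(b, a);    /* vmrghw b, a -> {b0, a0, b1, a1} */
    }

    vector signed int tw_h_flop(vector signed int a, vector signed int b) {
        return vec_mergeh(a, b);    /* vmrghw a, b -> {a0, b0, a1, b1} */
    }
)
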
diff --git a/test/CodeGen/PowerPC/vec_spat.ll b/test/CodeGen/PowerPC/vec_spat.ll
index c8c6e4a..f6587b0 100644
--- a/test/CodeGen/PowerPC/vec_spat.ll
+++ b/test/CodeGen/PowerPC/vec_spat.ll
@@ -1,7 +1,7 @@
; Test that vectors are scalarized/lowered correctly.
; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vspltw | wc -l | grep 2 &&
; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g3 | grep stfs | wc -l | grep 4 &&
-; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vsplti | wc -l | grep 2 &&
+; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vsplti | wc -l | grep 3 &&
; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vsplth | wc -l | grep 1
%f4 = type <4 x float>
@@ -59,3 +59,13 @@ void %splat_h(short %tmp, <16 x ubyte>* %dst) {
ret void
}
+void %spltish(<16 x ubyte>* %A, <16 x ubyte>* %B) {
+ ; Gets converted to 16 x ubyte
+ %tmp = load <16 x ubyte>* %B
+ %tmp = cast <16 x ubyte> %tmp to <16 x sbyte>
+ %tmp4 = sub <16 x sbyte> %tmp, cast (<8 x short> < short 15, short 15, short 15, short 15, short 15, short 15, short 15, short 15 > to <16 x sbyte>)
+ %tmp4 = cast <16 x sbyte> %tmp4 to <16 x ubyte>
+ store <16 x ubyte> %tmp4, <16 x ubyte>* %A
+ ret void
+}
+
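
(Aside, not part of the diff: the RUN-line change from "grep 2" to "grep 3" reflects the new spltish test, whose subtrahend is a splat of the halfword 15 cast to bytes; the G5 backend can presumably materialize that constant with a single vspltish (splat-immediate signed halfword), adding a third vsplti* instruction to the output. A rough C/AltiVec equivalent of the added IR, offered as a sketch under that assumption; the function name is hypothetical:

    #include <altivec.h>

    void spltish_equiv(vector unsigned char *A, vector unsigned char *B) {
        /* vec_splat_s16(15) lowers to "vspltish v, 15"; reinterpreting
           the result as bytes gives the cast constant from the test. */
        vector signed short k15 = vec_splat_s16(15);
        *A = (vector unsigned char)
                 vec_sub((vector signed char)*B, (vector signed char)k15);
    }
)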