| author | Mon P Wang <wangmp@apple.com> | 2009-01-28 08:13:56 +0000 |
|---|---|---|
| committer | Mon P Wang <wangmp@apple.com> | 2009-01-28 08:13:56 +0000 |
| commit | f4646d9902fcdcb121dbfbabbc2ee00bb41d7ca1 (patch) | |
| tree | 85ddefbeecd56a593cbb7bc47a8e684c2578c139 | |
| parent | 3becd093cc308578ca979edbbab0f76d98cef4ec (diff) | |
Added sse test patterns for r62979 and r63193.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@63194 91177308-0d34-0410-b5e6-96231b3b80d8
| mode | file | insertions |
|---|---|---|
| -rw-r--r-- | test/CodeGen/X86/vshift-1.ll | 65 |
| -rw-r--r-- | test/CodeGen/X86/vshift-2.ll | 64 |
| -rw-r--r-- | test/CodeGen/X86/vshift-3.ll | 54 |
| -rw-r--r-- | test/CodeGen/X86/vshift-4.ll | 71 |
4 files changed, 254 insertions, 0 deletions
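Each new test follows the same pattern: the RUN lines assemble the IR, compile it with llc for x86 with SSE2 enabled (and MMX disabled), and grep the generated assembly for the expected packed-shift instructions with the expected counts. As a rough sketch of driving the first test by hand (assuming an LLVM build of this vintage is on PATH; `count` is a helper from the LLVM test harness, approximated here with `grep -c`):

```sh
# Hypothetical manual run of vshift-1.ll outside the test harness.
# Assumes llvm-as and llc from an LLVM trunk build circa r63194 are on PATH.
llvm-as < test/CodeGen/X86/vshift-1.ll | \
  llc -march=x86 -mattr=+sse2 -disable-mmx -o vshift-1.s -f

# The RUN lines expect each packed shift to appear exactly twice;
# `grep -c` stands in for the test suite's `count` utility.
grep -c psllq vshift-1.s   # expected: 2
grep -c pslld vshift-1.s   # expected: 2
grep -c psllw vshift-1.s   # expected: 2
```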
```diff
diff --git a/test/CodeGen/X86/vshift-1.ll b/test/CodeGen/X86/vshift-1.ll
new file mode 100644
index 0000000..d7a20e4
--- /dev/null
+++ b/test/CodeGen/X86/vshift-1.ll
@@ -0,0 +1,65 @@
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -disable-mmx -o %t -f
+; RUN: grep psllq %t | count 2
+; RUN: grep pslld %t | count 2
+; RUN: grep psllw %t | count 2
+
+; test vector shifts converted to proper SSE2 vector shifts when the shift
+; amounts are the same.
+
+define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
+entry:
+  %shl = shl <2 x i64> %val, < i64 32, i64 32 >
+  store <2 x i64> %shl, <2 x i64>* %dst
+  ret void
+}
+
+define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, i64 %amt) nounwind {
+entry:
+  %0 = insertelement <2 x i64> undef, i64 %amt, i32 0
+  %1 = insertelement <2 x i64> %0, i64 %amt, i32 1
+  %shl = shl <2 x i64> %val, %1
+  store <2 x i64> %shl, <2 x i64>* %dst
+  ret void
+}
+
+
+define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
+entry:
+  %shl = shl <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
+  store <4 x i32> %shl, <4 x i32>* %dst
+  ret void
+}
+
+define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
+entry:
+  %0 = insertelement <4 x i32> undef, i32 %amt, i32 0
+  %1 = insertelement <4 x i32> %0, i32 %amt, i32 1
+  %2 = insertelement <4 x i32> %1, i32 %amt, i32 2
+  %3 = insertelement <4 x i32> %2, i32 %amt, i32 3
+  %shl = shl <4 x i32> %val, %3
+  store <4 x i32> %shl, <4 x i32>* %dst
+  ret void
+}
+
+define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
+entry:
+  %shl = shl <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
+  store <8 x i16> %shl, <8 x i16>* %dst
+  ret void
+}
+
+define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
+entry:
+  %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
+  %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
+  %2 = insertelement <8 x i16> %0, i16 %amt, i32 2
+  %3 = insertelement <8 x i16> %0, i16 %amt, i32 3
+  %4 = insertelement <8 x i16> %0, i16 %amt, i32 4
+  %5 = insertelement <8 x i16> %0, i16 %amt, i32 5
+  %6 = insertelement <8 x i16> %0, i16 %amt, i32 6
+  %7 = insertelement <8 x i16> %0, i16 %amt, i32 7
+  %shl = shl <8 x i16> %val, %7
+  store <8 x i16> %shl, <8 x i16>* %dst
+  ret void
+}
+
diff --git a/test/CodeGen/X86/vshift-2.ll b/test/CodeGen/X86/vshift-2.ll
new file mode 100644
index 0000000..0807174
--- /dev/null
+++ b/test/CodeGen/X86/vshift-2.ll
@@ -0,0 +1,64 @@
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -disable-mmx -o %t -f
+; RUN: grep psrlq %t | count 2
+; RUN: grep psrld %t | count 2
+; RUN: grep psrlw %t | count 2
+
+; test vector shifts converted to proper SSE2 vector shifts when the shift
+; amounts are the same.
+
+define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
+entry:
+  %lshr = lshr <2 x i64> %val, < i64 32, i64 32 >
+  store <2 x i64> %lshr, <2 x i64>* %dst
+  ret void
+}
+
+define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, i64 %amt) nounwind {
+entry:
+  %0 = insertelement <2 x i64> undef, i64 %amt, i32 0
+  %1 = insertelement <2 x i64> %0, i64 %amt, i32 1
+  %lshr = lshr <2 x i64> %val, %1
+  store <2 x i64> %lshr, <2 x i64>* %dst
+  ret void
+}
+
+define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
+entry:
+  %lshr = lshr <4 x i32> %val, < i32 17, i32 17, i32 17, i32 17 >
+  store <4 x i32> %lshr, <4 x i32>* %dst
+  ret void
+}
+
+define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
+entry:
+  %0 = insertelement <4 x i32> undef, i32 %amt, i32 0
+  %1 = insertelement <4 x i32> %0, i32 %amt, i32 1
+  %2 = insertelement <4 x i32> %1, i32 %amt, i32 2
+  %3 = insertelement <4 x i32> %2, i32 %amt, i32 3
+  %lshr = lshr <4 x i32> %val, %3
+  store <4 x i32> %lshr, <4 x i32>* %dst
+  ret void
+}
+
+
+define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
+entry:
+  %lshr = lshr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
+  store <8 x i16> %lshr, <8 x i16>* %dst
+  ret void
+}
+
+define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
+entry:
+  %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
+  %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
+  %2 = insertelement <8 x i16> %0, i16 %amt, i32 2
+  %3 = insertelement <8 x i16> %0, i16 %amt, i32 3
+  %4 = insertelement <8 x i16> %0, i16 %amt, i32 4
+  %5 = insertelement <8 x i16> %0, i16 %amt, i32 5
+  %6 = insertelement <8 x i16> %0, i16 %amt, i32 6
+  %7 = insertelement <8 x i16> %0, i16 %amt, i32 7
+  %lshr = lshr <8 x i16> %val, %7
+  store <8 x i16> %lshr, <8 x i16>* %dst
+  ret void
+}
\ No newline at end of file
diff --git a/test/CodeGen/X86/vshift-3.ll b/test/CodeGen/X86/vshift-3.ll
new file mode 100644
index 0000000..eea8ad1
--- /dev/null
+++ b/test/CodeGen/X86/vshift-3.ll
@@ -0,0 +1,54 @@
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -disable-mmx -o %t -f
+; RUN: grep psrad %t | count 2
+; RUN: grep psraw %t | count 2
+
+; test vector shifts converted to proper SSE2 vector shifts when the shift
+; amounts are the same.
+
+; Note that x86 does have ashr
+define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
+entry:
+  %ashr = ashr <2 x i64> %val, < i64 32, i64 32 >
+  store <2 x i64> %ashr, <2 x i64>* %dst
+  ret void
+}
+
+define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
+entry:
+  %ashr = ashr <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
+  store <4 x i32> %ashr, <4 x i32>* %dst
+  ret void
+}
+
+define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
+entry:
+  %0 = insertelement <4 x i32> undef, i32 %amt, i32 0
+  %1 = insertelement <4 x i32> %0, i32 %amt, i32 1
+  %2 = insertelement <4 x i32> %1, i32 %amt, i32 2
+  %3 = insertelement <4 x i32> %2, i32 %amt, i32 3
+  %ashr = ashr <4 x i32> %val, %3
+  store <4 x i32> %ashr, <4 x i32>* %dst
+  ret void
+}
+
+define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
+entry:
+  %ashr = ashr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
+  store <8 x i16> %ashr, <8 x i16>* %dst
+  ret void
+}
+
+define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
+entry:
+  %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
+  %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
+  %2 = insertelement <8 x i16> %0, i16 %amt, i32 2
+  %3 = insertelement <8 x i16> %0, i16 %amt, i32 3
+  %4 = insertelement <8 x i16> %0, i16 %amt, i32 4
+  %5 = insertelement <8 x i16> %0, i16 %amt, i32 5
+  %6 = insertelement <8 x i16> %0, i16 %amt, i32 6
+  %7 = insertelement <8 x i16> %0, i16 %amt, i32 7
+  %ashr = ashr <8 x i16> %val, %7
+  store <8 x i16> %ashr, <8 x i16>* %dst
+  ret void
+}
\ No newline at end of file
diff --git a/test/CodeGen/X86/vshift-4.ll b/test/CodeGen/X86/vshift-4.ll
new file mode 100644
index 0000000..03ab95c
--- /dev/null
+++ b/test/CodeGen/X86/vshift-4.ll
@@ -0,0 +1,71 @@
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -disable-mmx -o %t -f
+; RUN: grep psllq %t | count 1
+; RUN: grep pslld %t | count 3
+; RUN: grep psllw %t | count 2
+
+; test vector shifts converted to proper SSE2 vector shifts when the shift
+; amounts are the same when using a shuffle splat.
+
+define void @shift1a(<2 x i64> %val, <2 x i64>* %dst, <2 x i64> %sh) nounwind {
+entry:
+  %shamt = shufflevector <2 x i64> %sh, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+  %shl = shl <2 x i64> %val, %shamt
+  store <2 x i64> %shl, <2 x i64>* %dst
+  ret void
+}
+
+define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, <2 x i64> %sh) nounwind {
+entry:
+  %shamt = shufflevector <2 x i64> %sh, <2 x i64> undef, <2 x i32> <i32 0, i32 1>
+  %shl = shl <2 x i64> %val, %shamt
+  store <2 x i64> %shl, <2 x i64>* %dst
+  ret void
+}
+
+define void @shift2a(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
+entry:
+  %shamt = shufflevector <2 x i32> %amt, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %shl = shl <4 x i32> %val, %shamt
+  store <4 x i32> %shl, <4 x i32>* %dst
+  ret void
+}
+
+define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
+entry:
+  %shamt = shufflevector <2 x i32> %amt, <2 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 1, i32 1>
+  %shl = shl <4 x i32> %val, %shamt
+  store <4 x i32> %shl, <4 x i32>* %dst
+  ret void
+}
+
+define void @shift2c(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
+entry:
+  %shamt = shufflevector <2 x i32> %amt, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %shl = shl <4 x i32> %val, %shamt
+  store <4 x i32> %shl, <4 x i32>* %dst
+  ret void
+}
+
+define void @shift3a(<8 x i16> %val, <8 x i16>* %dst, <8 x i16> %amt) nounwind {
+entry:
+  %shamt = shufflevector <8 x i16> %amt, <8 x i16> undef, <8 x i32> <i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6>
+  %shl = shl <8 x i16> %val, %shamt
+  store <8 x i16> %shl, <8 x i16>* %dst
+  ret void
+}
+
+define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
+entry:
+  %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
+  %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
+  %2 = insertelement <8 x i16> %0, i16 %amt, i32 2
+  %3 = insertelement <8 x i16> %0, i16 %amt, i32 3
+  %4 = insertelement <8 x i16> %0, i16 %amt, i32 4
+  %5 = insertelement <8 x i16> %0, i16 %amt, i32 5
+  %6 = insertelement <8 x i16> %0, i16 %amt, i32 6
+  %7 = insertelement <8 x i16> %0, i16 %amt, i32 7
+  %shl = shl <8 x i16> %val, %7
+  store <8 x i16> %shl, <8 x i16>* %dst
+  ret void
+}
+
```