| author | Nadav Rotem <nrotem@apple.com> | 2013-06-19 15:57:29 +0000 |
|---|---|---|
| committer | Nadav Rotem <nrotem@apple.com> | 2013-06-19 15:57:29 +0000 |
| commit | 7d180ac7b6135bd378739dc2f760a5b31cd12cae (patch) | |
| tree | a35b4fce4382d7f03231c3a7492bdc1effa00908 /test/Transforms | |
| parent | 7391366cdf55ca5f5faf97a9ac2b01da47e2749c (diff) | |
SLPVectorizer: start constructing chains at store sequences whose length is not a power of two.
The type <3 x i8> is common in graphics and we want to be able to vectorize it.
This change accelerates bullet by 12% and 471_omnetpp by 5%.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@184317 91177308-0d34-0410-b5e6-96231b3b80d8
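The kind of graphics code this targets is a short run of adjacent byte stores, such as one RGB pixel, fed by adjacent float loads. The sketch below is only illustrative: the function and variable names are not from the commit, but the shape mirrors the C snippet quoted in the comments of the new test.

```c
/* Hypothetical example of a non-power-of-two store chain (3 consecutive
 * byte stores for one RGB pixel). With this change the SLP vectorizer can
 * start a vectorization chain at these stores and, where profitable, emit
 * a single <3 x i8> store instead of three scalar ones. */
void scale_pixel(unsigned char *restrict dst, const float *restrict src,
                 float gain) {
    dst[0] = (unsigned char)(src[0] * gain); /* R */
    dst[1] = (unsigned char)(src[1] * gain); /* G */
    dst[2] = (unsigned char)(src[2] * gain); /* B */
}
```

The CHECK lines in the new test below verify the same idea end to end: a <3 x float> load, <3 x double> arithmetic, and a final <3 x i8> store.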
Diffstat (limited to 'test/Transforms')
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/odd_store.ll | 46 |
1 file changed, 46 insertions, 0 deletions
diff --git a/test/Transforms/SLPVectorizer/X86/odd_store.ll b/test/Transforms/SLPVectorizer/X86/odd_store.ll
new file mode 100644
index 0000000..cb2b686
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/odd_store.ll
@@ -0,0 +1,46 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;int foo(char * restrict A, float * restrict B, float T) {
+; A[0] = (T * B[10] + 4.0);
+; A[1] = (T * B[11] + 5.0);
+; A[2] = (T * B[12] + 6.0);
+;}
+
+;CHECK: @foo
+;CHECK: load <3 x float>
+;CHECK: fmul <3 x float>
+;CHECK: fpext <3 x float>
+;CHECK: fadd <3 x double>
+;CHECK: fptosi <3 x double>
+;CHECK: store <3 x i8>
+;CHECK: ret
+define i32 @foo(i8* noalias nocapture %A, float* noalias nocapture %B, float %T) {
+  %1 = getelementptr inbounds float* %B, i64 10
+  %2 = load float* %1, align 4
+  %3 = fmul float %2, %T
+  %4 = fpext float %3 to double
+  %5 = fadd double %4, 4.000000e+00
+  %6 = fptosi double %5 to i8
+  store i8 %6, i8* %A, align 1
+  %7 = getelementptr inbounds float* %B, i64 11
+  %8 = load float* %7, align 4
+  %9 = fmul float %8, %T
+  %10 = fpext float %9 to double
+  %11 = fadd double %10, 5.000000e+00
+  %12 = fptosi double %11 to i8
+  %13 = getelementptr inbounds i8* %A, i64 1
+  store i8 %12, i8* %13, align 1
+  %14 = getelementptr inbounds float* %B, i64 12
+  %15 = load float* %14, align 4
+  %16 = fmul float %15, %T
+  %17 = fpext float %16 to double
+  %18 = fadd double %17, 6.000000e+00
+  %19 = fptosi double %18 to i8
+  %20 = getelementptr inbounds i8* %A, i64 2
+  store i8 %19, i8* %20, align 1
+  ret i32 undef
+}
+