| field | value | date |
|---|---|---|
| author | Pirama Arumuga Nainar <pirama@google.com> | 2015-05-06 11:46:36 -0700 |
| committer | Pirama Arumuga Nainar <pirama@google.com> | 2015-05-18 10:52:30 -0700 |
| commit | 2c3e0051c31c3f5b2328b447eadf1cf9c4427442 (patch) | |
| tree | c0104029af14e9f47c2ef58ca60e6137691f3c9b /test/CodeGen/X86/nontemporal-2.ll | |
| parent | e1bc145815f4334641be19f1c45ecf85d25b6e5a (diff) | |
Update aosp/master LLVM for rebase to r235153
Change-Id: I9bf53792f9fc30570e81a8d80d296c681d005ea7
(cherry picked from commit 0c7f116bb6950ef819323d855415b2f2b0aad987)
Diffstat (limited to 'test/CodeGen/X86/nontemporal-2.ll')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | test/CodeGen/X86/nontemporal-2.ll | 286 |
1 file changed, 279 insertions, 7 deletions
```diff
diff --git a/test/CodeGen/X86/nontemporal-2.ll b/test/CodeGen/X86/nontemporal-2.ll
index f62f372..8c08b3c 100644
--- a/test/CodeGen/X86/nontemporal-2.ll
+++ b/test/CodeGen/X86/nontemporal-2.ll
@@ -1,31 +1,303 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
-
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=core-avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
 
 ; Make sure that we generate non-temporal stores for the test cases below.
+; We use xorps for zeroing, so domain information isn't available anymore.
 
-define void @test1(<4 x float>* %dst) {
-; CHECK-LABEL: test1:
+define void @test_zero_v4f32(<4 x float>* %dst) {
+; CHECK-LABEL: test_zero_v4f32:
 ; SSE: movntps
 ; AVX: vmovntps
   store <4 x float> zeroinitializer, <4 x float>* %dst, align 16, !nontemporal !1
   ret void
 }
 
-define void @test2(<4 x i32>* %dst) {
-; CHECK-LABEL: test2:
+define void @test_zero_v4i32(<4 x i32>* %dst) {
+; CHECK-LABEL: test_zero_v4i32:
 ; SSE: movntps
 ; AVX: vmovntps
   store <4 x i32> zeroinitializer, <4 x i32>* %dst, align 16, !nontemporal !1
   ret void
 }
 
-define void @test3(<2 x double>* %dst) {
-; CHECK-LABEL: test3:
+define void @test_zero_v2f64(<2 x double>* %dst) {
+; CHECK-LABEL: test_zero_v2f64:
 ; SSE: movntps
 ; AVX: vmovntps
   store <2 x double> zeroinitializer, <2 x double>* %dst, align 16, !nontemporal !1
   ret void
 }
 
+define void @test_zero_v2i64(<2 x i64>* %dst) {
+; CHECK-LABEL: test_zero_v2i64:
+; SSE: movntps
+; AVX: vmovntps
+  store <2 x i64> zeroinitializer, <2 x i64>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_zero_v8i16(<8 x i16>* %dst) {
+; CHECK-LABEL: test_zero_v8i16:
+; SSE: movntps
+; AVX: vmovntps
+  store <8 x i16> zeroinitializer, <8 x i16>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_zero_v16i8(<16 x i8>* %dst) {
+; CHECK-LABEL: test_zero_v16i8:
+; SSE: movntps
+; AVX: vmovntps
+  store <16 x i8> zeroinitializer, <16 x i8>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+; And now YMM versions.
+
+define void @test_zero_v8f32(<8 x float>* %dst) {
+; CHECK-LABEL: test_zero_v8f32:
+; AVX: vmovntps %ymm
+  store <8 x float> zeroinitializer, <8 x float>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_zero_v8i32(<8 x i32>* %dst) {
+; CHECK-LABEL: test_zero_v8i32:
+; AVX2: vmovntps %ymm
+  store <8 x i32> zeroinitializer, <8 x i32>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_zero_v4f64(<4 x double>* %dst) {
+; CHECK-LABEL: test_zero_v4f64:
+; AVX: vmovntps %ymm
+  store <4 x double> zeroinitializer, <4 x double>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_zero_v4i64(<4 x i64>* %dst) {
+; CHECK-LABEL: test_zero_v4i64:
+; AVX2: vmovntps %ymm
+  store <4 x i64> zeroinitializer, <4 x i64>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_zero_v16i16(<16 x i16>* %dst) {
+; CHECK-LABEL: test_zero_v16i16:
+; AVX2: vmovntps %ymm
+  store <16 x i16> zeroinitializer, <16 x i16>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_zero_v32i8(<32 x i8>* %dst) {
+; CHECK-LABEL: test_zero_v32i8:
+; AVX2: vmovntps %ymm
+  store <32 x i8> zeroinitializer, <32 x i8>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+
+; Check that we also handle arguments. Here the type survives longer.
+
+define void @test_arg_v4f32(<4 x float> %arg, <4 x float>* %dst) {
+; CHECK-LABEL: test_arg_v4f32:
+; SSE: movntps
+; AVX: vmovntps
+  store <4 x float> %arg, <4 x float>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v4i32(<4 x i32> %arg, <4 x i32>* %dst) {
+; CHECK-LABEL: test_arg_v4i32:
+; SSE: movntps
+; AVX: vmovntps
+  store <4 x i32> %arg, <4 x i32>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v2f64(<2 x double> %arg, <2 x double>* %dst) {
+; CHECK-LABEL: test_arg_v2f64:
+; SSE: movntps
+; AVX: vmovntps
+  store <2 x double> %arg, <2 x double>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v2i64(<2 x i64> %arg, <2 x i64>* %dst) {
+; CHECK-LABEL: test_arg_v2i64:
+; SSE: movntps
+; AVX: vmovntps
+  store <2 x i64> %arg, <2 x i64>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v8i16(<8 x i16> %arg, <8 x i16>* %dst) {
+; CHECK-LABEL: test_arg_v8i16:
+; SSE: movntps
+; AVX: vmovntps
+  store <8 x i16> %arg, <8 x i16>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v16i8(<16 x i8> %arg, <16 x i8>* %dst) {
+; CHECK-LABEL: test_arg_v16i8:
+; SSE: movntps
+; AVX: vmovntps
+  store <16 x i8> %arg, <16 x i8>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+; And now YMM versions.
+
+define void @test_arg_v8f32(<8 x float> %arg, <8 x float>* %dst) {
+; CHECK-LABEL: test_arg_v8f32:
+; AVX: vmovntps %ymm
+  store <8 x float> %arg, <8 x float>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %dst) {
+; CHECK-LABEL: test_arg_v8i32:
+; AVX2: vmovntps %ymm
+  store <8 x i32> %arg, <8 x i32>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v4f64(<4 x double> %arg, <4 x double>* %dst) {
+; CHECK-LABEL: test_arg_v4f64:
+; AVX: vmovntps %ymm
+  store <4 x double> %arg, <4 x double>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %dst) {
+; CHECK-LABEL: test_arg_v4i64:
+; AVX2: vmovntps %ymm
+  store <4 x i64> %arg, <4 x i64>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %dst) {
+; CHECK-LABEL: test_arg_v16i16:
+; AVX2: vmovntps %ymm
+  store <16 x i16> %arg, <16 x i16>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %dst) {
+; CHECK-LABEL: test_arg_v32i8:
+; AVX2: vmovntps %ymm
+  store <32 x i8> %arg, <32 x i8>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+
+; Now check that if the execution domain is trivially visible, we use it.
+; We use an add to make the type survive all the way to the MOVNT.
+
+define void @test_op_v4f32(<4 x float> %a, <4 x float> %b, <4 x float>* %dst) {
+; CHECK-LABEL: test_op_v4f32:
+; SSE: movntps
+; AVX: vmovntps
+  %r = fadd <4 x float> %a, %b
+  store <4 x float> %r, <4 x float>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32>* %dst) {
+; CHECK-LABEL: test_op_v4i32:
+; SSE: movntdq
+; AVX: vmovntdq
+  %r = add <4 x i32> %a, %b
+  store <4 x i32> %r, <4 x i32>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v2f64(<2 x double> %a, <2 x double> %b, <2 x double>* %dst) {
+; CHECK-LABEL: test_op_v2f64:
+; SSE: movntpd
+; AVX: vmovntpd
+  %r = fadd <2 x double> %a, %b
+  store <2 x double> %r, <2 x double>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64>* %dst) {
+; CHECK-LABEL: test_op_v2i64:
+; SSE: movntdq
+; AVX: vmovntdq
+  %r = add <2 x i64> %a, %b
+  store <2 x i64> %r, <2 x i64>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16>* %dst) {
+; CHECK-LABEL: test_op_v8i16:
+; SSE: movntdq
+; AVX: vmovntdq
+  %r = add <8 x i16> %a, %b
+  store <8 x i16> %r, <8 x i16>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8>* %dst) {
+; CHECK-LABEL: test_op_v16i8:
+; SSE: movntdq
+; AVX: vmovntdq
+  %r = add <16 x i8> %a, %b
+  store <16 x i8> %r, <16 x i8>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+; And now YMM versions.
+
+define void @test_op_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>* %dst) {
+; CHECK-LABEL: test_op_v8f32:
+; AVX: vmovntps %ymm
+  %r = fadd <8 x float> %a, %b
+  store <8 x float> %r, <8 x float>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %dst) {
+; CHECK-LABEL: test_op_v8i32:
+; AVX2: vmovntdq %ymm
+  %r = add <8 x i32> %a, %b
+  store <8 x i32> %r, <8 x i32>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v4f64(<4 x double> %a, <4 x double> %b, <4 x double>* %dst) {
+; CHECK-LABEL: test_op_v4f64:
+; AVX: vmovntpd %ymm
+  %r = fadd <4 x double> %a, %b
+  store <4 x double> %r, <4 x double>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %dst) {
+; CHECK-LABEL: test_op_v4i64:
+; AVX2: vmovntdq %ymm
+  %r = add <4 x i64> %a, %b
+  store <4 x i64> %r, <4 x i64>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %dst) {
+; CHECK-LABEL: test_op_v16i16:
+; AVX2: vmovntdq %ymm
+  %r = add <16 x i16> %a, %b
+  store <16 x i16> %r, <16 x i16>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %dst) {
+; CHECK-LABEL: test_op_v32i8:
+; AVX2: vmovntdq %ymm
+  %r = add <32 x i8> %a, %b
+  store <32 x i8> %r, <32 x i8>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
 !1 = !{i32 1}
```
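The test is driven by the `; RUN:` lines at the top of the file: LLVM's lit harness substitutes `%s` with the file path, compiles the IR with `llc` once per listed CPU, and pipes the assembly into `FileCheck`, which matches the `SSE`/`AVX`/`AVX2` check prefixes against it. Below is a minimal standalone sketch of the same pattern; the file name `nt-min.ll`, the function, and its check lines are illustrative and not part of the commit. The `!nontemporal` metadata must be a single `i32 1` node, matching the `!1 = !{i32 1}` at the end of the diff.

```llvm
; Hypothetical minimal reproducer, nt-min.ll. Run under lit, or manually:
;   llc < nt-min.ll -mtriple=x86_64-unknown-unknown -mcpu=corei7 | FileCheck nt-min.ll
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7 | FileCheck %s

define void @nt_store(<4 x float> %v, <4 x float>* %dst) {
; CHECK-LABEL: nt_store:
; CHECK: movntps
  ; The !nontemporal metadata asks the backend to emit a streaming
  ; (cache-bypassing) store instead of a plain movaps.
  store <4 x float> %v, <4 x float>* %dst, align 16, !nontemporal !0
  ret void
}

!0 = !{i32 1}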
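The `xorps` comment explains why every zero store above checks for the float-domain `movntps`, even for integer vector types: zeroing is materialized with an `xorps` register idiom, so by the time the store is selected no integer-domain information survives. When a visible integer op such as `add` feeds the store, the domain is known and the integer form `movntdq` is used instead. A sketch of the contrast, assuming `-mcpu=corei7`; the check lines are illustrative restatements of the expectations the diff establishes:

```llvm
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7 | FileCheck %s

; Zeroing folds into xorps, erasing the integer domain: float-domain store.
define void @nt_zero(<4 x i32>* %p) {
; CHECK-LABEL: nt_zero:
; CHECK: xorps
; CHECK: movntps
  store <4 x i32> zeroinitializer, <4 x i32>* %p, align 16, !nontemporal !0
  ret void
}

; The add keeps the value in the integer domain: integer-domain store.
define void @nt_add(<4 x i32> %a, <4 x i32> %b, <4 x i32>* %p) {
; CHECK-LABEL: nt_add:
; CHECK: paddd
; CHECK: movntdq
  %r = add <4 x i32> %a, %b
  store <4 x i32> %r, <4 x i32>* %p, align 16, !nontemporal !0
  ret void
}

!0 = !{i32 1}
```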
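For the 256-bit cases, the integer `vmovntdq %ymm` expectations are keyed to the `AVX2` prefix because plain AVX has no 256-bit integer ALU operations, so only the `-mcpu=core-avx2` run can keep a full-width integer `add` in front of the store. A hypothetical ymm-width variant under that assumption, again with illustrative check lines:

```llvm
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=core-avx2 | FileCheck %s

define void @nt_add_ymm(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
; CHECK-LABEL: nt_add_ymm:
; CHECK: vpaddd
; CHECK: vmovntdq %ymm
  ; AVX2 provides 256-bit vpaddd, so the integer domain is visible at the
  ; store and the 32-byte-aligned nontemporal store can use vmovntdq on ymm.
  %r = add <8 x i32> %a, %b
  store <8 x i32> %r, <8 x i32>* %p, align 32, !nontemporal !0
  ret void
}

!0 = !{i32 1}
```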