From 48177ac90fb940833b9deea1a6716092348cfe82 Mon Sep 17 00:00:00 2001
From: Nadav Rotem
Date: Fri, 18 Jan 2013 23:10:30 +0000
Subject: On Sandy Bridge, loading unaligned 256 bits using two XMM loads
 (vmovups and vinsertf128) is faster than using a single vmovups instruction.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@172868 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/v8i1-masks.ll | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/CodeGen/X86/v8i1-masks.ll b/test/CodeGen/X86/v8i1-masks.ll
index abb4b39..ea231af 100644
--- a/test/CodeGen/X86/v8i1-masks.ll
+++ b/test/CodeGen/X86/v8i1-masks.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -o - < %s | FileCheck %s
 
 ;CHECK: and_masks
-;CHECK: vmovups
+;CHECK: vmovaps
 ;CHECK: vcmpltp
 ;CHECK: vcmpltp
 ;CHECK: vandps
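To illustrate the two load strategies the commit message contrasts, below is a minimal C sketch using AVX intrinsics. It is not part of the patch and does not show the backend's internal lowering; the function names load256_single and load256_split are hypothetical, and the split version only approximates the vmovups-xmm-plus-vinsertf128 sequence this change makes the X86 backend prefer for unaligned 256-bit loads on Sandy Bridge (corei7-avx).

    #include <immintrin.h>
    #include <stdio.h>

    /* One full-width unaligned load: typically a single vmovups ymm. */
    static __m256 load256_single(const float *p) {
        return _mm256_loadu_ps(p);
    }

    /* Split load: a 128-bit unaligned load (vmovups xmm) for the low half,
       then vinsertf128 to bring in the high half. The commit message says
       this pair is faster than one 256-bit vmovups on Sandy Bridge when the
       address is unaligned. */
    static __m256 load256_split(const float *p) {
        __m256 lo = _mm256_castps128_ps256(_mm_loadu_ps(p));
        return _mm256_insertf128_ps(lo, _mm_loadu_ps(p + 4), 1);
    }

    int main(void) {
        float buf[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
        /* buf + 1 is deliberately not 32-byte aligned. */
        __m256 a = load256_single(buf + 1);
        __m256 b = load256_split(buf + 1);
        float out_a[8], out_b[8];
        _mm256_storeu_ps(out_a, a);
        _mm256_storeu_ps(out_b, b);
        for (int i = 0; i < 8; ++i)
            printf("%g %g\n", out_a[i], out_b[i]);
        return 0;
    }

Compiled with AVX enabled (e.g. cc -mavx), both functions return the same value; the difference is only in which instruction sequence performs the unaligned load. The test update above follows from the same change: with the split now preferred on corei7-avx, the single 256-bit vmovups no longer appears in the checked output.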