author    Stephen Hines <srhines@google.com>    2014-07-21 00:45:20 -0700
committer Stephen Hines <srhines@google.com>    2014-07-21 00:45:20 -0700
commit    c6a4f5e819217e1e12c458aed8e7b122e23a3a58 (patch)
tree      81b7dd2bb4370a392f31d332a566c903b5744764 /test/Analysis
parent    19c6fbb3e8aaf74093afa08013134b61fa08f245 (diff)
Update LLVM for rebase to r212749.
Includes a cherry-pick of:
  r212948 - fixes a small issue with atomic calls

Change-Id: Ib97bd980b59f18142a69506400911a6009d9df18
Diffstat (limited to 'test/Analysis')
-rw-r--r--  test/Analysis/BasicAA/cs-cs.ll                         | 221
-rw-r--r--  test/Analysis/CostModel/AArch64/lit.local.cfg          |   3
-rw-r--r--  test/Analysis/CostModel/ARM/lit.local.cfg              |   3
-rw-r--r--  test/Analysis/CostModel/PowerPC/lit.local.cfg          |   3
-rw-r--r--  test/Analysis/CostModel/X86/alternate-shuffle-cost.ll  | 347
-rw-r--r--  test/Analysis/CostModel/X86/lit.local.cfg              |   3
-rw-r--r--  test/Analysis/Delinearization/multidim_only_ivs_2d.ll  |  12
7 files changed, 583 insertions, 9 deletions
diff --git a/test/Analysis/BasicAA/cs-cs.ll b/test/Analysis/BasicAA/cs-cs.ll
new file mode 100644
index 0000000..682e4b6
--- /dev/null
+++ b/test/Analysis/BasicAA/cs-cs.ll
@@ -0,0 +1,221 @@
+; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info -disable-output 2>&1 | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
+target triple = "arm-apple-ios"
+
+declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly
+declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+
+define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
+entry:
+ %q = getelementptr i8* %p, i64 16
+ %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
+ call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
+ %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
+ %c = add <8 x i16> %a, %b
+ ret <8 x i16> %c
+
+; CHECK-LABEL: Function: test1:
+
+; CHECK: NoAlias: i8* %p, i8* %q
+; CHECK: Just Ref: Ptr: i8* %p <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: Ptr: i8* %q <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: Ptr: i8* %p <-> call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
+; CHECK: Both ModRef: Ptr: i8* %q <-> call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
+; CHECK: Just Ref: Ptr: i8* %p <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: Ptr: i8* %q <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1 <-> call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
+; CHECK: NoModRef: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1 <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16) <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16) <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1 <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1 <-> call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
+}
+
+define void @test2(i8* %P, i8* %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test2:
+
+; CHECK: MayAlias: i8* %P, i8* %Q
+; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+}
+
+define void @test2a(i8* noalias %P, i8* noalias %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test2a:
+
+; CHECK: NoAlias: i8* %P, i8* %Q
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+}
+
+define void @test2b(i8* noalias %P, i8* noalias %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ %R = getelementptr i8* %P, i64 12
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test2b:
+
+; CHECK: NoAlias: i8* %P, i8* %Q
+; CHECK: NoAlias: i8* %P, i8* %R
+; CHECK: NoAlias: i8* %Q, i8* %R
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+}
+
+define void @test2c(i8* noalias %P, i8* noalias %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ %R = getelementptr i8* %P, i64 11
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test2c:
+
+; CHECK: NoAlias: i8* %P, i8* %Q
+; CHECK: NoAlias: i8* %P, i8* %R
+; CHECK: NoAlias: i8* %Q, i8* %R
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+}
+
+define void @test2d(i8* noalias %P, i8* noalias %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ %R = getelementptr i8* %P, i64 -12
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test2d:
+
+; CHECK: NoAlias: i8* %P, i8* %Q
+; CHECK: NoAlias: i8* %P, i8* %R
+; CHECK: NoAlias: i8* %Q, i8* %R
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+}
+
+define void @test2e(i8* noalias %P, i8* noalias %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ %R = getelementptr i8* %P, i64 -11
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test2e:
+
+; CHECK: NoAlias: i8* %P, i8* %Q
+; CHECK: NoAlias: i8* %P, i8* %R
+; CHECK: NoAlias: i8* %Q, i8* %R
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+}
+
+define void @test3(i8* %P, i8* %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test3:
+
+; CHECK: MayAlias: i8* %P, i8* %Q
+; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+}
+
+define void @test3a(i8* noalias %P, i8* noalias %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test3a:
+
+; CHECK: NoAlias: i8* %P, i8* %Q
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+}
+
+define void @test4(i8* %P, i8* noalias %Q) nounwind ssp {
+ tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test4:
+
+; CHECK: NoAlias: i8* %P, i8* %Q
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false)
+; CHECK: NoModRef: Ptr: i8* %Q <-> tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false)
+}
+
+define void @test5(i8* %P, i8* %Q, i8* %R) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test5:
+
+; CHECK: MayAlias: i8* %P, i8* %Q
+; CHECK: MayAlias: i8* %P, i8* %R
+; CHECK: MayAlias: i8* %Q, i8* %R
+; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+}
+
+attributes #0 = { nounwind }
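A note for readers scanning the CHECK lines above: in aa-eval output, "Just Mod" means the call may write but never read the queried pointer's memory, "Just Ref" may read but never write, "Both ModRef" may do either, and "NoModRef" neither. The test2b/test2c pair probes the exact boundary: a 12-byte memcpy starting at %P covers bytes [%P, %P+12), so %R = %P + 12 is disjoint (NoModRef) while %R = %P + 11 overlaps (Just Mod). Below is a minimal sketch of the per-argument split that noalias enables; it is not part of the commit, and @modref_sketch is a hypothetical name, but it runs under the same RUN line as cs-cs.ll:

; Sketch only (not from the commit): with noalias arguments, BasicAA can
; classify memcpy's effects per pointer instead of reporting Both ModRef
; for every operand.
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

define void @modref_sketch(i8* noalias %dst, i8* noalias %src) {
; Expected aa-eval classification, mirroring test2a above:
;   Just Mod: Ptr: i8* %dst <-> the memcpy call
;   Just Ref: Ptr: i8* %src <-> the memcpy call
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 12, i32 1, i1 false)
  ret void
}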
diff --git a/test/Analysis/CostModel/AArch64/lit.local.cfg b/test/Analysis/CostModel/AArch64/lit.local.cfg
index c420349..7184443 100644
--- a/test/Analysis/CostModel/AArch64/lit.local.cfg
+++ b/test/Analysis/CostModel/AArch64/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'AArch64' in targets:
+if not 'AArch64' in config.root.targets:
config.unsupported = True
diff --git a/test/Analysis/CostModel/ARM/lit.local.cfg b/test/Analysis/CostModel/ARM/lit.local.cfg
index 8a3ba96..98c6700 100644
--- a/test/Analysis/CostModel/ARM/lit.local.cfg
+++ b/test/Analysis/CostModel/ARM/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/Analysis/CostModel/PowerPC/lit.local.cfg b/test/Analysis/CostModel/PowerPC/lit.local.cfg
index 2e46300..5d33887 100644
--- a/test/Analysis/CostModel/PowerPC/lit.local.cfg
+++ b/test/Analysis/CostModel/PowerPC/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'PowerPC' in targets:
+if not 'PowerPC' in config.root.targets:
config.unsupported = True
diff --git a/test/Analysis/CostModel/X86/alternate-shuffle-cost.ll b/test/Analysis/CostModel/X86/alternate-shuffle-cost.ll
new file mode 100644
index 0000000..2e162f0
--- /dev/null
+++ b/test/Analysis/CostModel/X86/alternate-shuffle-cost.ll
@@ -0,0 +1,347 @@
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2,-ssse3 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE2
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2,+sse3,+ssse3 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSSE3
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE41
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+
+
+; Verify the cost model for alternate shuffles.
+
+; shufflevector instructions with illegal 64-bit vector types.
+; 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
+; 64-bit packed float vectors (v2f32) are widened to type v4f32.
+
+define <2 x i32> @test_v2i32(<2 x i32> %a, <2 x i32> %b) {
+ %1 = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x i32> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2i32':
+; SSE2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+define <2 x float> @test_v2f32(<2 x float> %a, <2 x float> %b) {
+ %1 = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x float> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2f32':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+define <2 x i32> @test_v2i32_2(<2 x i32> %a, <2 x i32> %b) {
+ %1 = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x i32> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2i32_2':
+; SSE2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+define <2 x float> @test_v2f32_2(<2 x float> %a, <2 x float> %b) {
+ %1 = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x float> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2f32_2':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+; Test shuffles on packed vectors of two elements.
+
+define <2 x i64> @test_v2i64(<2 x i64> %a, <2 x i64> %b) {
+ %1 = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x i64> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2i64':
+; SSE2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+define <2 x double> @test_v2f64(<2 x double> %a, <2 x double> %b) {
+ %1 = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2f64':
+; SSE2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <2 x i64> @test_v2i64_2(<2 x i64> %a, <2 x i64> %b) {
+ %1 = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x i64> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2i64_2':
+; SSE2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <2 x double> @test_v2f64_2(<2 x double> %a, <2 x double> %b) {
+ %1 = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x double> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2f64_2':
+; SSE2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+; Test shuffles on packed vectors of four elements.
+
+define <4 x i32> @test_v4i32(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x i32> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4i32':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <4 x i32> @test_v4i32_2(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ ret <4 x i32> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4i32_2':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <4 x float> @test_v4f32(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4f32':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <4 x float> @test_v4f32_2(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ ret <4 x float> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4f32_2':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+define <4 x i64> @test_v4i64(<4 x i64> %a, <4 x i64> %b) {
+ %1 = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x i64> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4i64':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <4 x i64> @test_v4i64_2(<4 x i64> %a, <4 x i64> %b) {
+ %1 = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ ret <4 x i64> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4i64_2':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <4 x double> @test_v4f64(<4 x double> %a, <4 x double> %b) {
+ %1 = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4f64':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <4 x double> @test_v4f64_2(<4 x double> %a, <4 x double> %b) {
+ %1 = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ ret <4 x double> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4f64_2':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+; Test shuffles on packed vectors of eight elements.
+define <8 x i16> @test_v8i16(<8 x i16> %a, <8 x i16> %b) {
+ %1 = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x i16> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v8i16':
+; SSE2: Cost Model: {{.*}} 8 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <8 x i16> @test_v8i16_2(<8 x i16> %a, <8 x i16> %b) {
+ %1 = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+ ret <8 x i16> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v8i16_2':
+; SSE2: Cost Model: {{.*}} 8 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <8 x i32> @test_v8i32(<8 x i32> %a, <8 x i32> %b) {
+ %1 = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x i32> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v8i32':
+; SSE2: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <8 x i32> @test_v8i32_2(<8 x i32> %a, <8 x i32> %b) {
+ %1 = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+ ret <8 x i32> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v8i32_2':
+; SSE2: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <8 x float> @test_v8f32(<8 x float> %a, <8 x float> %b) {
+ %1 = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v8f32':
+; SSE2: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <8 x float> @test_v8f32_2(<8 x float> %a, <8 x float> %b) {
+ %1 = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+ ret <8 x float> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v8f32_2':
+; SSE2: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+; Test shuffles on packed vectors of sixteen elements.
+define <16 x i8> @test_v16i8(<16 x i8> %a, <16 x i8> %b) {
+ %1 = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
+ ret <16 x i8> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v16i8':
+; SSE2: Cost Model: {{.*}} 48 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+
+
+define <16 x i8> @test_v16i8_2(<16 x i8> %a, <16 x i8> %b) {
+ %1 = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 16, i32 1, i32 18, i32 3, i32 20, i32 5, i32 22, i32 7, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15>
+ ret <16 x i8> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v16i8_2':
+; SSE2: Cost Model: {{.*}} 48 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+
+
+define <16 x i16> @test_v16i16(<16 x i16> %a, <16 x i16> %b) {
+ %1 = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
+ ret <16 x i16> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v16i16':
+; SSE2: Cost Model: {{.*}} 16 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 6 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 5 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <16 x i16> @test_v16i16_2(<16 x i16> %a, <16 x i16> %b) {
+ %1 = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 16, i32 1, i32 18, i32 3, i32 20, i32 5, i32 22, i32 7, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15>
+ ret <16 x i16> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v16i16_2':
+; SSE2: Cost Model: {{.*}} 16 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 6 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 5 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+define <32 x i8> @test_v32i8(<32 x i8> %a, <32 x i8> %b) {
+ %1 = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 33, i32 2, i32 35, i32 4, i32 37, i32 6, i32 39, i32 8, i32 41, i32 10, i32 43, i32 12, i32 45, i32 14, i32 47, i32 16, i32 49, i32 18, i32 51, i32 20, i32 53, i32 22, i32 55, i32 24, i32 57, i32 26, i32 59, i32 28, i32 61, i32 30, i32 63>
+ ret <32 x i8> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v32i8':
+; SSE2: Cost Model: {{.*}} 96 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 6 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 6 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 9 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 9 for instruction: %1 = shufflevector
+
+
+define <32 x i8> @test_v32i8_2(<32 x i8> %a, <32 x i8> %b) {
+ %1 = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 32, i32 1, i32 34, i32 3, i32 36, i32 5, i32 38, i32 7, i32 40, i32 9, i32 42, i32 11, i32 44, i32 13, i32 46, i32 15, i32 48, i32 17, i32 50, i32 19, i32 52, i32 21, i32 54, i32 23, i32 56, i32 25, i32 58, i32 27, i32 60, i32 29, i32 62, i32 31>
+ ret <32 x i8> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v32i8_2':
+; SSE2: Cost Model: {{.*}} 96 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 6 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 6 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 9 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 9 for instruction: %1 = shufflevector
+
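For context on the masks used throughout alternate-shuffle-cost.ll: with two <4 x i32> operands, shufflevector mask indices 0-3 select lanes of the first input and 4-7 select lanes of the second, so the "alternate" mask <0, 5, 2, 7> interleaves even lanes of %a with odd lanes of %b. A sketch, not part of the commit:

; Sketch only: <0,5,2,7> produces <a[0], b[1], a[2], b[3]>.
; A constant two-input lane blend like this can be done in a single blend
; instruction on SSE4.1 and later, which lines up with the cost of 1
; reported above for SSE41/AVX/AVX2 on <4 x i32>.
define <4 x i32> @alternate_sketch(<4 x i32> %a, <4 x i32> %b) {
  %r = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x i32> %r
}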
diff --git a/test/Analysis/CostModel/X86/lit.local.cfg b/test/Analysis/CostModel/X86/lit.local.cfg
index ba763cf..e71f3cc 100644
--- a/test/Analysis/CostModel/X86/lit.local.cfg
+++ b/test/Analysis/CostModel/X86/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/Analysis/Delinearization/multidim_only_ivs_2d.ll b/test/Analysis/Delinearization/multidim_only_ivs_2d.ll
index 48bec08..5a88c4c 100644
--- a/test/Analysis/Delinearization/multidim_only_ivs_2d.ll
+++ b/test/Analysis/Delinearization/multidim_only_ivs_2d.ll
@@ -8,6 +8,15 @@
; A[i][j] = 1.0;
; }

+; Inst: %val = load double* %arrayidx
+; In Loop with Header: for.j
+; AddRec: {{0,+,(%m * sizeof(double))}<%for.i>,+,sizeof(double)}<%for.j>
+; Base offset: %A
+; ArrayDecl[UnknownSize][%m] with elements of sizeof(double) bytes.
+; ArrayRef[{0,+,1}<nuw><nsw><%for.i>][{0,+,1}<nuw><nsw><%for.j>]
+
+; Inst: store double %val, double* %arrayidx
+; In Loop with Header: for.j
; AddRec: {{%A,+,(8 * %m)}<%for.i>,+,8}<%for.j>
; CHECK: Base offset: %A
; CHECK: ArrayDecl[UnknownSize][%m] with elements of sizeof(double) bytes.
@@ -26,7 +35,8 @@ for.j:
%j = phi i64 [ 0, %for.i ], [ %j.inc, %for.j ]
%vlaarrayidx.sum = add i64 %j, %tmp
%arrayidx = getelementptr inbounds double* %A, i64 %vlaarrayidx.sum
- store double 1.0, double* %arrayidx
+ %val = load double* %arrayidx
+ store double %val, double* %arrayidx
%j.inc = add nsw i64 %j, 1
%j.exitcond = icmp eq i64 %j.inc, %m
br i1 %j.exitcond, label %for.i.inc, label %for.j
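The delinearization asserted by this test can be checked by hand from the printed AddRec; a worked sketch, not part of the commit:

; From AddRec {{%A,+,(8 * %m)}<%for.i>,+,8}<%for.j>, the accessed address is
;   addr(i, j) = %A + (8 * %m) * i + 8 * j
;              = %A + 8 * (%m * i + j)
; i.e. &A[i][j] for a row-major array of 8-byte doubles whose rows are %m
; elements long, which is exactly what ArrayDecl[UnknownSize][%m] with
; sizeof(double) elements and ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>]
; expresses.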