author     Stephen Hines <srhines@google.com>  2014-05-29 02:49:00 -0700
committer  Stephen Hines <srhines@google.com>  2014-05-29 02:49:00 -0700
commit     dce4a407a24b04eebc6a376f8e62b41aaa7b071f (patch)
tree       dcebc53f2b182f145a2e659393bf9a0472cedf23 /test/Transforms
parent     220b921aed042f9e520c26cffd8282a94c66c3d5 (diff)
Update LLVM for 3.5 rebase (r209712).
Change-Id: I149556c940fb7dc92d075273c87ff584f400941f
Diffstat (limited to 'test/Transforms')
-rw-r--r--  test/Transforms/AddDiscriminators/no-discriminators.ll | 71
-rw-r--r--  test/Transforms/ArgumentPromotion/inalloca.ll | 4
-rw-r--r--  test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v7.ll | 340
-rw-r--r--  test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v8.ll | 202
-rw-r--r--  test/Transforms/AtomicExpandLoadLinked/ARM/lit.local.cfg (renamed from test/Transforms/GlobalMerge/ARM64/lit.local.cfg) | 2
-rw-r--r--  test/Transforms/BBVectorize/simple-int.ll | 376
-rw-r--r--  test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll | 37
-rw-r--r--  test/Transforms/ConstProp/loads.ll | 34
-rw-r--r--  test/Transforms/ConstantHoisting/AArch64/const-addr.ll | 23
-rw-r--r--  test/Transforms/ConstantHoisting/AArch64/large-immediate.ll | 27
-rw-r--r--  test/Transforms/ConstantHoisting/AArch64/lit.local.cfg | 3
-rw-r--r--  test/Transforms/ConstantHoisting/PowerPC/const-base-addr.ll | 23
-rw-r--r--  test/Transforms/ConstantHoisting/PowerPC/lit.local.cfg (renamed from test/Transforms/LoopVectorize/ARM64/lit.local.cfg) | 4
-rw-r--r--  test/Transforms/ConstantHoisting/PowerPC/masks.ll | 66
-rw-r--r--  test/Transforms/ConstantHoisting/X86/cast-inst.ll | 29
-rw-r--r--  test/Transforms/ConstantHoisting/X86/delete-dead-cast-inst.ll | 10
-rw-r--r--  test/Transforms/ConstantHoisting/X86/large-immediate.ll | 27
-rw-r--r--  test/Transforms/GVN/load-pre-nonlocal.ll | 87
-rw-r--r--  test/Transforms/GlobalDCE/2009-01-05-DeadAliases.ll | 18
-rw-r--r--  test/Transforms/GlobalDCE/global_ctors.ll | 14
-rw-r--r--  test/Transforms/GlobalDCE/global_ctors_integration.ll | 45
-rw-r--r--  test/Transforms/GlobalMerge/AArch64/arm64.ll (renamed from test/Transforms/GlobalMerge/ARM64/arm64.ll) | 0
-rw-r--r--  test/Transforms/GlobalMerge/AArch64/lit.local.cfg | 4
-rw-r--r--  test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll | 2
-rw-r--r--  test/Transforms/GlobalOpt/alias-resolve.ll | 4
-rw-r--r--  test/Transforms/GlobalOpt/alias-used-section.ll | 4
-rw-r--r--  test/Transforms/GlobalOpt/atexit.ll | 2
-rw-r--r--  test/Transforms/GlobalOpt/ctor-list-opt.ll | 19
-rw-r--r--  test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll | 2
-rw-r--r--  test/Transforms/IndVarSimplify/pr18223.ll | 30
-rw-r--r--  test/Transforms/Inline/2010-05-31-ByvalTailcall.ll | 24
-rw-r--r--  test/Transforms/Inline/always-inline.ll | 11
-rw-r--r--  test/Transforms/Inline/byval-tail-call.ll | 38
-rw-r--r--  test/Transforms/Inline/byval_lifetime.ll | 26
-rw-r--r--  test/Transforms/Inline/inline-cold.ll | 116
-rw-r--r--  test/Transforms/Inline/inline-tail.ll | 185
-rw-r--r--  test/Transforms/Inline/inline-vla.ll | 38
-rw-r--r--  test/Transforms/Inline/optimization-remarks.ll | 60
-rw-r--r--  test/Transforms/Inline/switch.ll | 60
-rw-r--r--  test/Transforms/InstCombine/2012-04-23-Neon-Intrinsics.ll | 20
-rw-r--r--  test/Transforms/InstCombine/OverlappingInsertvalues.ll | 36
-rw-r--r--  test/Transforms/InstCombine/alloca.ll | 21
-rw-r--r--  test/Transforms/InstCombine/bitcast-alias-function.ll | 24
-rw-r--r--  test/Transforms/InstCombine/blend_x86.ll | 55
-rw-r--r--  test/Transforms/InstCombine/call-cast-target-inalloca.ll | 2
-rw-r--r--  test/Transforms/InstCombine/div.ll | 19
-rw-r--r--  test/Transforms/InstCombine/gep-addrspace.ll | 17
-rw-r--r--  test/Transforms/InstCombine/icmp.ll | 9
-rw-r--r--  test/Transforms/InstCombine/memcpy-from-global.ll | 67
-rw-r--r--  test/Transforms/InstCombine/overflow-mul.ll | 164
-rw-r--r--  test/Transforms/InstCombine/pr19420.ll | 67
-rw-r--r--  test/Transforms/InstCombine/select.ll | 100
-rw-r--r--  test/Transforms/InstCombine/shift.ll | 94
-rw-r--r--  test/Transforms/InstCombine/strlen-1.ll | 12
-rw-r--r--  test/Transforms/InstCombine/vec_demanded_elts.ll | 366
-rw-r--r--  test/Transforms/InstCombine/vec_shuffle.ll | 162
-rw-r--r--  test/Transforms/InstSimplify/compare.ll | 126
-rw-r--r--  test/Transforms/InstSimplify/dead-code-removal.ll | 15
-rw-r--r--  test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll | 11
-rw-r--r--  test/Transforms/Internalize/local-visibility.ll | 25
-rw-r--r--  test/Transforms/JumpThreading/phi-eq.ll | 2
-rw-r--r--  test/Transforms/LoopSimplify/2007-10-28-InvokeCrash.ll | 2
-rw-r--r--  test/Transforms/LoopStrengthReduce/AArch64/lit.local.cfg | 5
-rw-r--r--  test/Transforms/LoopStrengthReduce/AArch64/lsr-memcpy.ll (renamed from test/Transforms/LoopStrengthReduce/ARM64/lsr-memcpy.ll) | 0
-rw-r--r--  test/Transforms/LoopStrengthReduce/AArch64/lsr-memset.ll (renamed from test/Transforms/LoopStrengthReduce/ARM64/lsr-memset.ll) | 0
-rw-r--r--  test/Transforms/LoopStrengthReduce/AArch64/req-regs.ll | 70
-rw-r--r--  test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll | 2
-rw-r--r--  test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll | 3
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll | 2
-rw-r--r--  test/Transforms/LoopStrengthReduce/dont_insert_redundant_ops.ll | 6
-rw-r--r--  test/Transforms/LoopUnroll/X86/partial.ll | 51
-rw-r--r--  test/Transforms/LoopUnroll/loop-remarks.ll | 25
-rw-r--r--  test/Transforms/LoopVectorize/AArch64/aarch64-unroll.ll | 42
-rw-r--r--  test/Transforms/LoopVectorize/AArch64/arm64-unroll.ll | 42
-rw-r--r--  test/Transforms/LoopVectorize/AArch64/gather-cost.ll (renamed from test/Transforms/LoopVectorize/ARM64/gather-cost.ll) | 0
-rw-r--r--  test/Transforms/LoopVectorize/AArch64/lit.local.cfg (renamed from test/Transforms/LoopStrengthReduce/ARM64/lit.local.cfg) | 1
-rw-r--r--  test/Transforms/LoopVectorize/X86/metadata-enable.ll | 20
-rw-r--r--  test/Transforms/LoopVectorize/X86/vect.omp.force.ll | 93
-rw-r--r--  test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll | 73
-rw-r--r--  test/Transforms/LoopVectorize/X86/vectorization-remarks.ll | 67
-rw-r--r--  test/Transforms/LoopVectorize/store-shuffle-bug.ll | 17
-rw-r--r--  test/Transforms/LoopVectorize/vect.omp.persistence.ll | 88
-rw-r--r--  test/Transforms/LoopVectorize/vect.stats.ll | 65
-rw-r--r--  test/Transforms/MergeFunc/crash.ll | 14
-rw-r--r--  test/Transforms/MergeFunc/inttoptr-address-space.ll | 6
-rw-r--r--  test/Transforms/MergeFunc/inttoptr.ll | 14
-rw-r--r--  test/Transforms/MergeFunc/mergefunc-struct-return.ll | 40
-rw-r--r--  test/Transforms/SLPVectorizer/AArch64/lit.local.cfg | 3
-rw-r--r--  test/Transforms/SLPVectorizer/AArch64/mismatched-intrinsics.ll (renamed from test/Transforms/SLPVectorizer/ARM64/mismatched-intrinsics.ll) | 0
-rw-r--r--  test/Transforms/SLPVectorizer/X86/align.ll | 27
-rw-r--r--  test/Transforms/SLPVectorizer/X86/call.ll | 128
-rw-r--r--  test/Transforms/SLPVectorizer/X86/consecutive-access.ll | 175
-rw-r--r--  test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll | 31
-rw-r--r--  test/Transforms/SLPVectorizer/X86/cse.ll | 30
-rw-r--r--  test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll | 62
-rw-r--r--  test/Transforms/SLPVectorizer/X86/intrinsic.ll | 44
-rw-r--r--  test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll | 36
-rw-r--r--  test/Transforms/SLPVectorizer/X86/value-bug.ll | 80
-rw-r--r--  test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg (renamed from test/Transforms/SLPVectorizer/ARM64/lit.local.cfg) | 3
-rw-r--r--  test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll | 59
-rw-r--r--  test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll | 137
-rw-r--r--  test/Transforms/SimplifyCFG/extract-cost.ll | 22
-rw-r--r--  test/Transforms/TailCallElim/basic.ll | 31
103 files changed, 4954 insertions, 143 deletions
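
All of the test files added or updated below follow LLVM's standard FileCheck pattern: a RUN line pipes the file through opt with one pass enabled, and CHECK lines assert on the transformed IR. As a minimal sketch of that pattern (this example is illustrative only and is not part of the commit; the pass and function names are chosen freely):

; RUN: opt < %s -instcombine -S | FileCheck %s

; InstCombine folds the add of zero away, so the function should
; return %x directly and no add instruction should survive.
define i32 @example(i32 %x) {
; CHECK-LABEL: @example
; CHECK-NOT: add
; CHECK: ret i32 %x
  %y = add i32 %x, 0
  ret i32 %y
}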
diff --git a/test/Transforms/AddDiscriminators/no-discriminators.ll b/test/Transforms/AddDiscriminators/no-discriminators.ll
new file mode 100644
index 0000000..f7b45e29
--- /dev/null
+++ b/test/Transforms/AddDiscriminators/no-discriminators.ll
@@ -0,0 +1,71 @@
+; RUN: opt < %s -add-discriminators -S | FileCheck %s
+
+; We should not generate discriminators for DWARF versions prior to 4.
+;
+; Original code:
+;
+; int foo(long i) {
+; if (i < 5) return 2; else return 90;
+; }
+;
+; None of the !dbg nodes associated with the if() statement should be
+; altered. If they are, it means that the discriminators pass added a
+; new lexical scope.
+
+define i32 @foo(i64 %i) #0 {
+entry:
+ %retval = alloca i32, align 4
+ %i.addr = alloca i64, align 8
+ store i64 %i, i64* %i.addr, align 8
+ call void @llvm.dbg.declare(metadata !{i64* %i.addr}, metadata !13), !dbg !14
+ %0 = load i64* %i.addr, align 8, !dbg !15
+; CHECK: %0 = load i64* %i.addr, align 8, !dbg !15
+ %cmp = icmp slt i64 %0, 5, !dbg !15
+; CHECK: %cmp = icmp slt i64 %0, 5, !dbg !15
+ br i1 %cmp, label %if.then, label %if.else, !dbg !15
+; CHECK: br i1 %cmp, label %if.then, label %if.else, !dbg !15
+
+if.then: ; preds = %entry
+ store i32 2, i32* %retval, !dbg !15
+ br label %return, !dbg !15
+
+if.else: ; preds = %entry
+ store i32 90, i32* %retval, !dbg !15
+ br label %return, !dbg !15
+
+return: ; preds = %if.else, %if.then
+ %1 = load i32* %retval, !dbg !17
+ ret i32 %1, !dbg !17
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!10, !11}
+!llvm.ident = !{!12}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [./no-discriminators] [DW_LANG_C99]
+!1 = metadata !{metadata !"no-discriminators", metadata !"."}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i64)* @foo, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [./no-discriminators]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8, metadata !9}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 786468, null, null, metadata !"long int", i32 0, i64 64, i64 64, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [long int] [line 0, size 64, align 64, offset 0, enc DW_ATE_signed]
+!10 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+; CHECK: !10 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!11 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!12 = metadata !{metadata !"clang version 3.5.0 "}
+!13 = metadata !{i32 786689, metadata !4, metadata !"i", metadata !5, i32 16777217, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [i] [line 1]
+!14 = metadata !{i32 1, i32 0, metadata !4, null}
+!15 = metadata !{i32 2, i32 0, metadata !16, null}
+; CHECK: !15 = metadata !{i32 2, i32 0, metadata !16, null}
+!16 = metadata !{i32 786443, metadata !1, metadata !4, i32 2, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [./no-discriminators]
+; CHECK: !16 = metadata !{i32 786443, metadata !1, metadata !4, i32 2, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [./no-discriminators]
+!17 = metadata !{i32 3, i32 0, metadata !4, null}
diff --git a/test/Transforms/ArgumentPromotion/inalloca.ll b/test/Transforms/ArgumentPromotion/inalloca.ll
index 513a968..089a78f 100644
--- a/test/Transforms/ArgumentPromotion/inalloca.ll
+++ b/test/Transforms/ArgumentPromotion/inalloca.ll
@@ -20,7 +20,7 @@ entry:
define i32 @main() {
entry:
- %S = alloca %struct.ss
+ %S = alloca inalloca %struct.ss
%f0 = getelementptr %struct.ss* %S, i32 0, i32 0
%f1 = getelementptr %struct.ss* %S, i32 0, i32 1
store i32 1, i32* %f0, align 4
@@ -42,7 +42,7 @@ entry:
define i32 @test() {
entry:
- %S = alloca %struct.ss
+ %S = alloca inalloca %struct.ss
%c = call i1 @g(%struct.ss* %S, %struct.ss* inalloca %S)
; CHECK: call i1 @g(%struct.ss* %S, %struct.ss* inalloca %S)
ret i32 0
diff --git a/test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v7.ll b/test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v7.ll
new file mode 100644
index 0000000..ac9fc1f
--- /dev/null
+++ b/test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v7.ll
@@ -0,0 +1,340 @@
+; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -atomic-ll-sc %s | FileCheck %s
+
+define i8 @test_atomic_xchg_i8(i8* %ptr, i8 %xchgend) {
+; CHECK-LABEL: @test_atomic_xchg_i8
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[NEWVAL32:%.*]] = zext i8 %xchgend to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK-NOT: fence
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw xchg i8* %ptr, i8 %xchgend monotonic
+ ret i8 %res
+}
+
+define i16 @test_atomic_add_i16(i16* %ptr, i16 %addend) {
+; CHECK-LABEL: @test_atomic_add_i16
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i16(i16* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i16
+; CHECK: [[NEWVAL:%.*]] = add i16 [[OLDVAL]], %addend
+; CHECK: [[NEWVAL32:%.*]] = zext i16 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i16 [[OLDVAL]]
+ %res = atomicrmw add i16* %ptr, i16 %addend seq_cst
+ ret i16 %res
+}
+
+define i32 @test_atomic_sub_i32(i32* %ptr, i32 %subend) {
+; CHECK-LABEL: @test_atomic_sub_i32
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %ptr)
+; CHECK: [[NEWVAL:%.*]] = sub i32 [[OLDVAL]], %subend
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 [[NEWVAL]], i32* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence acquire
+; CHECK: ret i32 [[OLDVAL]]
+ %res = atomicrmw sub i32* %ptr, i32 %subend acquire
+ ret i32 %res
+}
+
+define i8 @test_atomic_and_i8(i8* %ptr, i8 %andend) {
+; CHECK-LABEL: @test_atomic_and_i8
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[NEWVAL:%.*]] = and i8 [[OLDVAL]], %andend
+; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK-NOT: fence
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw and i8* %ptr, i8 %andend release
+ ret i8 %res
+}
+
+define i16 @test_atomic_nand_i16(i16* %ptr, i16 %nandend) {
+; CHECK-LABEL: @test_atomic_nand_i16
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i16(i16* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i16
+; CHECK: [[NEWVAL_TMP:%.*]] = xor i16 %nandend, -1
+; CHECK: [[NEWVAL:%.*]] = and i16 [[OLDVAL]], [[NEWVAL_TMP]]
+; CHECK: [[NEWVAL32:%.*]] = zext i16 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i16 [[OLDVAL]]
+ %res = atomicrmw nand i16* %ptr, i16 %nandend seq_cst
+ ret i16 %res
+}
+
+define i64 @test_atomic_or_i64(i64* %ptr, i64 %orend) {
+; CHECK-LABEL: @test_atomic_or_i64
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldrexd(i8* [[PTR8]])
+; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
+; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
+; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
+; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
+; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
+; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
+; CHECK: [[NEWVAL:%.*]] = or i64 [[OLDVAL]], %orend
+; CHECK: [[NEWLO:%.*]] = trunc i64 [[NEWVAL]] to i32
+; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 [[NEWVAL]], 32
+; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i64 [[OLDVAL]]
+ %res = atomicrmw or i64* %ptr, i64 %orend seq_cst
+ ret i64 %res
+}
+
+define i8 @test_atomic_xor_i8(i8* %ptr, i8 %xorend) {
+; CHECK-LABEL: @test_atomic_xor_i8
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[NEWVAL:%.*]] = xor i8 [[OLDVAL]], %xorend
+; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw xor i8* %ptr, i8 %xorend seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomic_max_i8(i8* %ptr, i8 %maxend) {
+; CHECK-LABEL: @test_atomic_max_i8
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[WANT_OLD:%.*]] = icmp sgt i8 [[OLDVAL]], %maxend
+; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %maxend
+; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw max i8* %ptr, i8 %maxend seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomic_min_i8(i8* %ptr, i8 %minend) {
+; CHECK-LABEL: @test_atomic_min_i8
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[WANT_OLD:%.*]] = icmp sle i8 [[OLDVAL]], %minend
+; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %minend
+; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw min i8* %ptr, i8 %minend seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomic_umax_i8(i8* %ptr, i8 %umaxend) {
+; CHECK-LABEL: @test_atomic_umax_i8
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[WANT_OLD:%.*]] = icmp ugt i8 [[OLDVAL]], %umaxend
+; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %umaxend
+; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw umax i8* %ptr, i8 %umaxend seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomic_umin_i8(i8* %ptr, i8 %uminend) {
+; CHECK-LABEL: @test_atomic_umin_i8
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[WANT_OLD:%.*]] = icmp ule i8 [[OLDVAL]], %uminend
+; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %uminend
+; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw umin i8* %ptr, i8 %uminend seq_cst
+ ret i8 %res
+}
+
+define i8 @test_cmpxchg_i8_seqcst_seqcst(i8* %ptr, i8 %desired, i8 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i8_seqcst_seqcst
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 %1 to i8
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i8 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[BARRIER:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[NEWVAL32:%.*]] = zext i8 %newval to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
+
+; CHECK: [[BARRIER]]:
+; CHECK: fence seq_cst
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[DONE]]:
+; CHECK: ret i8 [[OLDVAL]]
+
+ %old = cmpxchg i8* %ptr, i8 %desired, i8 %newval seq_cst seq_cst
+ ret i8 %old
+}
+
+define i16 @test_cmpxchg_i16_seqcst_monotonic(i16* %ptr, i16 %desired, i16 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i16_seqcst_monotonic
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i16(i16* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 %1 to i16
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i16 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[DONE:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[NEWVAL32:%.*]] = zext i16 %newval to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
+
+; CHECK: [[BARRIER]]:
+; CHECK: fence seq_cst
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[DONE]]:
+; CHECK: ret i16 [[OLDVAL]]
+
+ %old = cmpxchg i16* %ptr, i16 %desired, i16 %newval seq_cst monotonic
+ ret i16 %old
+}
+
+define i32 @test_cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %desired, i32 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i32_acquire_acquire
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %ptr)
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[DONE:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %newval, i32* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
+
+; CHECK: [[BARRIER]]:
+; CHECK: fence acquire
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[DONE]]:
+; CHECK: ret i32 [[OLDVAL]]
+
+ %old = cmpxchg i32* %ptr, i32 %desired, i32 %newval acquire acquire
+ ret i32 %old
+}
+
+define i64 @test_cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %desired, i64 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i64_monotonic_monotonic
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldrexd(i8* [[PTR8]])
+; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
+; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
+; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
+; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
+; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
+; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i64 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[DONE:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[NEWLO:%.*]] = trunc i64 %newval to i32
+; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 %newval, 32
+; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
+
+; CHECK: [[BARRIER]]:
+; CHECK-NOT: fence
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[DONE]]:
+; CHECK: ret i64 [[OLDVAL]]
+
+ %old = cmpxchg i64* %ptr, i64 %desired, i64 %newval monotonic monotonic
+ ret i64 %old
+}
\ No newline at end of file
diff --git a/test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v8.ll b/test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v8.ll
new file mode 100644
index 0000000..bec5bef
--- /dev/null
+++ b/test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v8.ll
@@ -0,0 +1,202 @@
+; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -atomic-ll-sc %s | FileCheck %s
+
+define i8 @test_atomic_xchg_i8(i8* %ptr, i8 %xchgend) {
+; CHECK-LABEL: @test_atomic_xchg_i8
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[NEWVAL32:%.*]] = zext i8 %xchgend to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK-NOT: fence
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw xchg i8* %ptr, i8 %xchgend monotonic
+ ret i8 %res
+}
+
+define i16 @test_atomic_add_i16(i16* %ptr, i16 %addend) {
+; CHECK-LABEL: @test_atomic_add_i16
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldaex.p0i16(i16* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i16
+; CHECK: [[NEWVAL:%.*]] = add i16 [[OLDVAL]], %addend
+; CHECK: [[NEWVAL32:%.*]] = zext i16 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK-NOT: fence
+; CHECK: ret i16 [[OLDVAL]]
+ %res = atomicrmw add i16* %ptr, i16 %addend seq_cst
+ ret i16 %res
+}
+
+define i32 @test_atomic_sub_i32(i32* %ptr, i32 %subend) {
+; CHECK-LABEL: @test_atomic_sub_i32
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldaex.p0i32(i32* %ptr)
+; CHECK: [[NEWVAL:%.*]] = sub i32 [[OLDVAL]], %subend
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 [[NEWVAL]], i32* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK-NOT: fence
+; CHECK: ret i32 [[OLDVAL]]
+ %res = atomicrmw sub i32* %ptr, i32 %subend acquire
+ ret i32 %res
+}
+
+define i64 @test_atomic_or_i64(i64* %ptr, i64 %orend) {
+; CHECK-LABEL: @test_atomic_or_i64
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldaexd(i8* [[PTR8]])
+; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
+; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
+; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
+; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
+; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
+; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
+; CHECK: [[NEWVAL:%.*]] = or i64 [[OLDVAL]], %orend
+; CHECK: [[NEWLO:%.*]] = trunc i64 [[NEWVAL]] to i32
+; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 [[NEWVAL]], 32
+; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK-NOT: fence
+; CHECK: ret i64 [[OLDVAL]]
+ %res = atomicrmw or i64* %ptr, i64 %orend seq_cst
+ ret i64 %res
+}
+
+define i8 @test_cmpxchg_i8_seqcst_seqcst(i8* %ptr, i8 %desired, i8 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i8_seqcst_seqcst
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldaex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 %1 to i8
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i8 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[BARRIER:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[NEWVAL32:%.*]] = zext i8 %newval to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
+
+; CHECK: [[BARRIER]]:
+; CHECK-NOT: fence
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[DONE]]:
+; CHECK: ret i8 [[OLDVAL]]
+
+ %old = cmpxchg i8* %ptr, i8 %desired, i8 %newval seq_cst seq_cst
+ ret i8 %old
+}
+
+define i16 @test_cmpxchg_i16_seqcst_monotonic(i16* %ptr, i16 %desired, i16 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i16_seqcst_monotonic
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldaex.p0i16(i16* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 %1 to i16
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i16 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[DONE:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[NEWVAL32:%.*]] = zext i16 %newval to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
+
+; CHECK: [[BARRIER]]:
+; CHECK-NOT: fence
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[DONE]]:
+; CHECK: ret i16 [[OLDVAL]]
+
+ %old = cmpxchg i16* %ptr, i16 %desired, i16 %newval seq_cst monotonic
+ ret i16 %old
+}
+
+define i32 @test_cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %desired, i32 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i32_acquire_acquire
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldaex.p0i32(i32* %ptr)
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[DONE:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %newval, i32* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
+
+; CHECK: [[BARRIER]]:
+; CHECK-NOT: fence
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[DONE]]:
+; CHECK: ret i32 [[OLDVAL]]
+
+ %old = cmpxchg i32* %ptr, i32 %desired, i32 %newval acquire acquire
+ ret i32 %old
+}
+
+define i64 @test_cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %desired, i64 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i64_monotonic_monotonic
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldrexd(i8* [[PTR8]])
+; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
+; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
+; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
+; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
+; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
+; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i64 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[DONE:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[NEWLO:%.*]] = trunc i64 %newval to i32
+; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 %newval, 32
+; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
+
+; CHECK: [[BARRIER]]:
+; CHECK-NOT: fence
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[DONE]]:
+; CHECK: ret i64 [[OLDVAL]]
+
+ %old = cmpxchg i64* %ptr, i64 %desired, i64 %newval monotonic monotonic
+ ret i64 %old
+}
\ No newline at end of file
diff --git a/test/Transforms/GlobalMerge/ARM64/lit.local.cfg b/test/Transforms/AtomicExpandLoadLinked/ARM/lit.local.cfg
index a75a42b..8a3ba96 100644
--- a/test/Transforms/GlobalMerge/ARM64/lit.local.cfg
+++ b/test/Transforms/AtomicExpandLoadLinked/ARM/lit.local.cfg
@@ -1,4 +1,4 @@
targets = set(config.root.targets_to_build.split())
-if not 'ARM64' in targets:
+if not 'ARM' in targets:
config.unsupported = True
diff --git a/test/Transforms/BBVectorize/simple-int.ll b/test/Transforms/BBVectorize/simple-int.ll
index e90900a..e0c1efa 100644
--- a/test/Transforms/BBVectorize/simple-int.ll
+++ b/test/Transforms/BBVectorize/simple-int.ll
@@ -5,6 +5,18 @@ declare double @llvm.fma.f64(double, double, double)
declare double @llvm.fmuladd.f64(double, double, double)
declare double @llvm.cos.f64(double)
declare double @llvm.powi.f64(double, i32)
+declare double @llvm.round.f64(double)
+declare double @llvm.copysign.f64(double, double)
+declare double @llvm.ceil.f64(double)
+declare double @llvm.nearbyint.f64(double)
+declare double @llvm.rint.f64(double)
+declare double @llvm.trunc.f64(double)
+declare double @llvm.floor.f64(double)
+declare double @llvm.fabs.f64(double)
+declare i64 @llvm.bswap.i64(i64)
+declare i64 @llvm.ctpop.i64(i64)
+declare i64 @llvm.ctlz.i64(i64, i1)
+declare i64 @llvm.cttz.i64(i64, i1)
; Basic depth-3 chain with fma
define double @test1(double %A1, double %A2, double %B1, double %B2, double %C1, double %C2) {
@@ -124,9 +136,371 @@ define double @test4(double %A1, double %A2, double %B1, double %B2, i32 %P) {
; CHECK: ret double %R
}
+; Basic depth-3 chain with round
+define double @testround(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.round.f64(double %X1)
+ %Y2 = call double @llvm.round.f64(double %X2)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testround
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x double> @llvm.round.v2f64(<2 x double> %X1)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with copysign
+define double @testcopysign(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.copysign.f64(double %X1, double %A1)
+ %Y2 = call double @llvm.copysign.f64(double %X2, double %A1)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testcopysign
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1.v.i1.2 = insertelement <2 x double> %X1.v.i0.1, double %A1, i32 1
+; CHECK: %Y1 = call <2 x double> @llvm.copysign.v2f64(<2 x double> %X1, <2 x double> %Y1.v.i1.2)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with ceil
+define double @testceil(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.ceil.f64(double %X1)
+ %Y2 = call double @llvm.ceil.f64(double %X2)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testceil
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %X1)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with nearbyint
+define double @testnearbyint(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.nearbyint.f64(double %X1)
+ %Y2 = call double @llvm.nearbyint.f64(double %X2)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testnearbyint
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %X1)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with rint
+define double @testrint(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.rint.f64(double %X1)
+ %Y2 = call double @llvm.rint.f64(double %X2)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testrint
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x double> @llvm.rint.v2f64(<2 x double> %X1)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with trunc
+define double @testtrunc(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.trunc.f64(double %X1)
+ %Y2 = call double @llvm.trunc.f64(double %X2)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testtrunc
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %X1)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with floor
+define double @testfloor(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.floor.f64(double %X1)
+ %Y2 = call double @llvm.floor.f64(double %X2)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testfloor
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x double> @llvm.floor.v2f64(<2 x double> %X1)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with fabs
+define double @testfabs(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.fabs.f64(double %X1)
+ %Y2 = call double @llvm.fabs.f64(double %X2)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testfabs
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x double> @llvm.fabs.v2f64(<2 x double> %X1)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with bswap
+define i64 @testbswap(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
+ %X1 = sub i64 %A1, %B1
+ %X2 = sub i64 %A2, %B2
+ %Y1 = call i64 @llvm.bswap.i64(i64 %X1)
+ %Y2 = call i64 @llvm.bswap.i64(i64 %X2)
+ %Z1 = add i64 %Y1, %B1
+ %Z2 = add i64 %Y2, %B2
+ %R = mul i64 %Z1, %Z2
+ ret i64 %R
+
+; CHECK: @testbswap
+; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
+; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %X1)
+; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
+; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
+; CHECK: ret i64 %R
+
+}
+
+; Basic depth-3 chain with ctpop
+define i64 @testctpop(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
+ %X1 = sub i64 %A1, %B1
+ %X2 = sub i64 %A2, %B2
+ %Y1 = call i64 @llvm.ctpop.i64(i64 %X1)
+ %Y2 = call i64 @llvm.ctpop.i64(i64 %X2)
+ %Z1 = add i64 %Y1, %B1
+ %Z2 = add i64 %Y2, %B2
+ %R = mul i64 %Z1, %Z2
+ ret i64 %R
+
+; CHECK: @testctpop
+; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
+; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %X1)
+; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
+; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
+; CHECK: ret i64 %R
+
+}
+
+; Basic depth-3 chain with ctlz
+define i64 @testctlz(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
+ %X1 = sub i64 %A1, %B1
+ %X2 = sub i64 %A2, %B2
+ %Y1 = call i64 @llvm.ctlz.i64(i64 %X1, i1 true)
+ %Y2 = call i64 @llvm.ctlz.i64(i64 %X2, i1 true)
+ %Z1 = add i64 %Y1, %B1
+ %Z2 = add i64 %Y2, %B2
+ %R = mul i64 %Z1, %Z2
+ ret i64 %R
+
+; CHECK: @testctlz
+; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
+; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %X1, i1 true)
+; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
+; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
+; CHECK: ret i64 %R
+
+}
+
+; Basic depth-3 chain with ctlz
+define i64 @testctlzneg(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
+ %X1 = sub i64 %A1, %B1
+ %X2 = sub i64 %A2, %B2
+ %Y1 = call i64 @llvm.ctlz.i64(i64 %X1, i1 true)
+ %Y2 = call i64 @llvm.ctlz.i64(i64 %X2, i1 false)
+ %Z1 = add i64 %Y1, %B1
+ %Z2 = add i64 %Y2, %B2
+ %R = mul i64 %Z1, %Z2
+ ret i64 %R
+
+; CHECK: @testctlzneg
+; CHECK: %X1 = sub i64 %A1, %B1
+; CHECK: %X2 = sub i64 %A2, %B2
+; CHECK: %Y1 = call i64 @llvm.ctlz.i64(i64 %X1, i1 true)
+; CHECK: %Y2 = call i64 @llvm.ctlz.i64(i64 %X2, i1 false)
+; CHECK: %Z1 = add i64 %Y1, %B1
+; CHECK: %Z2 = add i64 %Y2, %B2
+; CHECK: %R = mul i64 %Z1, %Z2
+; CHECK: ret i64 %R
+}
+
+; Basic depth-3 chain with cttz
+define i64 @testcttz(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
+ %X1 = sub i64 %A1, %B1
+ %X2 = sub i64 %A2, %B2
+ %Y1 = call i64 @llvm.cttz.i64(i64 %X1, i1 true)
+ %Y2 = call i64 @llvm.cttz.i64(i64 %X2, i1 true)
+ %Z1 = add i64 %Y1, %B1
+ %Z2 = add i64 %Y2, %B2
+ %R = mul i64 %Z1, %Z2
+ ret i64 %R
+
+; CHECK: @testcttz
+; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
+; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %X1, i1 true)
+; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
+; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
+; CHECK: ret i64 %R
+
+}
+
+; Basic depth-3 chain with cttz
+define i64 @testcttzneg(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
+ %X1 = sub i64 %A1, %B1
+ %X2 = sub i64 %A2, %B2
+ %Y1 = call i64 @llvm.cttz.i64(i64 %X1, i1 true)
+ %Y2 = call i64 @llvm.cttz.i64(i64 %X2, i1 false)
+ %Z1 = add i64 %Y1, %B1
+ %Z2 = add i64 %Y2, %B2
+ %R = mul i64 %Z1, %Z2
+ ret i64 %R
+
+; CHECK: @testcttzneg
+; CHECK: %X1 = sub i64 %A1, %B1
+; CHECK: %X2 = sub i64 %A2, %B2
+; CHECK: %Y1 = call i64 @llvm.cttz.i64(i64 %X1, i1 true)
+; CHECK: %Y2 = call i64 @llvm.cttz.i64(i64 %X2, i1 false)
+; CHECK: %Z1 = add i64 %Y1, %B1
+; CHECK: %Z2 = add i64 %Y2, %B2
+; CHECK: %R = mul i64 %Z1, %Z2
+; CHECK: ret i64 %R
+}
+
+
+
; CHECK: declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #0
; CHECK: declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>) #0
; CHECK: declare <2 x double> @llvm.cos.v2f64(<2 x double>) #0
; CHECK: declare <2 x double> @llvm.powi.v2f64(<2 x double>, i32) #0
-
+; CHECK: declare <2 x double> @llvm.round.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>) #0
+; CHECK: declare <2 x double> @llvm.ceil.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.rint.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.trunc.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.floor.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.fabs.v2f64(<2 x double>) #0
+; CHECK: declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>) #0
+; CHECK: declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) #0
+; CHECK: declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) #0
+; CHECK: declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1) #0
; CHECK: attributes #0 = { nounwind readnone }
diff --git a/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll b/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll
new file mode 100644
index 0000000..a985c36
--- /dev/null
+++ b/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll
@@ -0,0 +1,37 @@
+; RUN: opt -S -codegenprepare < %s | FileCheck %s
+
+target datalayout =
+"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK-LABEL: @load_cast_gep
+; CHECK: add i64 %sunkaddr, 40
+define void @load_cast_gep(i1 %cond, i64* %base) {
+entry:
+ %addr = getelementptr inbounds i64* %base, i64 5
+ %casted = addrspacecast i64* %addr to i32 addrspace(1)*
+ br i1 %cond, label %if.then, label %fallthrough
+
+if.then:
+ %v = load i32 addrspace(1)* %casted, align 4
+ br label %fallthrough
+
+fallthrough:
+ ret void
+}
+
+; CHECK-LABEL: @store_gep_cast
+; CHECK: add i64 %sunkaddr, 20
+define void @store_gep_cast(i1 %cond, i64* %base) {
+entry:
+ %casted = addrspacecast i64* %base to i32 addrspace(1)*
+ %addr = getelementptr inbounds i32 addrspace(1)* %casted, i64 5
+ br i1 %cond, label %if.then, label %fallthrough
+
+if.then:
+ store i32 0, i32 addrspace(1)* %addr, align 4
+ br label %fallthrough
+
+fallthrough:
+ ret void
+}
diff --git a/test/Transforms/ConstProp/loads.ll b/test/Transforms/ConstProp/loads.ll
index d05db47..0ea9c47 100644
--- a/test/Transforms/ConstProp/loads.ll
+++ b/test/Transforms/ConstProp/loads.ll
@@ -219,3 +219,37 @@ entry:
; BE-LABEL: @test15(
; BE: ret i64 2
}
+
+@gv7 = constant [4 x i8*] [i8* null, i8* inttoptr (i64 -14 to i8*), i8* null, i8* null]
+define i64 @test16.1() {
+ %v = load i64* bitcast ([4 x i8*]* @gv7 to i64*), align 8
+ ret i64 %v
+
+; LE-LABEL: @test16.1(
+; LE: ret i64 0
+
+; BE-LABEL: @test16.1(
+; BE: ret i64 0
+}
+
+define i64 @test16.2() {
+ %v = load i64* bitcast (i8** getelementptr inbounds ([4 x i8*]* @gv7, i64 0, i64 1) to i64*), align 8
+ ret i64 %v
+
+; LE-LABEL: @test16.2(
+; LE: ret i64 -14
+
+; BE-LABEL: @test16.2(
+; BE: ret i64 -14
+}
+
+define i64 @test16.3() {
+ %v = load i64* bitcast (i8** getelementptr inbounds ([4 x i8*]* @gv7, i64 0, i64 2) to i64*), align 8
+ ret i64 %v
+
+; LE-LABEL: @test16.3(
+; LE: ret i64 0
+
+; BE-LABEL: @test16.3(
+; BE: ret i64 0
+}
diff --git a/test/Transforms/ConstantHoisting/AArch64/const-addr.ll b/test/Transforms/ConstantHoisting/AArch64/const-addr.ll
new file mode 100644
index 0000000..89d5960
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/AArch64/const-addr.ll
@@ -0,0 +1,23 @@
+; RUN: opt -mtriple=arm64-darwin-unknown -S -consthoist < %s | FileCheck %s
+
+%T = type { i32, i32, i32, i32 }
+
+define i32 @test1() nounwind {
+; CHECK-LABEL: test1
+; CHECK: %const = bitcast i64 68141056 to i64
+; CHECK: %1 = inttoptr i64 %const to %T*
+; CHECK: %o1 = getelementptr %T* %1, i32 0, i32 1
+; CHECK: %o2 = getelementptr %T* %1, i32 0, i32 2
+; CHECK: %o3 = getelementptr %T* %1, i32 0, i32 3
+ %at = inttoptr i64 68141056 to %T*
+ %o1 = getelementptr %T* %at, i32 0, i32 1
+ %t1 = load i32* %o1
+ %o2 = getelementptr %T* %at, i32 0, i32 2
+ %t2 = load i32* %o2
+ %a1 = add i32 %t1, %t2
+ %o3 = getelementptr %T* %at, i32 0, i32 3
+ %t3 = load i32* %o3
+ %a2 = add i32 %a1, %t3
+ ret i32 %a2
+}
+
diff --git a/test/Transforms/ConstantHoisting/AArch64/large-immediate.ll b/test/Transforms/ConstantHoisting/AArch64/large-immediate.ll
new file mode 100644
index 0000000..575be79
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/AArch64/large-immediate.ll
@@ -0,0 +1,27 @@
+; RUN: opt -mtriple=arm64-darwin-unknown -S -consthoist < %s | FileCheck %s
+
+define i128 @test1(i128 %a) nounwind {
+; CHECK-LABEL: test1
+; CHECK: %const = bitcast i128 12297829382473034410122878 to i128
+ %1 = add i128 %a, 12297829382473034410122878
+ %2 = add i128 %1, 12297829382473034410122878
+ ret i128 %2
+}
+
+; Check that we don't hoist large, but cheap constants
+define i512 @test2(i512 %a) nounwind {
+; CHECK-LABEL: test2
+; CHECK-NOT: %const = bitcast i512 7 to i512
+ %1 = and i512 %a, 7
+ %2 = or i512 %1, 7
+ ret i512 %2
+}
+
+; Check that we don't hoist the shift value of a shift instruction.
+define i512 @test3(i512 %a) nounwind {
+; CHECK-LABEL: test3
+; CHECK-NOT: %const = bitcast i512 504 to i512
+ %1 = shl i512 %a, 504
+ %2 = ashr i512 %1, 504
+ ret i512 %2
+}
diff --git a/test/Transforms/ConstantHoisting/AArch64/lit.local.cfg b/test/Transforms/ConstantHoisting/AArch64/lit.local.cfg
new file mode 100644
index 0000000..c420349
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/AArch64/lit.local.cfg
@@ -0,0 +1,3 @@
+targets = set(config.root.targets_to_build.split())
+if not 'AArch64' in targets:
+ config.unsupported = True
diff --git a/test/Transforms/ConstantHoisting/PowerPC/const-base-addr.ll b/test/Transforms/ConstantHoisting/PowerPC/const-base-addr.ll
new file mode 100644
index 0000000..b4337ee
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/PowerPC/const-base-addr.ll
@@ -0,0 +1,23 @@
+; RUN: opt -S -consthoist < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+%T = type { i32, i32, i32, i32 }
+
+; Test if even cheap base addresses are hoisted.
+define i32 @test1() nounwind {
+; CHECK-LABEL: @test1
+; CHECK: %const = bitcast i32 12345678 to i32
+; CHECK: %1 = inttoptr i32 %const to %T*
+; CHECK: %addr1 = getelementptr %T* %1, i32 0, i32 1
+ %addr1 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 1
+ %tmp1 = load i32* %addr1
+ %addr2 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 2
+ %tmp2 = load i32* %addr2
+ %addr3 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 3
+ %tmp3 = load i32* %addr3
+ %tmp4 = add i32 %tmp1, %tmp2
+ %tmp5 = add i32 %tmp3, %tmp4
+ ret i32 %tmp5
+}
+
diff --git a/test/Transforms/LoopVectorize/ARM64/lit.local.cfg b/test/Transforms/ConstantHoisting/PowerPC/lit.local.cfg
index de86e54..2e46300 100644
--- a/test/Transforms/LoopVectorize/ARM64/lit.local.cfg
+++ b/test/Transforms/ConstantHoisting/PowerPC/lit.local.cfg
@@ -1,6 +1,4 @@
-config.suffixes = ['.ll', '.c', '.cpp']
-
targets = set(config.root.targets_to_build.split())
-if not 'ARM64' in targets:
+if not 'PowerPC' in targets:
config.unsupported = True
diff --git a/test/Transforms/ConstantHoisting/PowerPC/masks.ll b/test/Transforms/ConstantHoisting/PowerPC/masks.ll
new file mode 100644
index 0000000..d553182
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/PowerPC/masks.ll
@@ -0,0 +1,66 @@
+; RUN: opt -S -consthoist < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Here the masks are all contiguous and should not be hoisted.
+define i32 @test1() nounwind {
+entry:
+; CHECK-LABEL: @test1
+; CHECK-NOT: bitcast i32 65535 to i32
+; CHECK: and i32 undef, 65535
+ %conv121 = and i32 undef, 65535
+ br i1 undef, label %if.then152, label %if.end167
+
+if.then152:
+; CHECK: and i32 undef, 65535
+ %conv153 = and i32 undef, 65535
+ br i1 undef, label %if.end167, label %end2
+
+if.end167:
+; CHECK: and i32 {{.*}}, 32768
+ %shl161 = shl nuw nsw i32 %conv121, 15
+ %0 = load i8* undef, align 1
+ %conv169 = zext i8 %0 to i32
+ %shl170 = shl nuw nsw i32 %conv169, 7
+ %shl161.masked = and i32 %shl161, 32768
+ %conv174 = or i32 %shl170, %shl161.masked
+ %cmp178 = icmp ugt i32 %conv174, 32767
+ br i1 %cmp178, label %end1, label %end2
+
+end1:
+ unreachable
+
+end2:
+ unreachable
+}
+
+; Here the masks are not contiguous and should be hoisted.
+define i32 @test2() nounwind {
+entry:
+; CHECK-LABEL: @test2
+; CHECK: bitcast i32 65531 to i32
+ %conv121 = and i32 undef, 65531
+ br i1 undef, label %if.then152, label %if.end167
+
+if.then152:
+ %conv153 = and i32 undef, 65531
+ br i1 undef, label %if.end167, label %end2
+
+if.end167:
+; CHECK: add i32 {{.*}}, -32758
+ %shl161 = shl nuw nsw i32 %conv121, 15
+ %0 = load i8* undef, align 1
+ %conv169 = zext i8 %0 to i32
+ %shl170 = shl nuw nsw i32 %conv169, 7
+ %shl161.masked = and i32 %shl161, 32773
+ %conv174 = or i32 %shl170, %shl161.masked
+ %cmp178 = icmp ugt i32 %conv174, 32767
+ br i1 %cmp178, label %end1, label %end2
+
+end1:
+ unreachable
+
+end2:
+ unreachable
+}
+
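
A note on why contiguity matters above (illustrative, not a claim from the patch): PowerPC applies a contiguous run of ones for free via rotate-and-mask instructions such as rlwinm, so 65535 (0xFFFF) costs nothing at each use, while 65531 (0xFFFB) has a hole at bit 2, must be materialized in a register, and is therefore worth hoisting once for its repeated uses. A minimal sketch:

define i32 @mask_sketch(i32 %a, i32 %b) nounwind {
  %x = and i32 %a, 65531   ; 0xFFFB: non-contiguous mask
  %y = and i32 %b, 65531   ; second use of the same mask
  %r = xor i32 %x, %y
  ret i32 %r
}
; Expected: both 'and's feed off one '%const = bitcast i32 65531 to i32'.
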
diff --git a/test/Transforms/ConstantHoisting/X86/cast-inst.ll b/test/Transforms/ConstantHoisting/X86/cast-inst.ll
new file mode 100644
index 0000000..f490f4a
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/X86/cast-inst.ll
@@ -0,0 +1,29 @@
+; RUN: opt -S -consthoist < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; Check if the materialization of the constant and the cast instruction are
+; inserted in the correct order.
+define i32 @cast_inst_test() {
+; CHECK-LABEL: @cast_inst_test
+; CHECK: %const = bitcast i64 4646526064 to i64
+; CHECK: %1 = inttoptr i64 %const to i32*
+; CHECK: %v0 = load i32* %1, align 16
+; CHECK: %const_mat = add i64 %const, 16
+; CHECK-NEXT: %2 = inttoptr i64 %const_mat to i32*
+; CHECK-NEXT: %v1 = load i32* %2, align 16
+; CHECK: %const_mat1 = add i64 %const, 32
+; CHECK-NEXT: %3 = inttoptr i64 %const_mat1 to i32*
+; CHECK-NEXT: %v2 = load i32* %3, align 16
+ %a0 = inttoptr i64 4646526064 to i32*
+ %v0 = load i32* %a0, align 16
+ %a1 = inttoptr i64 4646526080 to i32*
+ %v1 = load i32* %a1, align 16
+ %a2 = inttoptr i64 4646526096 to i32*
+ %v2 = load i32* %a2, align 16
+ %r0 = add i32 %v0, %v1
+ %r1 = add i32 %r0, %v2
+ ret i32 %r1
+}
+
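
The CHECK lines above also pin down the rebasing half of the pass: constants that differ from the hoisted base by a small offset are not materialized separately but expressed as adds on %const. A reduced sketch of the same behaviour:

define i64 @rebase_sketch(i64 %a, i64 %b) nounwind {
  %1 = add i64 %a, 4646526064
  %2 = add i64 %b, 4646526080   ; base + 16: rebased, not re-materialized
  %r = add i64 %1, %2
  ret i64 %r
}
; Expected shape:
;   %const     = bitcast i64 4646526064 to i64
;   %const_mat = add i64 %const, 16
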
diff --git a/test/Transforms/ConstantHoisting/X86/delete-dead-cast-inst.ll b/test/Transforms/ConstantHoisting/X86/delete-dead-cast-inst.ll
index f8e478e..d352386 100644
--- a/test/Transforms/ConstantHoisting/X86/delete-dead-cast-inst.ll
+++ b/test/Transforms/ConstantHoisting/X86/delete-dead-cast-inst.ll
@@ -1,5 +1,4 @@
-; Test if this compiles without assertions.
-; RUN: opt -S -consthoist < %s
+; RUN: opt -S -consthoist < %s | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"
@@ -7,6 +6,13 @@ target triple = "x86_64-apple-macosx10.9.0"
%T = type { i32, i32, i32, i32 }
define i32 @test1() nounwind {
+; CHECK-LABEL: @test1
+; CHECK: %const = bitcast i32 12345678 to i32
+; CHECK-NOT: %base = inttoptr i32 12345678 to %T*
+; CHECK-NEXT: %1 = inttoptr i32 %const to %T*
+; CHECK-NEXT: %addr1 = getelementptr %T* %1, i32 0, i32 1
+; CHECK-NEXT: %addr2 = getelementptr %T* %1, i32 0, i32 2
+; CHECK-NEXT: %addr3 = getelementptr %T* %1, i32 0, i32 3
%base = inttoptr i32 12345678 to %T*
%addr1 = getelementptr %T* %base, i32 0, i32 1
%addr2 = getelementptr %T* %base, i32 0, i32 2
diff --git a/test/Transforms/ConstantHoisting/X86/large-immediate.ll b/test/Transforms/ConstantHoisting/X86/large-immediate.ll
new file mode 100644
index 0000000..e0af9c9
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/X86/large-immediate.ll
@@ -0,0 +1,27 @@
+; RUN: opt -mtriple=x86_64-darwin-unknown -S -consthoist < %s | FileCheck %s
+
+define i128 @test1(i128 %a) nounwind {
+; CHECK-LABEL: test1
+; CHECK: %const = bitcast i128 12297829382473034410122878 to i128
+ %1 = add i128 %a, 12297829382473034410122878
+ %2 = add i128 %1, 12297829382473034410122878
+ ret i128 %2
+}
+
+; Check that we don't hoist the shift value of a shift instruction.
+define i512 @test2(i512 %a) nounwind {
+; CHECK-LABEL: test2
+; CHECK-NOT: %const = bitcast i512 504 to i512
+ %1 = shl i512 %a, 504
+ %2 = ashr i512 %1, 504
+ ret i512 %2
+}
+
+; Check that we don't hoist constants with a type larger than i128.
+define i196 @test3(i196 %a) nounwind {
+; CHECK-LABEL: test3
+; CHECK-NOT: %const = bitcast i196 2 to i196
+ %1 = mul i196 %a, 2
+ %2 = mul i196 %1, 2
+ ret i196 %2
+}
diff --git a/test/Transforms/GVN/load-pre-nonlocal.ll b/test/Transforms/GVN/load-pre-nonlocal.ll
new file mode 100644
index 0000000..7bac1b7
--- /dev/null
+++ b/test/Transforms/GVN/load-pre-nonlocal.ll
@@ -0,0 +1,87 @@
+; RUN: opt -S -o - -basicaa -domtree -gvn %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+%struct.S1 = type { i32, i32 }
+
+@a2 = common global i32* null, align 8
+@a = common global i32* null, align 8
+@s1 = common global %struct.S1 zeroinitializer, align 8
+
+; Check that GVN doesn't determine %2 is partially redundant.
+
+; CHECK-LABEL: define i32 @volatile_load
+; CHECK: for.body:
+; CHECK: %2 = load i32*
+; CHECK: %3 = load volatile i32*
+; CHECK: for.cond.for.end_crit_edge:
+
+define i32 @volatile_load(i32 %n) {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body.lr.ph, label %for.end
+
+for.body.lr.ph:
+ %0 = load i32** @a2, align 8, !tbaa !1
+ %1 = load i32** @a, align 8, !tbaa !1
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
+ %s.09 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
+ %p.08 = phi i32* [ %0, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
+ %2 = load i32* %p.08, align 4, !tbaa !5
+ %arrayidx = getelementptr inbounds i32* %1, i64 %indvars.iv
+ store i32 %2, i32* %arrayidx, align 4, !tbaa !5
+ %3 = load volatile i32* %p.08, align 4, !tbaa !5
+ %add = add nsw i32 %3, %s.09
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %incdec.ptr = getelementptr inbounds i32* %p.08, i64 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp ne i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.body, label %for.cond.for.end_crit_edge
+
+for.cond.for.end_crit_edge:
+ %add.lcssa = phi i32 [ %add, %for.body ]
+ br label %for.end
+
+for.end:
+ %s.0.lcssa = phi i32 [ %add.lcssa, %for.cond.for.end_crit_edge ], [ 0, %entry ]
+ ret i32 %s.0.lcssa
+}
+
+; %1 is partially redundant if %0 can be widened to a 64-bit load.
+
+; CHECK-LABEL: define i32 @overaligned_load
+; CHECK: if.end:
+; CHECK-NOT: %1 = load i32*
+
+define i32 @overaligned_load(i32 %a, i32* nocapture %b) {
+entry:
+ %cmp = icmp sgt i32 %a, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %0 = load i32* getelementptr inbounds (%struct.S1* @s1, i64 0, i32 0), align 8, !tbaa !5
+ br label %if.end
+
+if.else:
+ %arrayidx = getelementptr inbounds i32* %b, i64 2
+ store i32 10, i32* %arrayidx, align 4, !tbaa !5
+ br label %if.end
+
+if.end:
+ %i.0 = phi i32 [ %0, %if.then ], [ 0, %if.else ]
+ %p.0 = phi i32* [ getelementptr inbounds (%struct.S1* @s1, i64 0, i32 0), %if.then ], [ %b, %if.else ]
+ %add.ptr = getelementptr inbounds i32* %p.0, i64 1
+ %1 = load i32* %add.ptr, align 4, !tbaa !5
+ %add1 = add nsw i32 %1, %i.0
+ ret i32 %add1
+}
+
+!1 = metadata !{metadata !2, metadata !2, i64 0}
+!2 = metadata !{metadata !"any pointer", metadata !3, i64 0}
+!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
+!4 = metadata !{metadata !"Simple C/C++ TBAA"}
+!5 = metadata !{metadata !6, metadata !6, i64 0}
+!6 = metadata !{metadata !"int", metadata !3, i64 0}
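
For context on @volatile_load (an illustrative sketch, not one of the tests): memory-dependence analysis treats a volatile access as a clobber, so a volatile load can neither be deleted nor used to satisfy a nearby ordinary load, which is what stops PRE from treating %2 as partially redundant. The same barrier in its smallest form:

define i32 @volatile_clobber_sketch(i32* %p) {
  %v = load i32* %p
  %x = load volatile i32* %p   ; clobber: must stay, in order
  %w = load i32* %p            ; not CSE'd with %v across the volatile load
  %r = add i32 %v, %w
  %r2 = add i32 %r, %x
  ret i32 %r2
}
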
diff --git a/test/Transforms/GlobalDCE/2009-01-05-DeadAliases.ll b/test/Transforms/GlobalDCE/2009-01-05-DeadAliases.ll
index 6658cee..4b96799 100644
--- a/test/Transforms/GlobalDCE/2009-01-05-DeadAliases.ll
+++ b/test/Transforms/GlobalDCE/2009-01-05-DeadAliases.ll
@@ -1,8 +1,18 @@
-; RUN: opt < %s -globaldce -S | not grep @D
-; RUN: opt < %s -globaldce -S | grep @L | count 3
+; RUN: opt < %s -globaldce -S > %t
+; RUN: FileCheck %s < %t
+; RUN: FileCheck --check-prefix=DEAD %s < %t
@A = global i32 0
+; CHECK: @A = global i32 0
+
@D = alias internal i32* @A
+; DEAD-NOT: @D
+
@L1 = alias i32* @A
-@L2 = alias internal i32* @L1
-@L3 = alias i32* @L2
+; CHECK: @L1 = alias i32* @A
+
+@L2 = alias internal i32* @A
+; DEAD-NOT: @L2
+
+@L3 = alias i32* @A
+; CHECK: @L3 = alias i32* @A
diff --git a/test/Transforms/GlobalDCE/global_ctors.ll b/test/Transforms/GlobalDCE/global_ctors.ll
new file mode 100644
index 0000000..91bb9ab
--- /dev/null
+++ b/test/Transforms/GlobalDCE/global_ctors.ll
@@ -0,0 +1,14 @@
+; RUN: opt -S -globaldce < %s | FileCheck %s
+
+; CHECK: @llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_notremovable }]
+; CHECK-NOT: @_GLOBAL__I_a
+
+declare void @_notremovable()
+
+@llvm.global_ctors = appending global [2 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }, { i32, void ()* } { i32 65535, void ()* @_notremovable }]
+
+; Function Attrs: nounwind readnone
+define internal void @_GLOBAL__I_a() #1 section "__TEXT,__StaticInit,regular,pure_instructions" {
+entry:
+ ret void
+}
diff --git a/test/Transforms/GlobalDCE/global_ctors_integration.ll b/test/Transforms/GlobalDCE/global_ctors_integration.ll
new file mode 100644
index 0000000..5e6cc79
--- /dev/null
+++ b/test/Transforms/GlobalDCE/global_ctors_integration.ll
@@ -0,0 +1,45 @@
+; RUN: opt -S -O2 < %s | FileCheck %s
+
+; This test checks that -O2 is able to delete constructors that become empty
+; only after some optimization passes have run, even if the pass structure
+; changes.
+; CHECK-NOT: @_GLOBAL__I_a
+
+%class.Foo = type { i32 }
+
+@foo = global %class.Foo zeroinitializer, align 4
+@_ZN3Bar18LINKER_INITIALIZEDE = external constant i32
+@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]
+
+define internal void @__cxx_global_var_init() section "__TEXT,__StaticInit,regular,pure_instructions" {
+ %1 = load i32* @_ZN3Bar18LINKER_INITIALIZEDE, align 4
+ call void @_ZN3FooC1E17LinkerInitialized(%class.Foo* @foo, i32 %1)
+ ret void
+}
+
+; Function Attrs: ssp uwtable
+define linkonce_odr void @_ZN3FooC1E17LinkerInitialized(%class.Foo* %this, i32) unnamed_addr #0 align 2 {
+ %2 = alloca %class.Foo*, align 8
+ %3 = alloca i32, align 4
+ store %class.Foo* %this, %class.Foo** %2, align 8
+ store i32 %0, i32* %3, align 4
+ %4 = load %class.Foo** %2
+ %5 = load i32* %3, align 4
+ call void @_ZN3FooC2E17LinkerInitialized(%class.Foo* %4, i32 %5)
+ ret void
+}
+
+; Function Attrs: nounwind ssp uwtable
+define linkonce_odr void @_ZN3FooC2E17LinkerInitialized(%class.Foo* %this, i32) unnamed_addr #1 align 2 {
+ %2 = alloca %class.Foo*, align 8
+ %3 = alloca i32, align 4
+ store %class.Foo* %this, %class.Foo** %2, align 8
+ store i32 %0, i32* %3, align 4
+ %4 = load %class.Foo** %2
+ ret void
+}
+
+define internal void @_GLOBAL__I_a() section "__TEXT,__StaticInit,regular,pure_instructions" {
+ call void @__cxx_global_var_init()
+ ret void
+}
diff --git a/test/Transforms/GlobalMerge/ARM64/arm64.ll b/test/Transforms/GlobalMerge/AArch64/arm64.ll
index eea474a..eea474a 100644
--- a/test/Transforms/GlobalMerge/ARM64/arm64.ll
+++ b/test/Transforms/GlobalMerge/AArch64/arm64.ll
diff --git a/test/Transforms/GlobalMerge/AArch64/lit.local.cfg b/test/Transforms/GlobalMerge/AArch64/lit.local.cfg
new file mode 100644
index 0000000..9a66a00
--- /dev/null
+++ b/test/Transforms/GlobalMerge/AArch64/lit.local.cfg
@@ -0,0 +1,4 @@
+targets = set(config.root.targets_to_build.split())
+if not 'AArch64' in targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll b/test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll
index d6a565a..03d6ee4 100644
--- a/test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll
+++ b/test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll
@@ -2,7 +2,7 @@
@g = global i32 0
-@a = alias bitcast (i32* @g to i8*)
+@a = alias i8, i32* @g
define void @f() {
%tmp = load i8* @a
diff --git a/test/Transforms/GlobalOpt/alias-resolve.ll b/test/Transforms/GlobalOpt/alias-resolve.ll
index 2d5a956..bd07b31 100644
--- a/test/Transforms/GlobalOpt/alias-resolve.ll
+++ b/test/Transforms/GlobalOpt/alias-resolve.ll
@@ -1,9 +1,9 @@
; RUN: opt < %s -globalopt -S | FileCheck %s
-@foo1 = alias void ()* @foo2
+@foo1 = alias void ()* @bar2
; CHECK: @foo1 = alias void ()* @bar2
-@foo2 = alias void()* @bar1
+@foo2 = alias void()* @bar2
; CHECK: @foo2 = alias void ()* @bar2
@bar1 = alias void ()* @bar2
diff --git a/test/Transforms/GlobalOpt/alias-used-section.ll b/test/Transforms/GlobalOpt/alias-used-section.ll
index 987c4a4..1217937 100644
--- a/test/Transforms/GlobalOpt/alias-used-section.ll
+++ b/test/Transforms/GlobalOpt/alias-used-section.ll
@@ -1,8 +1,8 @@
; RUN: opt -S -globalopt < %s | FileCheck %s
@_Z17in_custom_section = internal global i8 42, section "CUSTOM"
-@in_custom_section = protected dllexport alias internal i8* @_Z17in_custom_section
+@in_custom_section = dllexport alias internal i8* @_Z17in_custom_section
-; CHECK: @in_custom_section = internal protected dllexport global i8 42, section "CUSTOM"
+; CHECK: @in_custom_section = internal dllexport global i8 42, section "CUSTOM"
@llvm.used = appending global [1 x i8*] [i8* @in_custom_section], section "llvm.metadata"
diff --git a/test/Transforms/GlobalOpt/atexit.ll b/test/Transforms/GlobalOpt/atexit.ll
index dbcd0d7..55c2dab 100644
--- a/test/Transforms/GlobalOpt/atexit.ll
+++ b/test/Transforms/GlobalOpt/atexit.ll
@@ -1,6 +1,6 @@
; RUN: opt < %s -globalopt -S | FileCheck %s
; CHECK: ModuleID
-define internal hidden i32 @__cxa_atexit(void (i8*)* nocapture %func, i8* nocapture %arg, i8* nocapture %dso_handle) nounwind readnone optsize noimplicitfloat {
+define internal i32 @__cxa_atexit(void (i8*)* nocapture %func, i8* nocapture %arg, i8* nocapture %dso_handle) nounwind readnone optsize noimplicitfloat {
unreachable
}
diff --git a/test/Transforms/GlobalOpt/ctor-list-opt.ll b/test/Transforms/GlobalOpt/ctor-list-opt.ll
index 542c786..450bdb8 100644
--- a/test/Transforms/GlobalOpt/ctor-list-opt.ll
+++ b/test/Transforms/GlobalOpt/ctor-list-opt.ll
@@ -1,5 +1,20 @@
-; RUN: opt < %s -globalopt -S | not grep CTOR
-@llvm.global_ctors = appending global [11 x { i32, void ()* }] [ { i32, void ()* } { i32 65535, void ()* @CTOR1 }, { i32, void ()* } { i32 65535, void ()* @CTOR1 }, { i32, void ()* } { i32 65535, void ()* @CTOR2 }, { i32, void ()* } { i32 65535, void ()* @CTOR3 }, { i32, void ()* } { i32 65535, void ()* @CTOR4 }, { i32, void ()* } { i32 65535, void ()* @CTOR5 }, { i32, void ()* } { i32 65535, void ()* @CTOR6 }, { i32, void ()* } { i32 65535, void ()* @CTOR7 }, { i32, void ()* } { i32 65535, void ()* @CTOR8 }, { i32, void ()* } { i32 65535, void ()* @CTOR9 }, { i32, void ()* } { i32 2147483647, void ()* null } ] ; <[10 x { i32, void ()* }]*> [#uses=0]
+; RUN: opt < %s -globalopt -S | FileCheck %s
+; CHECK-NOT: CTOR
+%ini = type { i32, void()*, i8* }
+@llvm.global_ctors = appending global [11 x %ini] [
+ %ini { i32 65535, void ()* @CTOR1, i8* null },
+ %ini { i32 65535, void ()* @CTOR1, i8* null },
+ %ini { i32 65535, void ()* @CTOR2, i8* null },
+ %ini { i32 65535, void ()* @CTOR3, i8* null },
+ %ini { i32 65535, void ()* @CTOR4, i8* null },
+ %ini { i32 65535, void ()* @CTOR5, i8* null },
+ %ini { i32 65535, void ()* @CTOR6, i8* null },
+ %ini { i32 65535, void ()* @CTOR7, i8* null },
+ %ini { i32 65535, void ()* @CTOR8, i8* null },
+ %ini { i32 65535, void ()* @CTOR9, i8* null },
+ %ini { i32 2147483647, void ()* null, i8* null }
+]
+
@G = global i32 0 ; <i32*> [#uses=1]
@G2 = global i32 0 ; <i32*> [#uses=1]
@G3 = global i32 -123 ; <i32*> [#uses=2]
diff --git a/test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll b/test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll
index bd174a8..4ea0b88 100644
--- a/test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll
+++ b/test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll
@@ -1,4 +1,4 @@
-; RUN: llvm-as <%s | opt -ipsccp | llvm-dis | FileCheck %s
+; RUN: opt < %s -ipsccp -S | FileCheck %s
; Don't constant-propagate byval pointers, since they are not pointers!
; PR5038
%struct.MYstr = type { i8, i32 }
diff --git a/test/Transforms/IndVarSimplify/pr18223.ll b/test/Transforms/IndVarSimplify/pr18223.ll
new file mode 100644
index 0000000..738f75c
--- /dev/null
+++ b/test/Transforms/IndVarSimplify/pr18223.ll
@@ -0,0 +1,30 @@
+; RUN: opt -indvars -S < %s | FileCheck %s
+
+; indvars should simplify the phi pair from the for-loop so that %conv folds to 0.
+; CHECK-LABEL: @main(
+; CHECK: ret = phi i32 [ 0, %entry ], [ 0, {{.*}} ]
+
+@c = common global i32 0, align 4
+
+define i32 @main() #0 {
+entry:
+ %0 = load i32* @c, align 4
+ %tobool = icmp eq i32 %0, 0
+ br i1 %tobool, label %for.body, label %exit
+
+for.body:
+ %inc2 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
+ %sub = add i32 %inc2, -1
+ %cmp1 = icmp uge i32 %sub, %inc2
+ %conv = zext i1 %cmp1 to i32
+ br label %for.inc
+
+for.inc:
+ %inc = add nsw i32 %inc2, 1
+ %cmp = icmp slt i32 %inc, 5
+ br i1 %cmp, label %for.body, label %exit
+
+exit:
+ %ret = phi i32 [ 0, %entry ], [ %conv, %for.inc ]
+ ret i32 %ret
+}
diff --git a/test/Transforms/Inline/2010-05-31-ByvalTailcall.ll b/test/Transforms/Inline/2010-05-31-ByvalTailcall.ll
deleted file mode 100644
index b37b9f2..0000000
--- a/test/Transforms/Inline/2010-05-31-ByvalTailcall.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: opt < %s -tailcallelim -inline -instcombine -dse -S | FileCheck %s
-; PR7272
-
-; When inlining through a byval call site, the inliner creates allocas which may
-; be used by inlined calls, so any inlined calls need to have their 'tail' flags
-; cleared. If not then you can get nastiness like with this testcase, where the
-; (inlined) call to 'ext' in 'foo' was being passed an uninitialized value.
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
-target triple = "i386-pc-linux-gnu"
-
-declare void @ext(i32*)
-
-define void @bar(i32* byval %x) {
- call void @ext(i32* %x)
- ret void
-}
-
-define void @foo(i32* %x) {
-; CHECK-LABEL: define void @foo(
-; CHECK: store i32 %1, i32* %x
- call void @bar(i32* byval %x)
- ret void
-}
diff --git a/test/Transforms/Inline/always-inline.ll b/test/Transforms/Inline/always-inline.ll
index a8703b8..5ad1bde 100644
--- a/test/Transforms/Inline/always-inline.ll
+++ b/test/Transforms/Inline/always-inline.ll
@@ -122,3 +122,14 @@ entry:
ret void
}
+define i32 @inner7() {
+ ret i32 1
+}
+define i32 @outer7() {
+; CHECK-LABEL: @outer7(
+; CHECK-NOT: call
+; CHECK: ret
+
+ %r = call i32 @inner7() alwaysinline
+ ret i32 %r
+}
diff --git a/test/Transforms/Inline/byval-tail-call.ll b/test/Transforms/Inline/byval-tail-call.ll
new file mode 100644
index 0000000..3a8906a
--- /dev/null
+++ b/test/Transforms/Inline/byval-tail-call.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -tailcallelim -inline -instcombine -dse -S | FileCheck %s
+; PR7272
+
+; Calls that capture byval parameters cannot be marked as tail calls. Other
+; calls that don't capture byval parameters can still be tail calls.
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
+target triple = "i386-pc-linux-gnu"
+
+declare void @ext(i32*)
+
+define void @bar(i32* byval %x) {
+ call void @ext(i32* %x)
+ ret void
+}
+
+define void @foo(i32* %x) {
+; CHECK-LABEL: define void @foo(
+; CHECK: llvm.lifetime.start
+; CHECK: store i32 %2, i32* %x
+ call void @bar(i32* byval %x)
+ ret void
+}
+
+define internal void @qux(i32* byval %x) {
+ call void @ext(i32* %x)
+ tail call void @ext(i32* null)
+ ret void
+}
+define void @frob(i32* %x) {
+; CHECK-LABEL: define void @frob(
+; CHECK: alloca i32
+; CHECK: {{^ *}}call void @ext(
+; CHECK: tail call void @ext(i32* null)
+; CHECK: ret void
+ tail call void @qux(i32* byval %x)
+ ret void
+}
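
Roughly the shape @foo takes after inlining @bar (a sketch assuming the usual byval lowering, with -instcombine and -dse from the RUN line already applied; names here are illustrative): the byval copy becomes an alloca in @foo's own frame, and because the callee now receives a pointer into that frame, the call inherited from @bar must lose its tail marker; a tail call's callee may not touch the caller's stack.

declare void @ext.sketch(i32*)
define void @foo.sketch(i32* %x) {
  %x.copy = alloca i32                 ; backs the byval argument
  %v = load i32* %x
  store i32 %v, i32* %x.copy
  call void @ext.sketch(i32* %x.copy)  ; 'tail' dropped by the inliner
  ret void
}
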
diff --git a/test/Transforms/Inline/byval_lifetime.ll b/test/Transforms/Inline/byval_lifetime.ll
new file mode 100644
index 0000000..e8dff2a
--- /dev/null
+++ b/test/Transforms/Inline/byval_lifetime.ll
@@ -0,0 +1,26 @@
+; RUN: opt -S -inline < %s | FileCheck %s
+; END.
+
+; When foo is inlined, an alloca is created in main to hold the byval argument,
+; so a lifetime marker should be generated for it as well by default.
+
+%struct.foo = type { i32, [16 x i32] }
+
+@gFoo = global %struct.foo zeroinitializer, align 8
+
+define i32 @foo(%struct.foo* byval align 8 %f, i32 %a) {
+entry:
+ %a1 = getelementptr inbounds %struct.foo* %f, i32 0, i32 1
+ %arrayidx = getelementptr inbounds [16 x i32]* %a1, i32 0, i32 %a
+ %tmp2 = load i32* %arrayidx, align 1
+ ret i32 %tmp2
+}
+
+define i32 @main(i32 %argc, i8** %argv) {
+; CHECK-LABEL: @main
+; CHECK: llvm.lifetime.start
+; CHECK: memcpy
+entry:
+ %call = call i32 @foo(%struct.foo* byval align 8 @gFoo, i32 %argc)
+ ret i32 %call
+}
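
Roughly what @main becomes once @foo is inlined, which is the shape the two CHECK lines look for (a sketch; sizes follow %struct.foo: 4 + 64 = 68 bytes at align 8, and all names are illustrative):

%struct.foo.sketch = type { i32, [16 x i32] }
@gFoo.sketch = global %struct.foo.sketch zeroinitializer, align 8
declare void @llvm.lifetime.start(i64, i8* nocapture)
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)

define i32 @main.sketch(i32 %argc) {
entry:
  %f = alloca %struct.foo.sketch, align 8
  %0 = bitcast %struct.foo.sketch* %f to i8*
  call void @llvm.lifetime.start(i64 68, i8* %0)
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.foo.sketch* @gFoo.sketch to i8*), i64 68, i32 8, i1 false)
  ; ... inlined body of @foo reads from %f ...
  ret i32 0   ; real result elided
}
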
diff --git a/test/Transforms/Inline/inline-cold.ll b/test/Transforms/Inline/inline-cold.ll
index bb8c008..5743377 100644
--- a/test/Transforms/Inline/inline-cold.ll
+++ b/test/Transforms/Inline/inline-cold.ll
@@ -1,8 +1,15 @@
; RUN: opt < %s -inline -S -inlinecold-threshold=75 | FileCheck %s
-
; Test that functions with attribute Cold are not inlined while the
; same function without attribute Cold will be inlined.
+; RUN: opt < %s -inline -S -inline-threshold=600 | FileCheck %s -check-prefix=OVERRIDE
+; The inline-threshold command line argument should override the default cold
+; threshold, so a cold function whose size is bigger than the default cold
+; threshold (225) will be inlined.
+
+; RUN: opt < %s -inline -S | FileCheck %s -check-prefix=DEFAULT
+; The same cold function will not be inlined with the default behavior.
+
@a = global i32 4
; This function should be larger than the cold threshold (75), but smaller
@@ -42,6 +49,10 @@ entry:
define i32 @ColdFunction(i32 %a) #1 {
; CHECK-LABEL: @ColdFunction
; CHECK: ret
+; OVERRIDE-LABEL: @ColdFunction
+; OVERRIDE: ret
+; DEFAULT-LABEL: @ColdFunction
+; DEFAULT: ret
entry:
%a1 = load volatile i32* @a
%x1 = add i32 %a1, %a1
@@ -71,16 +82,117 @@ entry:
ret i32 %add
}
+; This function should be larger than the default cold threshold (225).
+define i32 @ColdFunction2(i32 %a) #1 {
+; CHECK-LABEL: @ColdFunction2
+; CHECK: ret
+; OVERRIDE-LABEL: @ColdFunction2
+; OVERRIDE: ret
+; DEFAULT-LABEL: @ColdFunction2
+; DEFAULT: ret
+entry:
+ %a1 = load volatile i32* @a
+ %x1 = add i32 %a1, %a1
+ %a2 = load volatile i32* @a
+ %x2 = add i32 %x1, %a2
+ %a3 = load volatile i32* @a
+ %x3 = add i32 %x2, %a3
+ %a4 = load volatile i32* @a
+ %x4 = add i32 %x3, %a4
+ %a5 = load volatile i32* @a
+ %x5 = add i32 %x4, %a5
+ %a6 = load volatile i32* @a
+ %x6 = add i32 %x5, %a6
+ %a7 = load volatile i32* @a
+ %x7 = add i32 %x6, %a7
+ %a8 = load volatile i32* @a
+ %x8 = add i32 %x7, %a8
+ %a9 = load volatile i32* @a
+ %x9 = add i32 %x8, %a9
+ %a10 = load volatile i32* @a
+ %x10 = add i32 %x9, %a10
+ %a11 = load volatile i32* @a
+ %x11 = add i32 %x10, %a11
+ %a12 = load volatile i32* @a
+ %x12 = add i32 %x11, %a12
+
+ %a21 = load volatile i32* @a
+ %x21 = add i32 %x12, %a21
+ %a22 = load volatile i32* @a
+ %x22 = add i32 %x21, %a22
+ %a23 = load volatile i32* @a
+ %x23 = add i32 %x22, %a23
+ %a24 = load volatile i32* @a
+ %x24 = add i32 %x23, %a24
+ %a25 = load volatile i32* @a
+ %x25 = add i32 %x24, %a25
+ %a26 = load volatile i32* @a
+ %x26 = add i32 %x25, %a26
+ %a27 = load volatile i32* @a
+ %x27 = add i32 %x26, %a27
+ %a28 = load volatile i32* @a
+ %x28 = add i32 %x27, %a28
+ %a29 = load volatile i32* @a
+ %x29 = add i32 %x28, %a29
+ %a30 = load volatile i32* @a
+ %x30 = add i32 %x29, %a30
+ %a31 = load volatile i32* @a
+ %x31 = add i32 %x30, %a31
+ %a32 = load volatile i32* @a
+ %x32 = add i32 %x31, %a32
+
+ %a41 = load volatile i32* @a
+ %x41 = add i32 %x32, %a41
+ %a42 = load volatile i32* @a
+ %x42 = add i32 %x41, %a42
+ %a43 = load volatile i32* @a
+ %x43 = add i32 %x42, %a43
+ %a44 = load volatile i32* @a
+ %x44 = add i32 %x43, %a44
+ %a45 = load volatile i32* @a
+ %x45 = add i32 %x44, %a45
+ %a46 = load volatile i32* @a
+ %x46 = add i32 %x45, %a46
+ %a47 = load volatile i32* @a
+ %x47 = add i32 %x46, %a47
+ %a48 = load volatile i32* @a
+ %x48 = add i32 %x47, %a48
+ %a49 = load volatile i32* @a
+ %x49 = add i32 %x48, %a49
+ %a50 = load volatile i32* @a
+ %x50 = add i32 %x49, %a50
+ %a51 = load volatile i32* @a
+ %x51 = add i32 %x50, %a51
+ %a52 = load volatile i32* @a
+ %x52 = add i32 %x51, %a52
+
+ %add = add i32 %x52, %a
+ ret i32 %add
+}
+
; Function Attrs: nounwind readnone uwtable
define i32 @bar(i32 %a) #0 {
; CHECK-LABEL: @bar
; CHECK: call i32 @ColdFunction(i32 5)
; CHECK-NOT: call i32 @simpleFunction(i32 6)
+; CHECK: call i32 @ColdFunction2(i32 5)
; CHECK: ret
+; OVERRIDE-LABEL: @bar
+; OVERRIDE-NOT: call i32 @ColdFunction(i32 5)
+; OVERRIDE-NOT: call i32 @simpleFunction(i32 6)
+; OVERRIDE-NOT: call i32 @ColdFunction2(i32 5)
+; OVERRIDE: ret
+; DEFAULT-LABEL: @bar
+; DEFAULT-NOT: call i32 @ColdFunction(i32 5)
+; DEFAULT-NOT: call i32 @simpleFunction(i32 6)
+; DEFAULT: call i32 @ColdFunction2(i32 5)
+; DEFAULT: ret
entry:
%0 = tail call i32 @ColdFunction(i32 5)
%1 = tail call i32 @simpleFunction(i32 6)
- %add = add i32 %0, %1
+ %2 = tail call i32 @ColdFunction2(i32 5)
+ %3 = add i32 %0, %1
+ %add = add i32 %2, %3
ret i32 %add
}
diff --git a/test/Transforms/Inline/inline-tail.ll b/test/Transforms/Inline/inline-tail.ll
index 8bb059d..b40328e 100644
--- a/test/Transforms/Inline/inline-tail.ll
+++ b/test/Transforms/Inline/inline-tail.ll
@@ -1,15 +1,182 @@
-; RUN: opt < %s -inline -S | not grep tail
+; RUN: opt < %s -inline -S | FileCheck %s
-declare void @bar(i32*)
+; Each call site cloned into the caller must receive the less restrictive of
+; its own TailCallKind and that of the call site being inlined.
-define internal void @foo(i32* %P) {
- tail call void @bar( i32* %P )
- ret void
+; No tail marker after inlining, since test_capture_c captures an alloca.
+; CHECK: define void @test_capture_a(
+; CHECK-NOT: tail
+; CHECK: call void @test_capture_c(
+
+declare void @test_capture_c(i32*)
+define internal void @test_capture_b(i32* %P) {
+ tail call void @test_capture_c(i32* %P)
+ ret void
+}
+define void @test_capture_a() {
+ %A = alloca i32 ; captured by test_capture_b
+ call void @test_capture_b(i32* %A)
+ ret void
+}
+
+; No musttail marker after inlining, since the prototypes don't match.
+; CHECK: define void @test_proto_mismatch_a(
+; CHECK-NOT: musttail
+; CHECK: call void @test_proto_mismatch_c(
+
+declare void @test_proto_mismatch_c(i32*)
+define internal void @test_proto_mismatch_b(i32* %p) {
+ musttail call void @test_proto_mismatch_c(i32* %p)
+ ret void
+}
+define void @test_proto_mismatch_a() {
+ call void @test_proto_mismatch_b(i32* null)
+ ret void
+}
+
+; After inlining through a musttail call site, we need to keep musttail markers
+; to prevent unbounded stack growth.
+; CHECK: define void @test_musttail_basic_a(
+; CHECK: musttail call void @test_musttail_basic_c(
+
+declare void @test_musttail_basic_c(i32* %p)
+define internal void @test_musttail_basic_b(i32* %p) {
+ musttail call void @test_musttail_basic_c(i32* %p)
+ ret void
+}
+define void @test_musttail_basic_a(i32* %p) {
+ musttail call void @test_musttail_basic_b(i32* %p)
+ ret void
+}
+
+; Don't insert lifetime end markers here; the lifetime is trivially over due
+; to the return.
+; CHECK: define void @test_byval_a(
+; CHECK: musttail call void @test_byval_c(
+; CHECK-NEXT: ret void
+
+declare void @test_byval_c(i32* byval %p)
+define internal void @test_byval_b(i32* byval %p) {
+ musttail call void @test_byval_c(i32* byval %p)
+ ret void
+}
+define void @test_byval_a(i32* byval %p) {
+ musttail call void @test_byval_b(i32* byval %p)
+ ret void
}
-define void @caller() {
- %A = alloca i32 ; <i32*> [#uses=1]
- call void @foo( i32* %A )
- ret void
+; Don't insert a stack restore; we're about to return.
+; CHECK: define void @test_dynalloca_a(
+; CHECK: call i8* @llvm.stacksave(
+; CHECK: alloca i8, i32 %n
+; CHECK: musttail call void @test_dynalloca_c(
+; CHECK-NEXT: ret void
+
+declare void @escape(i8* %buf)
+declare void @test_dynalloca_c(i32* byval %p, i32 %n)
+define internal void @test_dynalloca_b(i32* byval %p, i32 %n) alwaysinline {
+ %buf = alloca i8, i32 %n ; dynamic alloca
+ call void @escape(i8* %buf) ; escape it
+ musttail call void @test_dynalloca_c(i32* byval %p, i32 %n)
+ ret void
+}
+define void @test_dynalloca_a(i32* byval %p, i32 %n) {
+ musttail call void @test_dynalloca_b(i32* byval %p, i32 %n)
+ ret void
}
+; We can't merge the returns.
+; CHECK: define void @test_multiret_a(
+; CHECK: musttail call void @test_multiret_c(
+; CHECK-NEXT: ret void
+; CHECK: musttail call void @test_multiret_d(
+; CHECK-NEXT: ret void
+
+declare void @test_multiret_c(i1 zeroext %b)
+declare void @test_multiret_d(i1 zeroext %b)
+define internal void @test_multiret_b(i1 zeroext %b) {
+ br i1 %b, label %c, label %d
+c:
+ musttail call void @test_multiret_c(i1 zeroext %b)
+ ret void
+d:
+ musttail call void @test_multiret_d(i1 zeroext %b)
+ ret void
+}
+define void @test_multiret_a(i1 zeroext %b) {
+ musttail call void @test_multiret_b(i1 zeroext %b)
+ ret void
+}
+
+; We have to avoid creating a chain of bitcasts on the returned value.
+; CHECK: define i32* @test_retptr_a(
+; CHECK: musttail call i8* @test_retptr_c(
+; CHECK-NEXT: bitcast i8* {{.*}} to i32*
+; CHECK-NEXT: ret i32*
+
+declare i8* @test_retptr_c()
+define internal i16* @test_retptr_b() {
+ %rv = musttail call i8* @test_retptr_c()
+ %v = bitcast i8* %rv to i16*
+ ret i16* %v
+}
+define i32* @test_retptr_a() {
+ %rv = musttail call i16* @test_retptr_b()
+ %v = bitcast i16* %rv to i32*
+ ret i32* %v
+}
+
+; Combine the last two cases: multiple returns with pointer bitcasts.
+; CHECK: define i32* @test_multiptrret_a(
+; CHECK: musttail call i8* @test_multiptrret_c(
+; CHECK-NEXT: bitcast i8* {{.*}} to i32*
+; CHECK-NEXT: ret i32*
+; CHECK: musttail call i8* @test_multiptrret_d(
+; CHECK-NEXT: bitcast i8* {{.*}} to i32*
+; CHECK-NEXT: ret i32*
+
+declare i8* @test_multiptrret_c(i1 zeroext %b)
+declare i8* @test_multiptrret_d(i1 zeroext %b)
+define internal i16* @test_multiptrret_b(i1 zeroext %b) {
+ br i1 %b, label %c, label %d
+c:
+ %c_rv = musttail call i8* @test_multiptrret_c(i1 zeroext %b)
+ %c_v = bitcast i8* %c_rv to i16*
+ ret i16* %c_v
+d:
+ %d_rv = musttail call i8* @test_multiptrret_d(i1 zeroext %b)
+ %d_v = bitcast i8* %d_rv to i16*
+ ret i16* %d_v
+}
+define i32* @test_multiptrret_a(i1 zeroext %b) {
+ %rv = musttail call i16* @test_multiptrret_b(i1 zeroext %b)
+ %v = bitcast i16* %rv to i32*
+ ret i32* %v
+}
+
+; Inline a musttail call site whose callee contains a normal return and a musttail call.
+; CHECK: define i32 @test_mixedret_a(
+; CHECK: br i1 %b
+; CHECK: musttail call i32 @test_mixedret_c(
+; CHECK-NEXT: ret i32
+; CHECK: call i32 @test_mixedret_d(i1 zeroext %b)
+; CHECK: add i32 1,
+; CHECK-NOT: br
+; CHECK: ret i32
+
+declare i32 @test_mixedret_c(i1 zeroext %b)
+declare i32 @test_mixedret_d(i1 zeroext %b)
+define internal i32 @test_mixedret_b(i1 zeroext %b) {
+ br i1 %b, label %c, label %d
+c:
+ %c_rv = musttail call i32 @test_mixedret_c(i1 zeroext %b)
+ ret i32 %c_rv
+d:
+ %d_rv = call i32 @test_mixedret_d(i1 zeroext %b)
+ %d_rv1 = add i32 1, %d_rv
+ ret i32 %d_rv1
+}
+define i32 @test_mixedret_a(i1 zeroext %b) {
+ %rv = musttail call i32 @test_mixedret_b(i1 zeroext %b)
+ ret i32 %rv
+}
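
Why the musttail markers have to survive inlining (illustrative): musttail guarantees the call is lowered as a jump, so an arbitrarily long chain of such calls runs in constant stack space. If inlining @test_musttail_basic_b had dropped the marker, mutual recursion of the kind sketched below would silently start growing the stack again.

define void @ping(i32* %p) {
  musttail call void @pong(i32* %p)   ; lowered as a jump: no new frame
  ret void
}
define void @pong(i32* %p) {
  musttail call void @ping(i32* %p)
  ret void
}
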
diff --git a/test/Transforms/Inline/inline-vla.ll b/test/Transforms/Inline/inline-vla.ll
new file mode 100644
index 0000000..dc9deaa
--- /dev/null
+++ b/test/Transforms/Inline/inline-vla.ll
@@ -0,0 +1,38 @@
+; RUN: opt -S -inline %s -o - | FileCheck %s
+
+; Check that memcpy2 is completely inlined away.
+; CHECK-NOT: memcpy2
+
+@.str = private unnamed_addr constant [2 x i8] c"a\00", align 1
+@.str1 = private unnamed_addr constant [3 x i8] c"ab\00", align 1
+
+; Function Attrs: nounwind ssp uwtable
+define i32 @main(i32 %argc, i8** nocapture readnone %argv) #0 {
+entry:
+ %data = alloca [2 x i8], align 1
+ %arraydecay = getelementptr inbounds [2 x i8]* %data, i64 0, i64 0
+ call fastcc void @memcpy2(i8* %arraydecay, i8* getelementptr inbounds ([2 x i8]* @.str, i64 0, i64 0), i64 1)
+ call fastcc void @memcpy2(i8* %arraydecay, i8* getelementptr inbounds ([3 x i8]* @.str1, i64 0, i64 0), i64 2)
+ ret i32 0
+}
+
+; Function Attrs: inlinehint nounwind ssp uwtable
+define internal fastcc void @memcpy2(i8* nocapture %dst, i8* nocapture readonly %src, i64 %size) #1 {
+entry:
+ %vla = alloca i64, i64 %size, align 16
+ %0 = bitcast i64* %vla to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %src, i64 %size, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %0, i64 %size, i32 1, i1 false)
+ ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #2
+
+attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { inlinehint nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.5.0 (trunk 205695) (llvm/trunk 205706)"}
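
The interesting part of this test, spelled out (an illustrative sketch, not a CHECK): when the inliner pulls in a callee containing a dynamic alloca, it brackets the inlined body with stack save/restore so the VLA's stack space is reclaimed on each call, roughly:

declare i8* @llvm.stacksave()
declare void @llvm.stackrestore(i8*)
define void @caller_sketch(i64 %n) {
  %ss = call i8* @llvm.stacksave()
  %vla = alloca i64, i64 %n, align 16   ; inlined dynamic alloca
  ; ... inlined body using %vla ...
  call void @llvm.stackrestore(i8* %ss)
  ret void
}
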
diff --git a/test/Transforms/Inline/optimization-remarks.ll b/test/Transforms/Inline/optimization-remarks.ll
new file mode 100644
index 0000000..9108f3a
--- /dev/null
+++ b/test/Transforms/Inline/optimization-remarks.ll
@@ -0,0 +1,60 @@
+; RUN: opt < %s -inline -pass-remarks=inline -pass-remarks-missed=inline -pass-remarks-analysis=inline -S 2>&1 | FileCheck %s
+
+; CHECK: foo should always be inlined (cost=always)
+; CHECK: foo inlined into bar
+; CHECK: foz should never be inlined (cost=never)
+; CHECK: foz will not be inlined into bar
+
+; Function Attrs: alwaysinline nounwind uwtable
+define i32 @foo(i32 %x, i32 %y) #0 {
+entry:
+ %x.addr = alloca i32, align 4
+ %y.addr = alloca i32, align 4
+ store i32 %x, i32* %x.addr, align 4
+ store i32 %y, i32* %y.addr, align 4
+ %0 = load i32* %x.addr, align 4
+ %1 = load i32* %y.addr, align 4
+ %add = add nsw i32 %0, %1
+ ret i32 %add
+}
+
+; Function Attrs: noinline nounwind uwtable
+define float @foz(i32 %x, i32 %y) #1 {
+entry:
+ %x.addr = alloca i32, align 4
+ %y.addr = alloca i32, align 4
+ store i32 %x, i32* %x.addr, align 4
+ store i32 %y, i32* %y.addr, align 4
+ %0 = load i32* %x.addr, align 4
+ %1 = load i32* %y.addr, align 4
+ %mul = mul nsw i32 %0, %1
+ %conv = sitofp i32 %mul to float
+ ret float %conv
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @bar(i32 %j) #2 {
+entry:
+ %j.addr = alloca i32, align 4
+ store i32 %j, i32* %j.addr, align 4
+ %0 = load i32* %j.addr, align 4
+ %1 = load i32* %j.addr, align 4
+ %sub = sub nsw i32 %1, 2
+ %call = call i32 @foo(i32 %0, i32 %sub)
+ %conv = sitofp i32 %call to float
+ %2 = load i32* %j.addr, align 4
+ %sub1 = sub nsw i32 %2, 2
+ %3 = load i32* %j.addr, align 4
+ %call2 = call float @foz(i32 %sub1, i32 %3)
+ %mul = fmul float %conv, %call2
+ %conv3 = fptosi float %mul to i32
+ ret i32 %conv3
+}
+
+attributes #0 = { alwaysinline nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { noinline nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.5.0 "}
diff --git a/test/Transforms/Inline/switch.ll b/test/Transforms/Inline/switch.ll
new file mode 100644
index 0000000..c5dab53
--- /dev/null
+++ b/test/Transforms/Inline/switch.ll
@@ -0,0 +1,60 @@
+; RUN: opt < %s -inline -inline-threshold=20 -S | FileCheck %s
+
+define i32 @callee(i32 %a) {
+ switch i32 %a, label %sw.default [
+ i32 0, label %sw.bb0
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb2
+ i32 3, label %sw.bb3
+ i32 4, label %sw.bb4
+ i32 5, label %sw.bb5
+ i32 6, label %sw.bb6
+ i32 7, label %sw.bb7
+ i32 8, label %sw.bb8
+ i32 9, label %sw.bb9
+ ]
+
+sw.default:
+ br label %return
+
+sw.bb0:
+ br label %return
+
+sw.bb1:
+ br label %return
+
+sw.bb2:
+ br label %return
+
+sw.bb3:
+ br label %return
+
+sw.bb4:
+ br label %return
+
+sw.bb5:
+ br label %return
+
+sw.bb6:
+ br label %return
+
+sw.bb7:
+ br label %return
+
+sw.bb8:
+ br label %return
+
+sw.bb9:
+ br label %return
+
+return:
+ ret i32 42
+}
+
+define i32 @caller(i32 %a) {
+; CHECK-LABEL: @caller(
+; CHECK: call i32 @callee(
+
+ %result = call i32 @callee(i32 %a)
+ ret i32 %result
+}
diff --git a/test/Transforms/InstCombine/2012-04-23-Neon-Intrinsics.ll b/test/Transforms/InstCombine/2012-04-23-Neon-Intrinsics.ll
index 1883a8f..39408a2 100644
--- a/test/Transforms/InstCombine/2012-04-23-Neon-Intrinsics.ll
+++ b/test/Transforms/InstCombine/2012-04-23-Neon-Intrinsics.ll
@@ -68,7 +68,7 @@ declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind rea
define <4 x i32> @mulByZeroARM64(<4 x i16> %x) nounwind readnone ssp {
entry:
- %a = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %x, <4 x i16> zeroinitializer) nounwind
+ %a = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %x, <4 x i16> zeroinitializer) nounwind
ret <4 x i32> %a
; CHECK: entry:
; CHECK-NEXT: ret <4 x i32> zeroinitializer
@@ -76,7 +76,7 @@ entry:
define <4 x i32> @mulByOneARM64(<4 x i16> %x) nounwind readnone ssp {
entry:
- %a = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %x, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
+ %a = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %x, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
ret <4 x i32> %a
; CHECK: entry:
; CHECK-NEXT: %a = sext <4 x i16> %x to <4 x i32>
@@ -85,7 +85,7 @@ entry:
define <4 x i32> @constantMulARM64() nounwind readnone ssp {
entry:
- %a = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> <i16 3, i16 3, i16 3, i16 3>, <4 x i16> <i16 2, i16 2, i16 2, i16 2>) nounwind
+ %a = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> <i16 3, i16 3, i16 3, i16 3>, <4 x i16> <i16 2, i16 2, i16 2, i16 2>) nounwind
ret <4 x i32> %a
; CHECK: entry:
; CHECK-NEXT: ret <4 x i32> <i32 6, i32 6, i32 6, i32 6>
@@ -93,7 +93,7 @@ entry:
define <4 x i32> @constantMulSARM64() nounwind readnone ssp {
entry:
- %b = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
+ %b = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
ret <4 x i32> %b
; CHECK: entry:
; CHECK-NEXT: ret <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -101,7 +101,7 @@ entry:
define <4 x i32> @constantMulUARM64() nounwind readnone ssp {
entry:
- %b = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
+ %b = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
ret <4 x i32> %b
; CHECK: entry:
; CHECK-NEXT: ret <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>
@@ -109,17 +109,17 @@ entry:
define <4 x i32> @complex1ARM64(<4 x i16> %x) nounwind readnone ssp {
entry:
- %a = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> <i16 2, i16 2, i16 2, i16 2>, <4 x i16> %x) nounwind
+ %a = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> <i16 2, i16 2, i16 2, i16 2>, <4 x i16> %x) nounwind
%b = add <4 x i32> zeroinitializer, %a
ret <4 x i32> %b
; CHECK: entry:
-; CHECK-NEXT: %a = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> <i16 2, i16 2, i16 2, i16 2>, <4 x i16> %x) [[NUW:#[0-9]+]]
+; CHECK-NEXT: %a = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> <i16 2, i16 2, i16 2, i16 2>, <4 x i16> %x) [[NUW:#[0-9]+]]
; CHECK-NEXT: ret <4 x i32> %a
}
define <4 x i32> @complex2ARM64(<4 x i32> %x) nounwind readnone ssp {
entry:
- %a = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> <i16 3, i16 3, i16 3, i16 3>, <4 x i16> <i16 2, i16 2, i16 2, i16 2>) nounwind
+ %a = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> <i16 3, i16 3, i16 3, i16 3>, <4 x i16> <i16 2, i16 2, i16 2, i16 2>) nounwind
%b = add <4 x i32> %x, %a
ret <4 x i32> %b
; CHECK: entry:
@@ -127,8 +127,8 @@ entry:
; CHECK-NEXT: ret <4 x i32> %b
}
-declare <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
; CHECK: attributes #0 = { nounwind readnone ssp }
; CHECK: attributes #1 = { nounwind readnone }
diff --git a/test/Transforms/InstCombine/OverlappingInsertvalues.ll b/test/Transforms/InstCombine/OverlappingInsertvalues.ll
new file mode 100644
index 0000000..9248aec
--- /dev/null
+++ b/test/Transforms/InstCombine/OverlappingInsertvalues.ll
@@ -0,0 +1,36 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Check that we can find and remove redundant insertvalues
+; CHECK-LABEL: foo_simple
+; CHECK-NOT: i8* %x, 0
+define { i8*, i64, i32 } @foo_simple(i8* %x, i8* %y) nounwind {
+entry:
+ %0 = insertvalue { i8*, i64, i32 } undef, i8* %x, 0
+ %1 = insertvalue { i8*, i64, i32 } %0, i8* %y, 0
+ ret { i8*, i64, i32 } %1
+}
+; Check that we can find and remove redundant nodes in an insertvalue chain.
+; CHECK-LABEL: foo_ovwrt_chain
+; CHECK-NOT: i64 %y, 1
+; CHECK-NOT: i32 555, 2
+define { i8*, i64, i32 } @foo_ovwrt_chain(i8* %x, i64 %y, i64 %z) nounwind {
+entry:
+ %0 = insertvalue { i8*, i64, i32 } undef, i8* %x, 0
+ %1 = insertvalue { i8*, i64, i32 } %0, i64 %y, 1
+ %2 = insertvalue { i8*, i64, i32 } %1, i32 555, 2
+ %3 = insertvalue { i8*, i64, i32 } %2, i64 %z, 1
+ %4 = insertvalue { i8*, i64, i32 } %3, i32 777, 2
+ ret { i8*, i64, i32 } %4
+}
+; Check that we propagate insertvalues only if they are used as the first
+; operand (as the initial value of the aggregate).
+; CHECK-LABEL: foo_use_as_second_operand
+; CHECK: i16 %x, 0
+; CHECK: %0, 1
+define { i8, {i16, i32} } @foo_use_as_second_operand(i16 %x) nounwind {
+entry:
+ %0 = insertvalue { i16, i32 } undef, i16 %x, 0
+ %1 = insertvalue { i8, {i16, i32} } undef, { i16, i32 } %0, 1
+ ret { i8, {i16, i32} } %1
+}
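
The fold these three functions pin down, in its smallest form (a sketch of the expected instcombine output, not a CHECK):

; Input: the insert of %x at index 0 is immediately overwritten by %y.
define { i8*, i64 } @dead_insert_sketch(i8* %x, i8* %y) {
entry:
  %0 = insertvalue { i8*, i64 } undef, i8* %x, 0
  %1 = insertvalue { i8*, i64 } %0, i8* %y, 0
  ret { i8*, i64 } %1
}
; Expected: %y is inserted directly into undef and %0 goes away:
;   %1 = insertvalue { i8*, i64 } undef, i8* %y, 0
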
diff --git a/test/Transforms/InstCombine/alloca.ll b/test/Transforms/InstCombine/alloca.ll
index ae1cfa1..6d0c131 100644
--- a/test/Transforms/InstCombine/alloca.ll
+++ b/test/Transforms/InstCombine/alloca.ll
@@ -129,3 +129,24 @@ define void @test8() {
call void (...)* @use(i32* %x)
ret void
}
+
+; PR19569
+%struct_type = type { i32, i32 }
+declare void @test9_aux(<{ %struct_type }>* inalloca)
+declare i8* @llvm.stacksave()
+declare void @llvm.stackrestore(i8*)
+
+define void @test9(%struct_type* %a) {
+; CHECK-LABEL: @test9(
+entry:
+ %inalloca.save = call i8* @llvm.stacksave()
+ %argmem = alloca inalloca <{ %struct_type }>
+; CHECK: alloca inalloca i64, align 8
+ %0 = getelementptr inbounds <{ %struct_type }>* %argmem, i32 0, i32 0
+ %1 = bitcast %struct_type* %0 to i8*
+ %2 = bitcast %struct_type* %a to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %2, i32 8, i32 4, i1 false)
+ call void @test9_aux(<{ %struct_type }>* inalloca %argmem)
+ call void @llvm.stackrestore(i8* %inalloca.save)
+ ret void
+}
diff --git a/test/Transforms/InstCombine/bitcast-alias-function.ll b/test/Transforms/InstCombine/bitcast-alias-function.ll
index a6b56f9..284960b 100644
--- a/test/Transforms/InstCombine/bitcast-alias-function.ll
+++ b/test/Transforms/InstCombine/bitcast-alias-function.ll
@@ -6,46 +6,46 @@ target datalayout = "e-p:32:32:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16
; Cases that should be bitcast
; Test cast between scalars with same bit sizes
-@alias_i32_to_f32 = alias bitcast (i32 (i32)* @func_i32 to float (float)*)
+@alias_i32_to_f32 = alias float (float), i32 (i32)* @func_i32
; Test cast between vectors with same number of elements and bit sizes
-@alias_v2i32_to_v2f32 = alias bitcast (<2 x i32> (<2 x i32>)* @func_v2i32 to <2 x float> (<2 x float>)*)
+@alias_v2i32_to_v2f32 = alias <2 x float> (<2 x float>), <2 x i32> (<2 x i32>)* @func_v2i32
; Test cast from vector to scalar with same number of bits
-@alias_v2f32_to_i64 = alias bitcast (i64 (i64)* @func_i64 to <2 x float> (<2 x float>)*)
+@alias_v2f32_to_i64 = alias <2 x float> (<2 x float>), i64 (i64)* @func_i64
; Test cast from scalar to vector with same number of bits
-@alias_i64_to_v2f32 = alias bitcast (<2 x float> (<2 x float>)* @func_v2f32 to i64 (i64)*)
+@alias_i64_to_v2f32 = alias i64 (i64), <2 x float> (<2 x float>)* @func_v2f32
; Test cast between vectors of pointers
-@alias_v2i32p_to_v2i64p = alias bitcast (<2 x i32*> (<2 x i32*>)* @func_v2i32p to <2 x i64*> (<2 x i64*>)*)
+@alias_v2i32p_to_v2i64p = alias <2 x i64*> (<2 x i64*>), <2 x i32*> (<2 x i32*>)* @func_v2i32p
; Cases that should be invalid and unchanged
; Test cast between scalars with different bit sizes
-@alias_i64_to_f32 = alias bitcast (i64 (i64)* @func_i64 to float (float)*)
+@alias_i64_to_f32 = alias float (float), i64 (i64)* @func_i64
; Test cast between vectors with different bit sizes but the
; same number of elements
-@alias_v2i64_to_v2f32 = alias bitcast (<2 x i64> (<2 x i64>)* @func_v2i64 to <2 x float> (<2 x float>)*)
+@alias_v2i64_to_v2f32 = alias <2 x float> (<2 x float>), <2 x i64> (<2 x i64>)* @func_v2i64
; Test cast between vectors with same number of bits and different
; numbers of elements
-@alias_v2i32_to_v4f32 = alias bitcast (<2 x i32> (<2 x i32>)* @func_v2i32 to <4 x float> (<4 x float>)*)
+@alias_v2i32_to_v4f32 = alias <4 x float> (<4 x float>), <2 x i32> (<2 x i32>)* @func_v2i32
; Test cast between scalar and vector with different number of bits
-@alias_i64_to_v4f32 = alias bitcast (<4 x float> (<4 x float>)* @func_v4f32 to i64 (i64)*)
+@alias_i64_to_v4f32 = alias i64 (i64), <4 x float> (<4 x float>)* @func_v4f32
; Test cast between vector and scalar with different number of bits
-@alias_v4f32_to_i64 = alias bitcast (i64 (i64)* @func_i64 to <4 x float> (<4 x float>)*)
+@alias_v4f32_to_i64 = alias <4 x float> (<4 x float>), i64 (i64)* @func_i64
; Test cast from scalar to vector of pointers with same number of bits
; We don't know the pointer size at this point, so this can't be done
-@alias_i64_to_v2i32p = alias bitcast (<2 x i32*> (<2 x i32*>)* @func_v2i32p to i64 (i64)*)
+@alias_i64_to_v2i32p = alias i64 (i64), <2 x i32*> (<2 x i32*>)* @func_v2i32p
; Test cast between vector of pointers and scalar with different number of bits
-@alias_v4i32p_to_i64 = alias bitcast (i64 (i64)* @func_i64 to <4 x i32*> (<4 x i32*>)*)
+@alias_v4i32p_to_i64 = alias <4 x i32*> (<4 x i32*>), i64 (i64)* @func_i64
diff --git a/test/Transforms/InstCombine/blend_x86.ll b/test/Transforms/InstCombine/blend_x86.ll
new file mode 100644
index 0000000..778d44b
--- /dev/null
+++ b/test/Transforms/InstCombine/blend_x86.ll
@@ -0,0 +1,55 @@
+; RUN: opt < %s -instcombine -mtriple=x86_64-apple-macosx -mcpu=core-avx2 -S | FileCheck %s
+
+define <2 x double> @constant_blendvpd(<2 x double> %xy, <2 x double> %ab) {
+; CHECK-LABEL: @constant_blendvpd
+; CHECK: select <2 x i1> <i1 true, i1 false>, <2 x double> %ab, <2 x double> %xy
+ %1 = tail call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %xy, <2 x double> %ab, <2 x double> <double 0xFFFFFFFFE0000000, double 0.000000e+00>)
+ ret <2 x double> %1
+}
+
+define <4 x float> @constant_blendvps(<4 x float> %xyzw, <4 x float> %abcd) {
+; CHECK-LABEL: @constant_blendvps
+; CHECK: select <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> %abcd, <4 x float> %xyzw
+ %1 = tail call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %xyzw, <4 x float> %abcd, <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0xFFFFFFFFE0000000>)
+ ret <4 x float> %1
+}
+
+define <16 x i8> @constant_pblendvb(<16 x i8> %xyzw, <16 x i8> %abcd) {
+; CHECK-LABEL: @constant_pblendvb
+; CHECK: select <16 x i1> <i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x i8> %abcd, <16 x i8> %xyzw
+ %1 = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %xyzw, <16 x i8> %abcd, <16 x i8> <i8 0, i8 0, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0>)
+ ret <16 x i8> %1
+}
+
+define <4 x double> @constant_blendvpd_avx(<4 x double> %xy, <4 x double> %ab) {
+; CHECK-LABEL: @constant_blendvpd_avx
+; CHECK: select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x double> %ab, <4 x double> %xy
+ %1 = tail call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %xy, <4 x double> %ab, <4 x double> <double 0xFFFFFFFFE0000000, double 0.000000e+00, double 0xFFFFFFFFE0000000, double 0.000000e+00>)
+ ret <4 x double> %1
+}
+
+define <8 x float> @constant_blendvps_avx(<8 x float> %xyzw, <8 x float> %abcd) {
+; CHECK-LABEL: @constant_blendvps_avx
+; CHECK: select <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true>, <8 x float> %abcd, <8 x float> %xyzw
+ %1 = tail call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %xyzw, <8 x float> %abcd, <8 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0xFFFFFFFFE0000000, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0xFFFFFFFFE0000000>)
+ ret <8 x float> %1
+}
+
+define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) {
+; CHECK-LABEL: @constant_pblendvb_avx2
+; CHECK: select <32 x i1> <i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <32 x i8> %abcd, <32 x i8> %xyzw
+ %1 = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %xyzw, <32 x i8> %abcd,
+ <32 x i8> <i8 0, i8 0, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0,
+ i8 0, i8 0, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0,
+ i8 0, i8 0, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0,
+ i8 0, i8 0, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0>)
+ ret <32 x i8> %1
+}
+
+declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>)
+declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>)
+declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>)
+
+declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>)
+declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>)
+declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 x double>)
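
How the constant masks above decode (illustrative): the blendv family selects each lane on the sign bit of the corresponding mask element, and 0xFFFFFFFFE0000000 is a double with its sign bit set. With a constant mask the intrinsic is therefore exactly a vector select, e.g. for @constant_blendvpd:

define <2 x double> @blend_decoded_sketch(<2 x double> %xy, <2 x double> %ab) {
  ; mask <0xFFFFFFFFE0000000, 0.0> has sign bits <1, 0>:
  %r = select <2 x i1> <i1 true, i1 false>, <2 x double> %ab, <2 x double> %xy
  ret <2 x double> %r
}
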
diff --git a/test/Transforms/InstCombine/call-cast-target-inalloca.ll b/test/Transforms/InstCombine/call-cast-target-inalloca.ll
index baf97e0..90289e2 100644
--- a/test/Transforms/InstCombine/call-cast-target-inalloca.ll
+++ b/test/Transforms/InstCombine/call-cast-target-inalloca.ll
@@ -8,7 +8,7 @@ declare void @takes_i32_inalloca(i32* inalloca)
define void @f() {
; CHECK-LABEL: define void @f()
- %args = alloca i32
+ %args = alloca inalloca i32
call void bitcast (void (i32)* @takes_i32 to void (i32*)*)(i32* inalloca %args)
; CHECK: call void bitcast
ret void
diff --git a/test/Transforms/InstCombine/div.ll b/test/Transforms/InstCombine/div.ll
index 1bf486f..9c7ba9b 100644
--- a/test/Transforms/InstCombine/div.ll
+++ b/test/Transforms/InstCombine/div.ll
@@ -156,3 +156,22 @@ define <2 x i64> @test18(<2 x i64> %x) nounwind {
; CHECK-NEXT: sub <2 x i64> zeroinitializer, %x
; CHECK-NEXT: ret <2 x i64>
}
+
+define i32 @test19(i32 %x) {
+ %A = udiv i32 1, %x
+ ret i32 %A
+; CHECK-LABEL: @test19(
+; CHECK-NEXT: icmp eq i32 %x, 1
+; CHECK-NEXT: zext i1 %{{.*}} to i32
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test20(i32 %x) {
+ %A = sdiv i32 1, %x
+ ret i32 %A
+; CHECK-LABEL: @test20(
+; CHECK-NEXT: add i32 %x, 1
+; CHECK-NEXT: icmp ult i32 %{{.*}}, 3
+; CHECK-NEXT: select i1 %{{.*}}, i32 %x, i32 {{.*}}
+; CHECK-NEXT: ret i32
+}
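
A worked note on the two expected folds (editorial; it matches the CHECK lines): udiv 1, %x is 1 exactly when %x == 1, hence the zext of a compare. For sdiv 1, %x the quotient equals %x whenever %x is in {-1, 0, 1} (1/1 = 1, 1/-1 = -1, and %x == 0 is immediate UB) and is 0 otherwise; the unsigned trick (%x + 1) <u 3 tests that three-value set with a single compare:

define i32 @sdiv_one_decoded(i32 %x) {
  %t = add i32 %x, 1
  %c = icmp ult i32 %t, 3       ; true iff %x is -1, 0 or 1
  %r = select i1 %c, i32 %x, i32 0
  ret i32 %r
}
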
diff --git a/test/Transforms/InstCombine/gep-addrspace.ll b/test/Transforms/InstCombine/gep-addrspace.ll
index 24c355d..29511a3 100644
--- a/test/Transforms/InstCombine/gep-addrspace.ll
+++ b/test/Transforms/InstCombine/gep-addrspace.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S
+; RUN: opt < %s -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-pc-win32"
@@ -17,3 +17,18 @@ ST:
ret void
}
+@array = internal addrspace(3) global [256 x float] zeroinitializer, align 4
+@scalar = internal addrspace(3) global float 0.000000e+00, align 4
+
+define void @keep_necessary_addrspacecast(i64 %i, float** %out0, float** %out1) {
+entry:
+; CHECK-LABEL: @keep_necessary_addrspacecast
+ %0 = getelementptr [256 x float]* addrspacecast ([256 x float] addrspace(3)* @array to [256 x float]*), i64 0, i64 %i
+; CHECK: addrspacecast float addrspace(3)* %{{[0-9]+}} to float*
+ %1 = getelementptr [0 x float]* addrspacecast (float addrspace(3)* @scalar to [0 x float]*), i64 0, i64 %i
+; CHECK: addrspacecast float addrspace(3)* %{{[0-9]+}} to float*
+ store float* %0, float** %out0, align 4
+ store float* %1, float** %out1, align 4
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/icmp.ll b/test/Transforms/InstCombine/icmp.ll
index 12a4744..f45897c 100644
--- a/test/Transforms/InstCombine/icmp.ll
+++ b/test/Transforms/InstCombine/icmp.ll
@@ -1356,3 +1356,12 @@ define i1 @icmp_ashr_ashr_ne(i32 %a, i32 %b) nounwind {
%z = icmp ne i32 %x, %y
ret i1 %z
}
+
+; CHECK-LABEL: @icmp_neg_cst_slt
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %a, 10
+; CHECK-NEXT: ret i1 [[CMP]]
+define i1 @icmp_neg_cst_slt(i32 %a) {
+ %1 = sub nsw i32 0, %a
+ %2 = icmp slt i32 %1, -10
+ ret i1 %2
+}
diff --git a/test/Transforms/InstCombine/memcpy-from-global.ll b/test/Transforms/InstCombine/memcpy-from-global.ll
index 58793ab..b5a0ab8 100644
--- a/test/Transforms/InstCombine/memcpy-from-global.ll
+++ b/test/Transforms/InstCombine/memcpy-from-global.ll
@@ -7,11 +7,11 @@ entry:
%lookupTable = alloca [128 x float], align 16 ; <[128 x float]*> [#uses=5]
%lookupTable1 = bitcast [128 x float]* %lookupTable to i8* ; <i8*> [#uses=1]
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %lookupTable1, i8* bitcast ([128 x float]* @C.0.1248 to i8*), i64 512, i32 16, i1 false)
-
+
; CHECK-LABEL: @test1(
; CHECK-NOT: alloca
; CHECK-NOT: call{{.*}}@llvm.memcpy
-
+
%tmp3 = shl i32 %hash, 2 ; <i32> [#uses=1]
%tmp5 = and i32 %tmp3, 124 ; <i32> [#uses=4]
%tmp753 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp5 ; <float*> [#uses=1]
@@ -37,6 +37,9 @@ entry:
}
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p1i8.p0i8.i64(i8 addrspace(1)* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p1i8.i64(i8* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind
%T = type { i8, [123 x i8] }
%U = type { i32, i32, i32, i32, i32 }
@@ -64,7 +67,30 @@ define void @test2() {
ret void
}
+define void @test2_addrspacecast() {
+ %A = alloca %T
+ %B = alloca %T
+ %a = addrspacecast %T* %A to i8 addrspace(1)*
+ %b = addrspacecast %T* %B to i8 addrspace(1)*
+
+; CHECK-LABEL: @test2_addrspacecast(
+
+; %A alloca is deleted
+; This doesn't exactly match what test2 does, because folding the type
+; cast into the alloca doesn't work for the addrspacecast yet.
+; CHECK-NEXT: alloca %T
+; CHECK-NEXT: addrspacecast
+
+; use @G instead of %A
+; CHECK-NEXT: call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %{{.*}},
+ call void @llvm.memcpy.p1i8.p0i8.i64(i8 addrspace(1)* %a, i8* bitcast (%T* @G to i8*), i64 124, i32 4, i1 false)
+ call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %b, i8 addrspace(1)* %a, i64 124, i32 4, i1 false)
+ call void @bar_as1(i8 addrspace(1)* %b)
+ ret void
+}
+
declare void @bar(i8*)
+declare void @bar_as1(i8 addrspace(1)*)
;; Should be able to eliminate the alloca.
@@ -78,11 +104,22 @@ define void @test3() {
ret void
}
+define void @test3_addrspacecast() {
+ %A = alloca %T
+ %a = bitcast %T* %A to i8*
+ call void @llvm.memcpy.p0i8.p1i8.i64(i8* %a, i8 addrspace(1)* addrspacecast (%T* @G to i8 addrspace(1)*), i64 124, i32 4, i1 false)
+ call void @bar(i8* %a) readonly
+; CHECK-LABEL: @test3_addrspacecast(
+; CHECK-NEXT: call void @bar(i8* getelementptr inbounds (%T* @G, i64 0, i32 0))
+ ret void
+}
+
+
define void @test4() {
%A = alloca %T
%a = bitcast %T* %A to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%T* @G to i8*), i64 124, i32 4, i1 false)
- call void @baz(i8* byval %a)
+ call void @baz(i8* byval %a)
; CHECK-LABEL: @test4(
; CHECK-NEXT: call void @baz(i8* byval getelementptr inbounds (%T* @G, i64 0, i32 0))
ret void
@@ -94,7 +131,7 @@ define void @test5() {
%a = bitcast %T* %A to i8*
call void @llvm.lifetime.start(i64 -1, i8* %a)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%T* @G to i8*), i64 124, i32 4, i1 false)
- call void @baz(i8* byval %a)
+ call void @baz(i8* byval %a)
; CHECK-LABEL: @test5(
; CHECK-NEXT: call void @baz(i8* byval getelementptr inbounds (%T* @G, i64 0, i32 0))
ret void
@@ -135,6 +172,18 @@ define void @test8() {
ret void
}
+
+define void @test8_addrspacecast() {
+ %A = alloca %U, align 16
+ %a = bitcast %U* %A to i8*
+ call void @llvm.memcpy.p0i8.p1i8.i64(i8* %a, i8 addrspace(1)* addrspacecast (%U* getelementptr ([2 x %U]* @H, i64 0, i32 1) to i8 addrspace(1)*), i64 20, i32 4, i1 false)
+ call void @bar(i8* %a) readonly
+; CHECK-LABEL: @test8_addrspacecast(
+; CHECK: llvm.memcpy
+; CHECK: bar
+ ret void
+}
+
define void @test9() {
%A = alloca %U, align 4
%a = bitcast %U* %A to i8*
@@ -144,3 +193,13 @@ define void @test9() {
; CHECK-NEXT: call void @bar(i8* bitcast (%U* getelementptr inbounds ([2 x %U]* @H, i64 0, i64 1) to i8*))
ret void
}
+
+define void @test9_addrspacecast() {
+ %A = alloca %U, align 4
+ %a = bitcast %U* %A to i8*
+ call void @llvm.memcpy.p0i8.p1i8.i64(i8* %a, i8 addrspace(1)* addrspacecast (%U* getelementptr ([2 x %U]* @H, i64 0, i32 1) to i8 addrspace(1)*), i64 20, i32 4, i1 false)
+ call void @bar(i8* %a) readonly
+; CHECK-LABEL: @test9_addrspacecast(
+; CHECK-NEXT: call void @bar(i8* bitcast (%U* getelementptr inbounds ([2 x %U]* @H, i64 0, i64 1) to i8*))
+ ret void
+}
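The new *_addrspacecast twins mirror existing cases with the copy routed through addrspace(1) pointers, pinning down that the memcpy-from-constant-global fold still replaces the alloca with @G when it can see through the cast (test3, test9), keeps the memcpy and the call when the replacement cannot be proven safe (test8), and in test2 degrades gracefully: @G is forwarded into the second memcpy while the surviving alloca keeps an explicit addrspacecast, since, as the comment notes, folding the cast into the alloca does not work for addrspacecast yet.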
diff --git a/test/Transforms/InstCombine/overflow-mul.ll b/test/Transforms/InstCombine/overflow-mul.ll
new file mode 100644
index 0000000..04019ae
--- /dev/null
+++ b/test/Transforms/InstCombine/overflow-mul.ll
@@ -0,0 +1,164 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+; return mul(zext x, zext y) > MAX
+define i32 @pr4917_1(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4917_1(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+; CHECK-NOT: zext i32
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %overflow = icmp ugt i64 %mul64, 4294967295
+; CHECK: extractvalue { i32, i1 } [[MUL]], 1
+ %retval = zext i1 %overflow to i32
+ ret i32 %retval
+}
+
+; return mul(zext x, zext y) >= MAX+1
+define i32 @pr4917_1a(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4917_1a(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+; CHECK-NOT: zext i32
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %overflow = icmp uge i64 %mul64, 4294967296
+; CHECK: extractvalue { i32, i1 } [[MUL]], 1
+ %retval = zext i1 %overflow to i32
+ ret i32 %retval
+}
+
+; mul(zext x, zext y) > MAX
+; mul(x, y) is used
+define i32 @pr4917_2(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4917_2(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+; CHECK-NOT: zext i32
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %overflow = icmp ugt i64 %mul64, 4294967295
+; CHECK-DAG: [[VAL:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
+ %mul32 = trunc i64 %mul64 to i32
+; CHECK-DAG: [[OVFL:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
+ %retval = select i1 %overflow, i32 %mul32, i32 111
+; CHECK: select i1 [[OVFL]], i32 [[VAL]]
+ ret i32 %retval
+}
+
+; return mul(zext x, zext y) > MAX
+; mul is used in non-truncate
+define i64 @pr4917_3(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4917_3(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+ %mul64 = mul i64 %l, %r
+; CHECK-NOT: umul.with.overflow.i32
+ %overflow = icmp ugt i64 %mul64, 4294967295
+ %retval = select i1 %overflow, i64 %mul64, i64 111
+ ret i64 %retval
+}
+
+; return mul(zext x, zext y) <= MAX
+define i32 @pr4917_4(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4917_4(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+; CHECK-NOT: zext i32
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %overflow = icmp ule i64 %mul64, 4294967295
+; CHECK: extractvalue { i32, i1 } [[MUL]], 1
+; CHECK: xor
+ %retval = zext i1 %overflow to i32
+ ret i32 %retval
+}
+
+; return mul(zext x, zext y) < MAX+1
+define i32 @pr4917_4a(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4917_4a(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+; CHECK-NOT: zext i32
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %overflow = icmp ult i64 %mul64, 4294967296
+; CHECK: extractvalue { i32, i1 } [[MUL]], 1
+; CHECK: xor
+ %retval = zext i1 %overflow to i32
+ ret i32 %retval
+}
+
+; operands of mul are of different size
+define i32 @pr4917_5(i32 %x, i8 %y) nounwind {
+; CHECK-LABEL: @pr4917_5(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i8 %y to i64
+; CHECK: [[Y:%.*]] = zext i8 %y to i32
+ %mul64 = mul i64 %l, %r
+ %overflow = icmp ugt i64 %mul64, 4294967295
+ %mul32 = trunc i64 %mul64 to i32
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 [[Y]])
+; CHECK-DAG: [[VAL:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
+; CHECK-DAG: [[OVFL:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
+ %retval = select i1 %overflow, i32 %mul32, i32 111
+; CHECK: select i1 [[OVFL]], i32 [[VAL]]
+ ret i32 %retval
+}
+
+; mul(zext x, zext y) != zext trunc mul
+define i32 @pr4918_1(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4918_1(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %part32 = trunc i64 %mul64 to i32
+ %part64 = zext i32 %part32 to i64
+ %overflow = icmp ne i64 %mul64, %part64
+; CHECK: extractvalue { i32, i1 } [[MUL]], 1
+ %retval = zext i1 %overflow to i32
+ ret i32 %retval
+}
+
+; mul(zext x, zext y) == zext trunc mul
+define i32 @pr4918_2(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4918_2(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %part32 = trunc i64 %mul64 to i32
+ %part64 = zext i32 %part32 to i64
+ %overflow = icmp eq i64 %mul64, %part64
+; CHECK: extractvalue { i32, i1 } [[MUL]]
+ %retval = zext i1 %overflow to i32
+; CHECK: xor
+ ret i32 %retval
+}
+
+; zext trunc mul != mul(zext x, zext y)
+define i32 @pr4918_3(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4918_3(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %part32 = trunc i64 %mul64 to i32
+ %part64 = zext i32 %part32 to i64
+ %overflow = icmp ne i64 %part64, %mul64
+; CHECK: extractvalue { i32, i1 } [[MUL]], 1
+ %retval = zext i1 %overflow to i32
+ ret i32 %retval
+}
+
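All of the pr4917/pr4918 cases rest on one equivalence: for i32 %x and %y, the i64 product of their zero-extensions exceeds 4294967295 exactly when the 32-bit multiply wraps, which is the i1 flag @llvm.umul.with.overflow.i32 returns. Concretely, %x = %y = 65536 gives a 64-bit product of 2^32, one past the i32 maximum, while the 32-bit multiply wraps to 0. A hand-written sketch of the canonical rewritten form (the function name is invented here, not taken from the patch):

define i1 @umul_ovfl_sketch(i32 %x, i32 %y) {
  ; Replaces: zext both operands to i64, mul i64, icmp ugt 4294967295.
  %m = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
  %ovfl = extractvalue { i32, i1 } %m, 1
  ret i1 %ovfl
}

declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)

pr4917_3 is the deliberate negative: the full 64-bit product is live there, so narrowing to the 32-bit intrinsic would lose the high half and the fold must not fire.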
diff --git a/test/Transforms/InstCombine/pr19420.ll b/test/Transforms/InstCombine/pr19420.ll
new file mode 100644
index 0000000..23fa0a4
--- /dev/null
+++ b/test/Transforms/InstCombine/pr19420.ll
@@ -0,0 +1,67 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+; CHECK-LABEL: @test_FoldShiftByConstant_CreateSHL
+; CHECK: mul <4 x i32> %in, <i32 0, i32 -32, i32 0, i32 -32>
+; CHECK-NEXT: ret
+define <4 x i32> @test_FoldShiftByConstant_CreateSHL(<4 x i32> %in) {
+ %mul.i = mul <4 x i32> %in, <i32 0, i32 -1, i32 0, i32 -1>
+ %vshl_n = shl <4 x i32> %mul.i, <i32 5, i32 5, i32 5, i32 5>
+ ret <4 x i32> %vshl_n
+}
+
+; CHECK-LABEL: @test_FoldShiftByConstant_CreateSHL2
+; CHECK: mul <8 x i16> %in, <i16 0, i16 -32, i16 0, i16 -32, i16 0, i16 -32, i16 0, i16 -32>
+; CHECK-NEXT: ret
+define <8 x i16> @test_FoldShiftByConstant_CreateSHL2(<8 x i16> %in) {
+ %mul.i = mul <8 x i16> %in, <i16 0, i16 -1, i16 0, i16 -1, i16 0, i16 -1, i16 0, i16 -1>
+ %vshl_n = shl <8 x i16> %mul.i, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ret <8 x i16> %vshl_n
+}
+
+; CHECK-LABEL: @test_FoldShiftByConstant_CreateAnd
+; CHECK: mul <16 x i8> %in0, <i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33>
+; CHECK-NEXT: and <16 x i8> %vsra_n2, <i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32>
+; CHECK-NEXT: ret
+define <16 x i8> @test_FoldShiftByConstant_CreateAnd(<16 x i8> %in0) {
+ %vsra_n = ashr <16 x i8> %in0, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ %tmp = add <16 x i8> %in0, %vsra_n
+ %vshl_n = shl <16 x i8> %tmp, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ ret <16 x i8> %vshl_n
+}
+
+
+define i32 @bar(i32 %x, i32 %y) {
+ %a = lshr i32 %x, 4
+ %b = add i32 %a, %y
+ %c = shl i32 %b, 4
+ ret i32 %c
+}
+
+define <2 x i32> @bar_v2i32(<2 x i32> %x, <2 x i32> %y) {
+ %a = lshr <2 x i32> %x, <i32 5, i32 5>
+ %b = add <2 x i32> %a, %y
+ %c = shl <2 x i32> %b, <i32 5, i32 5>
+ ret <2 x i32> %c
+}
+
+
+
+
+define i32 @foo(i32 %x, i32 %y) {
+ %a = lshr i32 %x, 4
+ %b = and i32 %a, 8
+ %c = add i32 %b, %y
+ %d = shl i32 %c, 4
+ ret i32 %d
+}
+
+define <2 x i32> @foo_v2i32(<2 x i32> %x, <2 x i32> %y) {
+ %a = lshr <2 x i32> %x, <i32 4, i32 4>
+ %b = and <2 x i32> %a, <i32 8, i32 8>
+ %c = add <2 x i32> %b, %y
+ %d = shl <2 x i32> %c, <i32 4, i32 4>
+ ret <2 x i32> %d
+}
+
+
+
diff --git a/test/Transforms/InstCombine/select.ll b/test/Transforms/InstCombine/select.ll
index 1458bde..2213be1 100644
--- a/test/Transforms/InstCombine/select.ll
+++ b/test/Transforms/InstCombine/select.ll
@@ -1031,3 +1031,103 @@ define i32 @test67(i16 %x) {
; CHECK: lshr exact i32 %2, 1
; CHECK: xor i32 %3, 42
}
+
+; SMIN(SMIN(X, 11), 92) -> SMIN(X, 11)
+define i32 @test68(i32 %x) {
+entry:
+ %cmp = icmp slt i32 11, %x
+ %cond = select i1 %cmp, i32 11, i32 %x
+ %cmp3 = icmp slt i32 92, %cond
+ %retval = select i1 %cmp3, i32 92, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test68(
+; CHECK: ret i32 %cond
+}
+
+; MIN(MIN(X, 24), 83) -> MIN(X, 24)
+define i32 @test69(i32 %x) {
+entry:
+ %cmp = icmp ult i32 24, %x
+ %cond = select i1 %cmp, i32 24, i32 %x
+ %cmp3 = icmp ult i32 83, %cond
+ %retval = select i1 %cmp3, i32 83, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test69(
+; CHECK: ret i32 %cond
+}
+
+; SMAX(SMAX(X, 75), 36) -> SMAX(X, 75)
+define i32 @test70(i32 %x) {
+entry:
+ %cmp = icmp slt i32 %x, 75
+ %cond = select i1 %cmp, i32 75, i32 %x
+ %cmp3 = icmp slt i32 %cond, 36
+ %retval = select i1 %cmp3, i32 36, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test70(
+; CHECK: ret i32 %cond
+}
+
+; MAX(MAX(X, 68), 47) -> MAX(X, 68)
+define i32 @test71(i32 %x) {
+entry:
+ %cmp = icmp ult i32 %x, 68
+ %cond = select i1 %cmp, i32 68, i32 %x
+ %cmp3 = icmp ult i32 %cond, 47
+ %retval = select i1 %cmp3, i32 47, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test71(
+; CHECK: ret i32 %cond
+}
+
+; SMIN(SMIN(X, 92), 11) -> SMIN(X, 11)
+define i32 @test72(i32 %x) {
+ %cmp = icmp sgt i32 %x, 92
+ %cond = select i1 %cmp, i32 92, i32 %x
+ %cmp3 = icmp sgt i32 %cond, 11
+ %retval = select i1 %cmp3, i32 11, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test72(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 11
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 11, i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+; MIN(MIN(X, 83), 24) -> MIN(X, 24)
+define i32 @test73(i32 %x) {
+ %cmp = icmp ugt i32 %x, 83
+ %cond = select i1 %cmp, i32 83, i32 %x
+ %cmp3 = icmp ugt i32 %cond, 24
+ %retval = select i1 %cmp3, i32 24, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test73(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp ugt i32 %x, 24
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 24, i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+; SMAX(SMAX(X, 36), 75) -> SMAX(X, 75)
+define i32 @test74(i32 %x) {
+ %cmp = icmp slt i32 %x, 36
+ %cond = select i1 %cmp, i32 36, i32 %x
+ %cmp3 = icmp slt i32 %cond, 75
+ %retval = select i1 %cmp3, i32 75, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test74(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 75
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 75, i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+; MAX(MAX(X, 47), 68) -> MAX(X, 68)
+define i32 @test75(i32 %x) {
+ %cmp = icmp ult i32 %x, 47
+ %cond = select i1 %cmp, i32 47, i32 %x
+ %cmp3 = icmp ult i32 %cond, 68
+ %retval = select i1 %cmp3, i32 68, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test75(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp ult i32 %x, 68
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 68, i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
\ No newline at end of file
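test68 through test75 encode one algebraic fact: SMIN(SMIN(X, a), b) = SMIN(X, smin(a, b)), and likewise for UMIN, SMAX and UMAX. When the inner constant already dominates (a <= b for min), the outer clamp is dead and only %cond survives (test68: smin(11, 92) = 11); when it does not, the pair collapses into a single compare-and-select against the tighter constant (test72: smin(92, 11) = 11, so the result is smin(X, 11) rebuilt from scratch). A worked instance: X = 50 gives smin(smin(50, 11), 92) = smin(11, 92) = 11 = smin(50, 11).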
diff --git a/test/Transforms/InstCombine/shift.ll b/test/Transforms/InstCombine/shift.ll
index b1082f0..5586bb6 100644
--- a/test/Transforms/InstCombine/shift.ll
+++ b/test/Transforms/InstCombine/shift.ll
@@ -36,17 +36,52 @@ define i32 @test4(i8 %A) {
define i32 @test5(i32 %A) {
; CHECK-LABEL: @test5(
; CHECK: ret i32 undef
- %B = lshr i32 %A, 32 ;; shift all bits out
+ %B = lshr i32 %A, 32 ;; shift all bits out
ret i32 %B
}
+define <4 x i32> @test5_splat_vector(<4 x i32> %A) {
+; CHECK-LABEL: @test5_splat_vector(
+; CHECK: ret <4 x i32> undef
+ %B = lshr <4 x i32> %A, <i32 32, i32 32, i32 32, i32 32> ;; shift all bits out
+ ret <4 x i32> %B
+}
+
+define <4 x i32> @test5_zero_vector(<4 x i32> %A) {
+; CHECK-LABEL: @test5_zero_vector(
+; CHECK-NEXT: ret <4 x i32> %A
+ %B = lshr <4 x i32> %A, zeroinitializer
+ ret <4 x i32> %B
+}
+
+define <4 x i32> @test5_non_splat_vector(<4 x i32> %A) {
+; CHECK-LABEL: @test5_non_splat_vector(
+; CHECK-NOT: ret <4 x i32> undef
+ %B = shl <4 x i32> %A, <i32 32, i32 1, i32 2, i32 3>
+ ret <4 x i32> %B
+}
+
define i32 @test5a(i32 %A) {
; CHECK-LABEL: @test5a(
; CHECK: ret i32 undef
- %B = shl i32 %A, 32 ;; shift all bits out
+ %B = shl i32 %A, 32 ;; shift all bits out
ret i32 %B
}
+define <4 x i32> @test5a_splat_vector(<4 x i32> %A) {
+; CHECK-LABEL: @test5a_splat_vector(
+; CHECK: ret <4 x i32> undef
+ %B = shl <4 x i32> %A, <i32 32, i32 32, i32 32, i32 32> ;; shift all bits out
+ ret <4 x i32> %B
+}
+
+define <4 x i32> @test5a_non_splat_vector(<4 x i32> %A) {
+; CHECK-LABEL: @test5a_non_splat_vector(
+; CHECK-NOT: ret <4 x i32> undef
+ %B = shl <4 x i32> %A, <i32 32, i32 1, i32 2, i32 3>
+ ret <4 x i32> %B
+}
+
define i32 @test5b() {
; CHECK-LABEL: @test5b(
; CHECK: ret i32 -1
@@ -82,7 +117,7 @@ define i32 @test6a(i32 %A) {
define i32 @test7(i8 %A) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: ret i32 -1
- %shift.upgrd.3 = zext i8 %A to i32
+ %shift.upgrd.3 = zext i8 %A to i32
%B = ashr i32 -1, %shift.upgrd.3 ;; Always equal to -1
ret i32 %B
}
@@ -232,7 +267,7 @@ define i1 @test16(i32 %X) {
; CHECK-NEXT: and i32 %X, 16
; CHECK-NEXT: icmp ne i32
; CHECK-NEXT: ret i1
- %tmp.3 = ashr i32 %X, 4
+ %tmp.3 = ashr i32 %X, 4
%tmp.6 = and i32 %tmp.3, 1
%tmp.7 = icmp ne i32 %tmp.6, 0
ret i1 %tmp.7
@@ -344,6 +379,20 @@ define i32 @test25(i32 %tmp.2, i32 %AA) {
ret i32 %tmp.6
}
+define <2 x i32> @test25_vector(<2 x i32> %tmp.2, <2 x i32> %AA) {
+; CHECK-LABEL: @test25_vector(
+; CHECK: %tmp.3 = lshr <2 x i32> %tmp.2, <i32 17, i32 17>
+; CHECK-NEXT: shl <2 x i32> %tmp.3, <i32 17, i32 17>
+; CHECK-NEXT: add <2 x i32> %tmp.51, %AA
+; CHECK-NEXT: and <2 x i32> %x2, <i32 -131072, i32 -131072>
+; CHECK-NEXT: ret <2 x i32>
+ %x = lshr <2 x i32> %AA, <i32 17, i32 17>
+ %tmp.3 = lshr <2 x i32> %tmp.2, <i32 17, i32 17>
+ %tmp.5 = add <2 x i32> %tmp.3, %x
+ %tmp.6 = shl <2 x i32> %tmp.5, <i32 17, i32 17>
+ ret <2 x i32> %tmp.6
+}
+
;; handle casts between shifts.
define i32 @test26(i32 %A) {
; CHECK-LABEL: @test26(
@@ -365,12 +414,12 @@ define i1 @test27(i32 %x) nounwind {
%z = trunc i32 %y to i1
ret i1 %z
}
-
+
define i8 @test28(i8 %x) {
entry:
; CHECK-LABEL: @test28(
; CHECK: icmp slt i8 %x, 0
-; CHECK-NEXT: br i1
+; CHECK-NEXT: br i1
%tmp1 = lshr i8 %x, 7
%cond1 = icmp ne i8 %tmp1, 0
br i1 %cond1, label %bb1, label %bb2
@@ -476,7 +525,7 @@ entry:
%ins = or i128 %tmp23, %tmp27
%tmp45 = lshr i128 %ins, 64
ret i128 %tmp45
-
+
; CHECK-LABEL: @test36(
; CHECK: %tmp231 = or i128 %B, %A
; CHECK: %ins = and i128 %tmp231, 18446744073709551615
@@ -492,7 +541,7 @@ entry:
%tmp45 = lshr i128 %ins, 64
%tmp46 = trunc i128 %tmp45 to i64
ret i64 %tmp46
-
+
; CHECK-LABEL: @test37(
; CHECK: %tmp23 = shl nuw nsw i128 %tmp22, 32
; CHECK: %ins = or i128 %tmp23, %A
@@ -780,3 +829,32 @@ bb11: ; preds = %bb8
bb12: ; preds = %bb11, %bb8, %bb
ret void
}
+
+define i32 @test64(i32 %a) {
+; CHECK-LABEL: @test64(
+; CHECK-NEXT: ret i32 undef
+ %b = ashr i32 %a, 32 ; shift all bits out
+ ret i32 %b
+}
+
+define <4 x i32> @test64_splat_vector(<4 x i32> %a) {
+; CHECK-LABEL: @test64_splat_vector
+; CHECK-NEXT: ret <4 x i32> undef
+ %b = ashr <4 x i32> %a, <i32 32, i32 32, i32 32, i32 32> ; shift all bits out
+ ret <4 x i32> %b
+}
+
+define <4 x i32> @test64_non_splat_vector(<4 x i32> %a) {
+; CHECK-LABEL: @test64_non_splat_vector
+; CHECK-NOT: ret <4 x i32> undef
+ %b = ashr <4 x i32> %a, <i32 32, i32 0, i32 1, i32 2> ; shift all bits out
+ ret <4 x i32> %b
+}
+
+define <2 x i65> @test_65(<2 x i64> %t) {
+; CHECK-LABEL: @test_65
+ %a = zext <2 x i64> %t to <2 x i65>
+ %sext = shl <2 x i65> %a, <i65 33, i65 33>
+ %b = ashr <2 x i65> %sext, <i65 33, i65 33>
+ ret <2 x i65> %b
+}
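The splat/non-splat split in test5, test5a and test64 follows from the LangRef rule that shifting by an amount at or above the bit width yields undef: with a splat count of 32 every i32 lane is undef, so the whole vector result folds, while a mixed count like <i32 32, i32 0, i32 1, i32 2> leaves fully defined lanes (lane 1 is %a itself), so folding the vector to undef would be a miscompile, and the CHECK-NOTs pin that it survives. @test_65 exercises the same shl/ashr sign-extension idiom at the odd width i65 purely as a does-not-crash case.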
diff --git a/test/Transforms/InstCombine/strlen-1.ll b/test/Transforms/InstCombine/strlen-1.ll
index 4fa5b4f..4a3caf2 100644
--- a/test/Transforms/InstCombine/strlen-1.ll
+++ b/test/Transforms/InstCombine/strlen-1.ll
@@ -5,6 +5,7 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
@hello = constant [6 x i8] c"hello\00"
+@longer = constant [7 x i8] c"longer\00"
@null = constant [1 x i8] zeroinitializer
@null_hello = constant [7 x i8] c"\00hello\00"
@nullstring = constant i8 0
@@ -85,6 +86,17 @@ define i1 @test_simplify8() {
; CHECK-NEXT: ret i1 false
}
+define i32 @test_simplify9(i1 %x) {
+; CHECK-LABEL: @test_simplify9
+ %hello = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %longer = getelementptr [7 x i8]* @longer, i32 0, i32 0
+ %s = select i1 %x, i8* %hello, i8* %longer
+ %l = call i32 @strlen(i8* %s)
+; CHECK-NEXT: select i1 %x, i32 5, i32 6
+ ret i32 %l
+; CHECK-NEXT: ret
+}
+
; Check cases that shouldn't be simplified.
define i32 @test_no_simplify1() {
diff --git a/test/Transforms/InstCombine/vec_demanded_elts.ll b/test/Transforms/InstCombine/vec_demanded_elts.ll
index d12412a..41d2b29 100644
--- a/test/Transforms/InstCombine/vec_demanded_elts.ll
+++ b/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define i16 @test1(float %f) {
entry:
@@ -209,4 +210,369 @@ define <4 x float> @test_select(float %f, float %g) {
ret <4 x float> %ret
}
+; We should optimize these two redundant insertqi into one
+; CHECK: define <2 x i64> @testInsertTwice(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertTwice(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 32)
+; CHECK-NOT: insertqi
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 32)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 32, i8 32)
+ ret <2 x i64> %2
+}
+
+; The result of this insert is the second arg, since the top 64 bits of
+; the result are undefined, and we copy the bottom 64 bits from the
+; second arg
+; CHECK: define <2 x i64> @testInsert64Bits(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsert64Bits(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: ret <2 x i64> %i
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 64, i8 0)
+ ret <2 x i64> %1
+}
+
+; Test the several types of ranges and ordering that exist for two insertqi
+; CHECK: define <2 x i64> @testInsertContainedRange(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertContainedRange(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: %[[RES:.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 0)
+; CHECK: ret <2 x i64> %[[RES]]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 0)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 16, i8 16)
+ ret <2 x i64> %2
+}
+
+; CHECK: define <2 x i64> @testInsertContainedRange_2(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertContainedRange_2(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: %[[RES:.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 0)
+; CHECK: ret <2 x i64> %[[RES]]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 16, i8 16)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 32, i8 0)
+ ret <2 x i64> %2
+}
+
+; CHECK: define <2 x i64> @testInsertOverlappingRange(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertOverlappingRange(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: %[[RES:.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 48, i8 0)
+; CHECK: ret <2 x i64> %[[RES]]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 0)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 32, i8 16)
+ ret <2 x i64> %2
+}
+
+; CHECK: define <2 x i64> @testInsertOverlappingRange_2(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertOverlappingRange_2(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: %[[RES:.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 48, i8 0)
+; CHECK: ret <2 x i64> %[[RES]]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 16)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 32, i8 0)
+ ret <2 x i64> %2
+}
+
+; CHECK: define <2 x i64> @testInsertAdjacentRange(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertAdjacentRange(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: %[[RES:.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 48, i8 0)
+; CHECK: ret <2 x i64> %[[RES]]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 0)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 16, i8 32)
+ ret <2 x i64> %2
+}
+
+; CHECK: define <2 x i64> @testInsertAdjacentRange_2(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertAdjacentRange_2(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: %[[RES:.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 48, i8 0)
+; CHECK: ret <2 x i64> %[[RES]]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 16, i8 32)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 32, i8 0)
+ ret <2 x i64> %2
+}
+
+; CHECK: define <2 x i64> @testInsertDisjointRange(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertDisjointRange(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 16, i8 0)
+; CHECK: tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 16, i8 32)
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 16, i8 0)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 16, i8 32)
+ ret <2 x i64> %2
+}
+
+; CHECK: define <2 x i64> @testInsertDisjointRange_2(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertDisjointRange_2(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 16, i8 0)
+; CHECK: tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 16, i8 32)
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 16, i8 0)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 16, i8 32)
+ ret <2 x i64> %2
+}
+
+
+; CHECK: declare <2 x i64> @llvm.x86.sse4a.insertqi
+declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8) nounwind
+
+declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>)
+define <4 x float> @test_vpermilvar_ps(<4 x float> %v) {
+; CHECK-LABEL: @test_vpermilvar_ps(
+; CHECK: shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
+ ret <4 x float> %a
+}
+
+declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>)
+define <8 x float> @test_vpermilvar_ps_256(<8 x float> %v) {
+; CHECK-LABEL: @test_vpermilvar_ps_256(
+; CHECK: shufflevector <8 x float> %v, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ %a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>)
+ ret <8 x float> %a
+}
+
+declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i32>)
+define <2 x double> @test_vpermilvar_pd(<2 x double> %v) {
+; CHECK-LABEL: @test_vpermilvar_pd(
+; CHECK: shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+ %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i32> <i32 2, i32 0>)
+ ret <2 x double> %a
+}
+
+declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i32>)
+define <4 x double> @test_vpermilvar_pd_256(<4 x double> %v) {
+; CHECK-LABEL: @test_vpermilvar_pd_256(
+; CHECK: shufflevector <4 x double> %v, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ %a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i32> <i32 3, i32 1, i32 2, i32 0>)
+ ret <4 x double> %a
+}
+
+define <4 x float> @test_vpermilvar_ps_zero(<4 x float> %v) {
+; CHECK-LABEL: @test_vpermilvar_ps_zero(
+; CHECK: shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> zeroinitializer)
+ ret <4 x float> %a
+}
+
+define <8 x float> @test_vpermilvar_ps_256_zero(<8 x float> %v) {
+; CHECK-LABEL: @test_vpermilvar_ps_256_zero(
+; CHECK: shufflevector <8 x float> %v, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ %a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> zeroinitializer)
+ ret <8 x float> %a
+}
+
+define <2 x double> @test_vpermilvar_pd_zero(<2 x double> %v) {
+; CHECK-LABEL: @test_vpermilvar_pd_zero(
+; CHECK: shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
+ %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i32> zeroinitializer)
+ ret <2 x double> %a
+}
+
+define <4 x double> @test_vpermilvar_pd_256_zero(<4 x double> %v) {
+; CHECK-LABEL: @test_vpermilvar_pd_256_zero(
+; CHECK: shufflevector <4 x double> %v, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
+ %a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i32> zeroinitializer)
+ ret <4 x double> %a
+}
+
+define <2 x i64> @test_sse2_1() nounwind readnone uwtable {
+ %S = bitcast i32 1 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, <8 x i16> %4)
+ %6 = bitcast <8 x i16> %5 to <4 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <4 x i32> %8 to <2 x i64>
+ %10 = tail call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <2 x i64> %10 to <8 x i16>
+ %12 = tail call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %11, i32 %S)
+ %13 = bitcast <8 x i16> %12 to <4 x i32>
+ %14 = tail call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %13, i32 %S)
+ %15 = bitcast <4 x i32> %14 to <2 x i64>
+ %16 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %15, i32 %S)
+ ret <2 x i64> %16
+; CHECK: test_sse2_1
+; CHECK: ret <2 x i64> <i64 72058418680037440, i64 144117112246370624>
+}
+
+define <4 x i64> @test_avx2_1() nounwind readnone uwtable {
+ %S = bitcast i32 1 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> <i16 1, i16 0, i16 0, i16 0, i16 2, i16 0, i16 0, i16 0, i16 3, i16 0, i16 0, i16 0, i16 4, i16 0, i16 0, i16 0>, <8 x i16> %4)
+ %6 = bitcast <16 x i16> %5 to <8 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <8 x i32> %8 to <4 x i64>
+ %10 = tail call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <4 x i64> %10 to <16 x i16>
+ %12 = tail call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %11, i32 %S)
+ %13 = bitcast <16 x i16> %12 to <8 x i32>
+ %14 = tail call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %13, i32 %S)
+ %15 = bitcast <8 x i32> %14 to <4 x i64>
+ %16 = tail call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> %15, i32 %S)
+ ret <4 x i64> %16
+; CHECK: test_avx2_1
+; CHECK: ret <4 x i64> <i64 64, i64 128, i64 192, i64 256>
+}
+
+define <2 x i64> @test_sse2_0() nounwind readnone uwtable {
+ %S = bitcast i32 128 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, <8 x i16> %4)
+ %6 = bitcast <8 x i16> %5 to <4 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <4 x i32> %8 to <2 x i64>
+ %10 = tail call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <2 x i64> %10 to <8 x i16>
+ %12 = tail call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %11, i32 %S)
+ %13 = bitcast <8 x i16> %12 to <4 x i32>
+ %14 = tail call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %13, i32 %S)
+ %15 = bitcast <4 x i32> %14 to <2 x i64>
+ %16 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %15, i32 %S)
+ ret <2 x i64> %16
+; CHECK: test_sse2_0
+; CHECK: ret <2 x i64> zeroinitializer
+}
+
+define <4 x i64> @test_avx2_0() nounwind readnone uwtable {
+ %S = bitcast i32 128 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> <i16 1, i16 0, i16 0, i16 0, i16 2, i16 0, i16 0, i16 0, i16 3, i16 0, i16 0, i16 0, i16 4, i16 0, i16 0, i16 0>, <8 x i16> %4)
+ %6 = bitcast <16 x i16> %5 to <8 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <8 x i32> %8 to <4 x i64>
+ %10 = tail call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <4 x i64> %10 to <16 x i16>
+ %12 = tail call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %11, i32 %S)
+ %13 = bitcast <16 x i16> %12 to <8 x i32>
+ %14 = tail call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %13, i32 %S)
+ %15 = bitcast <8 x i32> %14 to <4 x i64>
+ %16 = tail call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> %15, i32 %S)
+ ret <4 x i64> %16
+; CHECK: test_avx2_0
+; CHECK: ret <4 x i64> zeroinitializer
+}
+define <2 x i64> @test_sse2_psrl_1() nounwind readnone uwtable {
+ %S = bitcast i32 1 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> <i16 16, i16 32, i16 64, i16 128, i16 256, i16 512, i16 1024, i16 2048>, <8 x i16> %4)
+ %6 = bitcast <8 x i16> %5 to <4 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <4 x i32> %8 to <2 x i64>
+ %10 = tail call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <2 x i64> %10 to <8 x i16>
+ %12 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %11, i32 %S)
+ %13 = bitcast <8 x i16> %12 to <4 x i32>
+ %14 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %13, i32 %S)
+ %15 = bitcast <4 x i32> %14 to <2 x i64>
+ %16 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %15, i32 %S)
+ ret <2 x i64> %16
+; CHECK: test_sse2_psrl_1
+; CHECK: ret <2 x i64> <i64 562954248421376, i64 9007267974742020>
+}
+
+define <4 x i64> @test_avx2_psrl_1() nounwind readnone uwtable {
+ %S = bitcast i32 1 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> <i16 1024, i16 0, i16 0, i16 0, i16 2048, i16 0, i16 0, i16 0, i16 4096, i16 0, i16 0, i16 0, i16 8192, i16 0, i16 0, i16 0>, <8 x i16> %4)
+ %6 = bitcast <16 x i16> %5 to <8 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <8 x i32> %8 to <4 x i64>
+ %10 = tail call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <4 x i64> %10 to <16 x i16>
+ %12 = tail call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %11, i32 %S)
+ %13 = bitcast <16 x i16> %12 to <8 x i32>
+ %14 = tail call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> %13, i32 %S)
+ %15 = bitcast <8 x i32> %14 to <4 x i64>
+ %16 = tail call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %15, i32 %S)
+ ret <4 x i64> %16
+; CHECK: test_avx2_psrl_1
+; CHECK: ret <4 x i64> <i64 16, i64 32, i64 64, i64 128>
+}
+
+define <2 x i64> @test_sse2_psrl_0() nounwind readnone uwtable {
+ %S = bitcast i32 128 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> <i16 32, i16 64, i16 128, i16 256, i16 512, i16 1024, i16 2048, i16 4096>, <8 x i16> %4)
+ %6 = bitcast <8 x i16> %5 to <4 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <4 x i32> %8 to <2 x i64>
+ %10 = tail call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <2 x i64> %10 to <8 x i16>
+ %12 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %11, i32 %S)
+ %13 = bitcast <8 x i16> %12 to <4 x i32>
+ %14 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %13, i32 %S)
+ %15 = bitcast <4 x i32> %14 to <2 x i64>
+ %16 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %15, i32 %S)
+ ret <2 x i64> %16
+; CHECK: test_sse2_psrl_0
+; CHECK: ret <2 x i64> zeroinitializer
+}
+
+define <4 x i64> @test_avx2_psrl_0() nounwind readnone uwtable {
+ %S = bitcast i32 128 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> <i16 1024, i16 0, i16 0, i16 0, i16 2048, i16 0, i16 0, i16 0, i16 4096, i16 0, i16 0, i16 0, i16 8192, i16 0, i16 0, i16 0>, <8 x i16> %4)
+ %6 = bitcast <16 x i16> %5 to <8 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <8 x i32> %8 to <4 x i64>
+ %10 = tail call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <4 x i64> %10 to <16 x i16>
+ %12 = tail call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %11, i32 %S)
+ %13 = bitcast <16 x i16> %12 to <8 x i32>
+ %14 = tail call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> %13, i32 %S)
+ %15 = bitcast <8 x i32> %14 to <4 x i64>
+ %16 = tail call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %15, i32 %S)
+ ret <4 x i64> %16
+; CHECK: test_avx2_psrl_0
+; CHECK: ret <4 x i64> zeroinitializer
+}
+
+declare <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64>, i32) #1
+declare <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32>, i32) #1
+declare <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16>, i32) #1
+declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) #1
+declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) #1
+declare <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16>, <8 x i16>) #1
+declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) #1
+declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) #1
+declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) #1
+declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) #1
+declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) #1
+declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) #1
+declare <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64>, i32) #1
+declare <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32>, i32) #1
+declare <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16>, i32) #1
+declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) #1
+declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) #1
+declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) #1
+declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32) #1
+declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32) #1
+declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) #1
+declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) #1
+declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) #1
+declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) #1
+attributes #1 = { nounwind readnone }
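A note on why these psll/psrl chains constant-fold the way they do: unlike IR shl and lshr, the x86 vector shift intrinsics define out-of-range counts, so a count of %S = 128 simply produces an all-zero result, which is why test_sse2_0, test_avx2_0 and the psrl variants collapse to zeroinitializer rather than undef. In the count-of-1 variants the six chained shifts multiply (or divide) each lane by 64, e.g. the <i16 1024, i16 2048, i16 4096, i16 8192> seed of test_avx2_psrl_1 landing on <i64 16, i64 32, i64 64, i64 128>.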
diff --git a/test/Transforms/InstCombine/vec_shuffle.ll b/test/Transforms/InstCombine/vec_shuffle.ll
index a409a91..fc0f8bd 100644
--- a/test/Transforms/InstCombine/vec_shuffle.ll
+++ b/test/Transforms/InstCombine/vec_shuffle.ll
@@ -244,4 +244,164 @@ define <4 x i8> @test16b(i8 %ele) {
%tmp1 = shl <8 x i8> %tmp0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
ret <4 x i8> %tmp2
-}
\ No newline at end of file
+}
+
+; If composition of two shuffles is identity, shuffles can be removed.
+define <4 x i32> @shuffle_17ident(<4 x i32> %v) nounwind uwtable {
+; CHECK-LABEL: @shuffle_17ident(
+; CHECK-NOT: shufflevector
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %shuffle2 = shufflevector <4 x i32> %shuffle, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ ret <4 x i32> %shuffle2
+}
+
+; swizzle can be put after operation
+define <4 x i32> @shuffle_17and(<4 x i32> %v1, <4 x i32> %v2) nounwind uwtable {
+; CHECK-LABEL: @shuffle_17and(
+; CHECK-NOT: shufflevector
+; CHECK: and <4 x i32> %v1, %v2
+; CHECK: shufflevector
+ %t1 = shufflevector <4 x i32> %v1, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %t2 = shufflevector <4 x i32> %v2, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %r = and <4 x i32> %t1, %t2
+ ret <4 x i32> %r
+}
+
+define <4 x i32> @shuffle_17add(<4 x i32> %v1, <4 x i32> %v2) nounwind uwtable {
+; CHECK-LABEL: @shuffle_17add(
+; CHECK-NOT: shufflevector
+; CHECK: add <4 x i32> %v1, %v2
+; CHECK: shufflevector
+ %t1 = shufflevector <4 x i32> %v1, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %t2 = shufflevector <4 x i32> %v2, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %r = add <4 x i32> %t1, %t2
+ ret <4 x i32> %r
+}
+
+define <4 x i32> @shuffle_17addnsw(<4 x i32> %v1, <4 x i32> %v2) nounwind uwtable {
+; CHECK-LABEL: @shuffle_17addnsw(
+; CHECK-NOT: shufflevector
+; CHECK: add nsw <4 x i32> %v1, %v2
+; CHECK: shufflevector
+ %t1 = shufflevector <4 x i32> %v1, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %t2 = shufflevector <4 x i32> %v2, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %r = add nsw <4 x i32> %t1, %t2
+ ret <4 x i32> %r
+}
+
+define <4 x i32> @shuffle_17addnuw(<4 x i32> %v1, <4 x i32> %v2) nounwind uwtable {
+; CHECK-LABEL: @shuffle_17addnuw(
+; CHECK-NOT: shufflevector
+; CHECK: add nuw <4 x i32> %v1, %v2
+; CHECK: shufflevector
+ %t1 = shufflevector <4 x i32> %v1, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %t2 = shufflevector <4 x i32> %v2, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %r = add nuw <4 x i32> %t1, %t2
+ ret <4 x i32> %r
+}
+
+define <4 x float> @shuffle_17fsub(<4 x float> %v1, <4 x float> %v2) nounwind uwtable {
+; CHECK-LABEL: @shuffle_17fsub(
+; CHECK-NOT: shufflevector
+; CHECK: fsub <4 x float> %v1, %v2
+; CHECK: shufflevector
+ %t1 = shufflevector <4 x float> %v1, <4 x float> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %t2 = shufflevector <4 x float> %v2, <4 x float> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %r = fsub <4 x float> %t1, %t2
+ ret <4 x float> %r
+}
+
+define <4 x i32> @shuffle_17addconst(<4 x i32> %v1, <4 x i32> %v2) {
+; CHECK-LABEL: @shuffle_17addconst(
+; CHECK-NOT: shufflevector
+; CHECK: [[VAR1:%[a-zA-Z0-9.]+]] = add <4 x i32> %v1, <i32 4, i32 1, i32 2, i32 3>
+; CHECK: [[VAR2:%[a-zA-Z0-9.]+]] = shufflevector <4 x i32> [[VAR1]], <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+; CHECK: ret <4 x i32> [[VAR2]]
+ %t1 = shufflevector <4 x i32> %v1, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %r = add <4 x i32> %t1, <i32 1, i32 2, i32 3, i32 4>
+ ret <4 x i32> %r
+}
+
+define <4 x i32> @shuffle_17add2(<4 x i32> %v) {
+; CHECK-LABEL: @shuffle_17add2(
+; CHECK-NOT: shufflevector
+; CHECK: [[VAR:%[a-zA-Z0-9.]+]] = shl <4 x i32> %v, <i32 1, i32 1, i32 1, i32 1>
+; CHECK: ret <4 x i32> [[VAR]]
+ %t1 = shufflevector <4 x i32> %v, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %t2 = add <4 x i32> %t1, %t1
+ %r = shufflevector <4 x i32> %t2, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %r
+}
+
+define <4 x i32> @shuffle_17mulsplat(<4 x i32> %v) {
+; CHECK-LABEL: @shuffle_17mulsplat(
+; CHECK-NOT: shufflevector
+; CHECK: [[VAR1:%[a-zA-Z0-9.]+]] = mul <4 x i32> %v, %v
+; CHECK: [[VAR2:%[a-zA-Z0-9.]+]] = shufflevector <4 x i32> [[VAR1]], <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK: ret <4 x i32> [[VAR2]]
+ %s1 = shufflevector <4 x i32> %v,
+ <4 x i32> zeroinitializer,
+ <4 x i32> zeroinitializer
+ %m1 = mul <4 x i32> %s1, %s1
+ %s2 = shufflevector <4 x i32> %m1,
+ <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %s2
+}
+
+; Do not reorder shuffle and binop if LHS of shuffles are of different size
+define <2 x i32> @pr19717(<4 x i32> %in0, <2 x i32> %in1) {
+; CHECK-LABEL: @pr19717(
+; CHECK: shufflevector
+; CHECK: shufflevector
+; CHECK: mul
+ %shuffle = shufflevector <4 x i32> %in0, <4 x i32> %in0, <2 x i32> zeroinitializer
+ %shuffle4 = shufflevector <2 x i32> %in1, <2 x i32> %in1, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %shuffle4
+ ret <2 x i32> %mul
+}
+
+define <4 x i16> @pr19717a(<8 x i16> %in0, <8 x i16> %in1) {
+; CHECK-LABEL: @pr19717a(
+; CHECK: [[VAR1:%[a-zA-Z0-9.]+]] = mul <8 x i16> %in0, %in1
+; CHECK: [[VAR2:%[a-zA-Z0-9.]+]] = shufflevector <8 x i16> [[VAR1]], <8 x i16> undef, <4 x i32> <i32 5, i32 5, i32 5, i32 5>
+; CHECK: ret <4 x i16> [[VAR2]]
+ %shuffle = shufflevector <8 x i16> %in0, <8 x i16> %in0, <4 x i32> <i32 5, i32 5, i32 5, i32 5>
+ %shuffle1 = shufflevector <8 x i16> %in1, <8 x i16> %in1, <4 x i32> <i32 5, i32 5, i32 5, i32 5>
+ %mul = mul <4 x i16> %shuffle, %shuffle1
+ ret <4 x i16> %mul
+}
+
+define <8 x i8> @pr19730(<16 x i8> %in0) {
+; CHECK-LABEL: @pr19730(
+; CHECK: shufflevector
+ %shuffle = shufflevector <16 x i8> %in0, <16 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ %shuffle1 = shufflevector <8 x i8> %shuffle, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x i8> %shuffle1
+}
+
+define i32 @pr19737(<4 x i32> %in0) {
+; CHECK-LABEL: @pr19737(
+; CHECK: [[VAR:%[a-zA-Z0-9.]+]] = extractelement <4 x i32> %in0, i32 0
+; CHECK: ret i32 [[VAR]]
+ %shuffle.i = shufflevector <4 x i32> zeroinitializer, <4 x i32> %in0, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %neg.i = xor <4 x i32> %shuffle.i, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %and.i = and <4 x i32> %in0, %neg.i
+ %rv = extractelement <4 x i32> %and.i, i32 0
+ ret i32 %rv
+}
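The shuffle_17* family pins down the reorder shuffle(a, m) op shuffle(b, m) -> shuffle(a op b, m): a lanewise binop commutes with a common lane permutation, so InstCombine performs the op first and keeps a single shuffle, carrying nsw/nuw flags across (@shuffle_17addnsw, @shuffle_17addnuw) and pre-permuting a constant operand instead (@shuffle_17addconst, where <1, 2, 3, 4> reappears as <4, 1, 2, 3> on the unshuffled side). pr19717 is the guard rail: when the shuffles produce a different vector width than their inputs, the operand types differ and the reorder is not attempted.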
diff --git a/test/Transforms/InstSimplify/compare.ll b/test/Transforms/InstSimplify/compare.ll
index ee6be04..105e244 100644
--- a/test/Transforms/InstSimplify/compare.ll
+++ b/test/Transforms/InstSimplify/compare.ll
@@ -757,3 +757,129 @@ define <4 x i8> @vectorselectfold2(<4 x i8> %a, <4 x i8> %b) {
; CHECK-LABEL: @vectorselectfold
; CHECK-NEXT: ret <4 x i8> %a
}
+
+define i1 @compare_always_true_slt(i16 %a) {
+ %1 = zext i16 %a to i32
+ %2 = sub nsw i32 0, %1
+ %3 = icmp slt i32 %2, 1
+ ret i1 %3
+
+; CHECK-LABEL: @compare_always_true_slt
+; CHECK-NEXT: ret i1 true
+}
+
+define i1 @compare_always_true_sle(i16 %a) {
+ %1 = zext i16 %a to i32
+ %2 = sub nsw i32 0, %1
+ %3 = icmp sle i32 %2, 0
+ ret i1 %3
+
+; CHECK-LABEL: @compare_always_true_sle
+; CHECK-NEXT: ret i1 true
+}
+
+define i1 @compare_always_false_sgt(i16 %a) {
+ %1 = zext i16 %a to i32
+ %2 = sub nsw i32 0, %1
+ %3 = icmp sgt i32 %2, 0
+ ret i1 %3
+
+; CHECK-LABEL: @compare_always_false_sgt
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @compare_always_false_sge(i16 %a) {
+ %1 = zext i16 %a to i32
+ %2 = sub nsw i32 0, %1
+ %3 = icmp sge i32 %2, 1
+ ret i1 %3
+
+; CHECK-LABEL: @compare_always_false_sge
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @compare_always_false_eq(i16 %a) {
+ %1 = zext i16 %a to i32
+ %2 = sub nsw i32 0, %1
+ %3 = icmp eq i32 %2, 1
+ ret i1 %3
+
+; CHECK-LABEL: @compare_always_false_eq
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @compare_always_false_ne(i16 %a) {
+ %1 = zext i16 %a to i32
+ %2 = sub nsw i32 0, %1
+ %3 = icmp ne i32 %2, 1
+ ret i1 %3
+
+; CHECK-LABEL: @compare_always_false_ne
+; CHECK-NEXT: ret i1 true
+}
+
+define i1 @compare_dividend(i32 %a) {
+ %div = sdiv i32 2, %a
+ %cmp = icmp eq i32 %div, 3
+ ret i1 %cmp
+
+; CHECK-LABEL: @compare_dividend
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @lshr_ugt_false(i32 %a) {
+ %shr = lshr i32 1, %a
+ %cmp = icmp ugt i32 %shr, 1
+ ret i1 %cmp
+; CHECK-LABEL: @lshr_ugt_false
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @exact_lshr_ugt_false(i32 %a) {
+ %shr = lshr exact i32 30, %a
+ %cmp = icmp ult i32 %shr, 15
+ ret i1 %cmp
+; CHECK-LABEL: @exact_lshr_ugt_false
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @lshr_sgt_false(i32 %a) {
+ %shr = lshr i32 1, %a
+ %cmp = icmp sgt i32 %shr, 1
+ ret i1 %cmp
+; CHECK-LABEL: @lshr_sgt_false
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @ashr_sgt_false(i32 %a) {
+ %shr = ashr i32 -30, %a
+ %cmp = icmp sgt i32 %shr, -1
+ ret i1 %cmp
+; CHECK-LABEL: @ashr_sgt_false
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @exact_ashr_sgt_false(i32 %a) {
+ %shr = ashr exact i32 -30, %a
+ %cmp = icmp sgt i32 %shr, -15
+ ret i1 %cmp
+; CHECK-LABEL: @exact_ashr_sgt_false
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @nonnull_arg(i32* nonnull %i) {
+ %cmp = icmp eq i32* %i, null
+ ret i1 %cmp
+; CHECK-LABEL: @nonnull_arg
+; CHECK: ret i1 false
+}
+
+declare nonnull i32* @returns_nonnull_helper()
+define i1 @returns_nonnull() {
+ %call = call nonnull i32* @returns_nonnull_helper()
+ %cmp = icmp eq i32* %call, null
+ ret i1 %cmp
+; CHECK-LABEL: @returns_nonnull
+; CHECK: ret i1 false
+}
+
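The compare_always_* block is a range-analysis exercise: %1 = zext i16 to i32 lies in [0, 65535], so %2 = sub nsw i32 0, %1 lies in [-65535, 0]; hence slt 1 and sle 0 are always true, while sgt 0, sge 1 and eq 1 are impossible (and ne 1 is always true, despite the test's name). @compare_dividend works the same way: sdiv i32 2, %a is confined to {-2, -1, 0, 1, 2} and can never equal 3. The nonnull cases need no arithmetic at all; the parameter and return attributes directly contradict a comparison against null.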
diff --git a/test/Transforms/InstSimplify/dead-code-removal.ll b/test/Transforms/InstSimplify/dead-code-removal.ll
new file mode 100644
index 0000000..e181f3b
--- /dev/null
+++ b/test/Transforms/InstSimplify/dead-code-removal.ll
@@ -0,0 +1,15 @@
+; RUN: opt -instsimplify -S < %s | FileCheck %s
+
+define void @foo() nounwind {
+ br i1 undef, label %1, label %4
+
+; <label>:1 ; preds = %1, %0
+; CHECK-NOT: phi
+; CHECK-NOT: sub
+ %2 = phi i32 [ %3, %1 ], [ undef, %0 ]
+ %3 = sub i32 0, undef
+ br label %1
+
+; <label>:4 ; preds = %0
+ ret void
+}
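What -instsimplify does here: %3 = sub i32 0, undef simplifies to undef, which in turn lets the phi simplify away, and since the loop body has no side effects both instructions are deleted; the CHECK-NOTs confirm the pass removes them even though the enclosing infinite loop itself remains.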
diff --git a/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll b/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll
index 47cf3f0..16bfe2a 100644
--- a/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll
+++ b/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll
@@ -1,10 +1,17 @@
-; RUN: opt < %s -internalize -internalize-public-api-list main -S | grep internal | count 3
+; RUN: opt < %s -internalize -internalize-public-api-list main -S | FileCheck %s
@A = global i32 0
+; CHECK: @A = internal global i32 0
+
@B = alias i32* @A
-@C = alias i32* @B
+; CHECK: @B = alias internal i32* @A
+
+@C = alias i32* @A
+; CHECK: @C = alias internal i32* @A
define i32 @main() {
%tmp = load i32* @C
ret i32 %tmp
}
+
+; CHECK: define i32 @main() {
diff --git a/test/Transforms/Internalize/local-visibility.ll b/test/Transforms/Internalize/local-visibility.ll
new file mode 100644
index 0000000..c24d4b7
--- /dev/null
+++ b/test/Transforms/Internalize/local-visibility.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -internalize -S | FileCheck %s
+; Internalized symbols should have default visibility.
+
+; CHECK: @global = global i32 0
+@global = global i32 0
+@llvm.used = appending global [1 x i32*] [i32* @global]
+
+; CHECK: @hidden.variable = internal global i32 0
+@hidden.variable = hidden global i32 0
+; CHECK: @protected.variable = internal global i32 0
+@protected.variable = protected global i32 0
+
+; CHECK: @hidden.alias = alias internal i32* @global
+@hidden.alias = hidden alias i32* @global
+; CHECK: @protected.alias = alias internal i32* @global
+@protected.alias = protected alias i32* @global
+
+; CHECK: define internal void @hidden.function() {
+define hidden void @hidden.function() {
+ ret void
+}
+; CHECK: define internal void @protected.function() {
+define protected void @protected.function() {
+ ret void
+}
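The rationale for local-visibility.ll: ELF visibility only matters for symbols that reach the dynamic symbol table, and internal (local) symbols never do, so once internalize drops a hidden or protected symbol to internal linkage the stale visibility is meaningless at best; resetting it to default keeps the IR consistent with the verifier of this era, which rejects non-default visibility on local linkage. Symbols kept alive through @llvm.used, like @global here, are left untouched.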
diff --git a/test/Transforms/JumpThreading/phi-eq.ll b/test/Transforms/JumpThreading/phi-eq.ll
index 40d3c7e..e05d5ee 100644
--- a/test/Transforms/JumpThreading/phi-eq.ll
+++ b/test/Transforms/JumpThreading/phi-eq.ll
@@ -1,4 +1,4 @@
-; RUN: llvm-as < %s | opt -jump-threading | llvm-dis | FileCheck %s
+; RUN: opt < %s -jump-threading -S | FileCheck %s
; Test whether two consecutive switches with identical structures assign the
; proper value to the proper variable. This is really testing
; Instruction::isIdenticalToWhenDefined, as previously that function was
diff --git a/test/Transforms/LoopSimplify/2007-10-28-InvokeCrash.ll b/test/Transforms/LoopSimplify/2007-10-28-InvokeCrash.ll
index e91d141..0534a0b 100644
--- a/test/Transforms/LoopSimplify/2007-10-28-InvokeCrash.ll
+++ b/test/Transforms/LoopSimplify/2007-10-28-InvokeCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llvm-as < %s | opt -loop-simplify -disable-output
+; RUN: opt < %s -loop-simplify -disable-output
; PR1752
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-s0:0:64-f80:32:32"
target triple = "i686-pc-mingw32"
diff --git a/test/Transforms/LoopStrengthReduce/AArch64/lit.local.cfg b/test/Transforms/LoopStrengthReduce/AArch64/lit.local.cfg
new file mode 100644
index 0000000..6642d28
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/AArch64/lit.local.cfg
@@ -0,0 +1,5 @@
+config.suffixes = ['.ll']
+
+targets = set(config.root.targets_to_build.split())
+if not 'AArch64' in targets:
+ config.unsupported = True
diff --git a/test/Transforms/LoopStrengthReduce/ARM64/lsr-memcpy.ll b/test/Transforms/LoopStrengthReduce/AArch64/lsr-memcpy.ll
index 9a175ad..9a175ad 100644
--- a/test/Transforms/LoopStrengthReduce/ARM64/lsr-memcpy.ll
+++ b/test/Transforms/LoopStrengthReduce/AArch64/lsr-memcpy.ll
diff --git a/test/Transforms/LoopStrengthReduce/ARM64/lsr-memset.ll b/test/Transforms/LoopStrengthReduce/AArch64/lsr-memset.ll
index 10b2c3a..10b2c3a 100644
--- a/test/Transforms/LoopStrengthReduce/ARM64/lsr-memset.ll
+++ b/test/Transforms/LoopStrengthReduce/AArch64/lsr-memset.ll
diff --git a/test/Transforms/LoopStrengthReduce/AArch64/req-regs.ll b/test/Transforms/LoopStrengthReduce/AArch64/req-regs.ll
new file mode 100644
index 0000000..217896e
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/AArch64/req-regs.ll
@@ -0,0 +1,70 @@
+; RUN: llc -mcpu=cyclone -debug-only=loop-reduce < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+; LSR used to fail here due to a bug in the ReqRegs test.
+; CHECK: The chosen solution requires
+; CHECK-NOT: No Satisfactory Solution
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-ios"
+
+define void @do_integer_add(i64 %iterations, i8* nocapture readonly %cookie) {
+entry:
+ %N = bitcast i8* %cookie to i32*
+ %0 = load i32* %N, align 4
+ %add = add nsw i32 %0, 57
+ %cmp56 = icmp eq i64 %iterations, 0
+ br i1 %cmp56, label %while.end, label %for.cond.preheader.preheader
+
+for.cond.preheader.preheader: ; preds = %entry
+ br label %for.cond.preheader
+
+while.cond.loopexit: ; preds = %for.body
+ %add21.lcssa = phi i32 [ %add21, %for.body ]
+ %dec58 = add i64 %dec58.in, -1
+ %cmp = icmp eq i64 %dec58, 0
+ br i1 %cmp, label %while.end.loopexit, label %for.cond.preheader
+
+for.cond.preheader: ; preds = %for.cond.preheader.preheader, %while.cond.loopexit
+ %dec58.in = phi i64 [ %dec58, %while.cond.loopexit ], [ %iterations, %for.cond.preheader.preheader ]
+ %a.057 = phi i32 [ %add21.lcssa, %while.cond.loopexit ], [ %add, %for.cond.preheader.preheader ]
+ br label %for.body
+
+for.body: ; preds = %for.body, %for.cond.preheader
+ %a.154 = phi i32 [ %a.057, %for.cond.preheader ], [ %add21, %for.body ]
+ %i.053 = phi i32 [ 1, %for.cond.preheader ], [ %inc, %for.body ]
+ %inc = add nsw i32 %i.053, 1
+ %add2 = shl i32 %a.154, 1
+ %add3 = add nsw i32 %add2, %i.053
+ %add4 = shl i32 %add3, 1
+ %add5 = add nsw i32 %add4, %i.053
+ %add6 = shl i32 %add5, 1
+ %add7 = add nsw i32 %add6, %i.053
+ %add8 = shl i32 %add7, 1
+ %add9 = add nsw i32 %add8, %i.053
+ %add10 = shl i32 %add9, 1
+ %add11 = add nsw i32 %add10, %i.053
+ %add12 = shl i32 %add11, 1
+ %add13 = add nsw i32 %add12, %i.053
+ %add14 = shl i32 %add13, 1
+ %add15 = add nsw i32 %add14, %i.053
+ %add16 = shl i32 %add15, 1
+ %add17 = add nsw i32 %add16, %i.053
+ %add18 = shl i32 %add17, 1
+ %add19 = add nsw i32 %add18, %i.053
+ %add20 = shl i32 %add19, 1
+ %add21 = add nsw i32 %add20, %i.053
+ %exitcond = icmp eq i32 %inc, 1001
+ br i1 %exitcond, label %while.cond.loopexit, label %for.body
+
+while.end.loopexit: ; preds = %while.cond.loopexit
+ %add21.lcssa.lcssa = phi i32 [ %add21.lcssa, %while.cond.loopexit ]
+ br label %while.end
+
+while.end: ; preds = %while.end.loopexit, %entry
+ %a.0.lcssa = phi i32 [ %add, %entry ], [ %add21.lcssa.lcssa, %while.end.loopexit ]
+ tail call void @use_int(i32 %a.0.lcssa)
+ ret void
+}
+
+declare void @use_int(i32)
diff --git a/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll b/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll
index 5d728b5..756ea82 100644
--- a/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll
+++ b/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O3 -march=thumb -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc -O3 -mtriple=thumb-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
;
; LSR should only check for valid address modes when the IV user is a
; memory address.
diff --git a/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll b/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
index ab7f20f..f4edf09 100644
--- a/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
+++ b/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -O3 -march=thumb -mcpu=cortex-a9 | FileCheck %s -check-prefix=A9
+; RUN: llc -O3 -mtriple=thumb-eabi -mcpu=cortex-a9 %s -o - | FileCheck %s -check-prefix=A9
+; RUN: llc -O3 -mtriple=thumb-eabi -mcpu=cortex-a9 -addr-sink-using-gep=1 %s -o - | FileCheck %s -check-prefix=A9
; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup
diff --git a/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
index e42b67f..937791d 100644
--- a/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
+++ b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -1,5 +1,7 @@
; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 | FileCheck %s -check-prefix=X64
; RUN: llc < %s -O3 -march=x86 -mcpu=core2 | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 -addr-sink-using-gep=1 | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -O3 -march=x86 -mcpu=core2 -addr-sink-using-gep=1 | FileCheck %s -check-prefix=X32
; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup
diff --git a/test/Transforms/LoopStrengthReduce/dont_insert_redundant_ops.ll b/test/Transforms/LoopStrengthReduce/dont_insert_redundant_ops.ll
index 90051e3..16bb508 100644
--- a/test/Transforms/LoopStrengthReduce/dont_insert_redundant_ops.ll
+++ b/test/Transforms/LoopStrengthReduce/dont_insert_redundant_ops.ll
@@ -1,5 +1,9 @@
; Check that this test makes INDVAR and related stuff dead.
-; RUN: opt < %s -loop-reduce -S | grep phi | count 2
+; RUN: opt < %s -loop-reduce -S | FileCheck %s
+
+; CHECK: phi
+; CHECK: phi
+; CHECK-NOT: phi
declare i1 @pred()
diff --git a/test/Transforms/LoopUnroll/X86/partial.ll b/test/Transforms/LoopUnroll/X86/partial.ll
index 15867cb..a2b04c7 100644
--- a/test/Transforms/LoopUnroll/X86/partial.ll
+++ b/test/Transforms/LoopUnroll/X86/partial.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -S -loop-unroll -mcpu=nehalem -x86-use-partial-unrolling=1 | FileCheck %s
-; RUN: opt < %s -S -loop-unroll -mcpu=core -x86-use-partial-unrolling=1 | FileCheck -check-prefix=CHECK-NOUNRL %s
+; RUN: opt < %s -S -loop-unroll -mcpu=nehalem | FileCheck %s
+; RUN: opt < %s -S -loop-unroll -mcpu=core -unroll-runtime=0 | FileCheck -check-prefix=CHECK-NOUNRL %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@@ -76,5 +76,52 @@ for.end: ; preds = %vector.body
ret void
}
+define zeroext i16 @test1(i16* nocapture readonly %arr, i32 %n) #0 {
+entry:
+ %cmp25 = icmp eq i32 %n, 0
+ br i1 %cmp25, label %for.end, label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %reduction.026 = phi i16 [ %add14, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i16* %arr, i64 %indvars.iv
+ %0 = load i16* %arrayidx, align 2
+ %add = add i16 %0, %reduction.026
+ %sext = mul i64 %indvars.iv, 12884901888
+ %idxprom3 = ashr exact i64 %sext, 32
+ %arrayidx4 = getelementptr inbounds i16* %arr, i64 %idxprom3
+ %1 = load i16* %arrayidx4, align 2
+ %add7 = add i16 %add, %1
+ %sext28 = mul i64 %indvars.iv, 21474836480
+ %idxprom10 = ashr exact i64 %sext28, 32
+ %arrayidx11 = getelementptr inbounds i16* %arr, i64 %idxprom10
+ %2 = load i16* %arrayidx11, align 2
+ %add14 = add i16 %add7, %2
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %reduction.0.lcssa = phi i16 [ 0, %entry ], [ %add14, %for.body ]
+ ret i16 %reduction.0.lcssa
+
+; This loop is too large to be partially unrolled (size=16)
+
+; CHECK-LABEL: @test1
+; CHECK: br
+; CHECK: br
+; CHECK: br
+; CHECK: br
+; CHECK-NOT: br
+
+; CHECK-NOUNRL-LABEL: @test1
+; CHECK-NOUNRL: br
+; CHECK-NOUNRL: br
+; CHECK-NOUNRL: br
+; CHECK-NOUNRL: br
+; CHECK-NOUNRL-NOT: br
+}
+
attributes #0 = { nounwind uwtable }
diff --git a/test/Transforms/LoopUnroll/loop-remarks.ll b/test/Transforms/LoopUnroll/loop-remarks.ll
new file mode 100644
index 0000000..ff3ac17
--- /dev/null
+++ b/test/Transforms/LoopUnroll/loop-remarks.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -S -loop-unroll -pass-remarks=loop-unroll -unroll-count=16 2>&1 | FileCheck -check-prefix=COMPLETE-UNROLL %s
+; RUN: opt < %s -S -loop-unroll -pass-remarks=loop-unroll -unroll-count=4 2>&1 | FileCheck -check-prefix=PARTIAL-UNROLL %s
+
+; COMPLETE-UNROLL: remark: {{.*}}: completely unrolled loop with 16 iterations
+; PARTIAL-UNROLL: remark: {{.*}}: unrolled loop by a factor of 4
+
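+; For orientation, @sum below corresponds roughly to the following C
+; (a sketch, not the test's original source; @baz is an opaque external call):
+;
+;   int sum(void) {
+;     int s = 0;
+;     for (int i = 0; i < 16; ++i)
+;       s += baz(i + 4);
+;     return s;
+;   }
+;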
+define i32 @sum() {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %s.06 = phi i32 [ 0, %entry ], [ %add1, %for.body ]
+ %i.05 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %add = add nsw i32 %i.05, 4
+ %call = tail call i32 @baz(i32 %add) #2
+ %add1 = add nsw i32 %call, %s.06
+ %inc = add nsw i32 %i.05, 1
+ %exitcond = icmp eq i32 %inc, 16
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret i32 %add1
+}
+
+declare i32 @baz(i32)
diff --git a/test/Transforms/LoopVectorize/AArch64/aarch64-unroll.ll b/test/Transforms/LoopVectorize/AArch64/aarch64-unroll.ll
new file mode 100644
index 0000000..9962c3d
--- /dev/null
+++ b/test/Transforms/LoopVectorize/AArch64/aarch64-unroll.ll
@@ -0,0 +1,42 @@
+; RUN: opt < %s -loop-vectorize -mtriple=aarch64-none-linux-gnu -mattr=+neon -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+
+; Function Attrs: nounwind
+define i32* @array_add(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* %c, i32 %size) {
+;CHECK-LABEL: array_add
+;CHECK: load <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+entry:
+ %cmp10 = icmp sgt i32 %size, 0
+ br i1 %cmp10, label %for.body.preheader, label %for.end
+
+for.body.preheader: ; preds = %entry
+ br label %for.body
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %1 = load i32* %arrayidx2, align 4
+ %add = add nsw i32 %1, %0
+ %arrayidx4 = getelementptr inbounds i32* %c, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx4, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %size
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret i32* %c
+}
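+
+; The checks above expect four vector loads, two vector adds and two vector
+; stores: a vectorization factor of 4 combined with an interleave (unroll)
+; factor of 2. A rough scalar equivalent of @array_add (a sketch, not the
+; original source):
+;
+;   int *array_add(const int *a, const int *b, int *c, int size) {
+;     for (int i = 0; i < size; ++i)
+;       c[i] = a[i] + b[i];
+;     return c;
+;   }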
diff --git a/test/Transforms/LoopVectorize/AArch64/arm64-unroll.ll b/test/Transforms/LoopVectorize/AArch64/arm64-unroll.ll
new file mode 100644
index 0000000..f8eb3ed
--- /dev/null
+++ b/test/Transforms/LoopVectorize/AArch64/arm64-unroll.ll
@@ -0,0 +1,42 @@
+; RUN: opt < %s -loop-vectorize -mtriple=arm64-none-linux-gnu -mattr=+neon -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+
+; Function Attrs: nounwind
+define i32* @array_add(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* %c, i32 %size) {
+;CHECK-LABEL: array_add
+;CHECK: load <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+entry:
+ %cmp10 = icmp sgt i32 %size, 0
+ br i1 %cmp10, label %for.body.preheader, label %for.end
+
+for.body.preheader: ; preds = %entry
+ br label %for.body
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %1 = load i32* %arrayidx2, align 4
+ %add = add nsw i32 %1, %0
+ %arrayidx4 = getelementptr inbounds i32* %c, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx4, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %size
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret i32* %c
+}
diff --git a/test/Transforms/LoopVectorize/ARM64/gather-cost.ll b/test/Transforms/LoopVectorize/AArch64/gather-cost.ll
index bb28538..bb28538 100644
--- a/test/Transforms/LoopVectorize/ARM64/gather-cost.ll
+++ b/test/Transforms/LoopVectorize/AArch64/gather-cost.ll
diff --git a/test/Transforms/LoopStrengthReduce/ARM64/lit.local.cfg b/test/Transforms/LoopVectorize/AArch64/lit.local.cfg
index a499579..f1d1f88 100644
--- a/test/Transforms/LoopStrengthReduce/ARM64/lit.local.cfg
+++ b/test/Transforms/LoopVectorize/AArch64/lit.local.cfg
@@ -3,3 +3,4 @@ config.suffixes = ['.ll']
targets = set(config.root.targets_to_build.split())
if not 'ARM64' in targets:
config.unsupported = True
+
diff --git a/test/Transforms/LoopVectorize/X86/metadata-enable.ll b/test/Transforms/LoopVectorize/X86/metadata-enable.ll
index 224823b..9e4e989 100644
--- a/test/Transforms/LoopVectorize/X86/metadata-enable.ll
+++ b/test/Transforms/LoopVectorize/X86/metadata-enable.ll
@@ -1,13 +1,13 @@
-; RUN: opt < %s -mcpu=corei7 -O1 -S -x86-use-partial-unrolling=0 | FileCheck %s --check-prefix=O1
-; RUN: opt < %s -mcpu=corei7 -O2 -S -x86-use-partial-unrolling=0 | FileCheck %s --check-prefix=O2
-; RUN: opt < %s -mcpu=corei7 -O3 -S -x86-use-partial-unrolling=0 | FileCheck %s --check-prefix=O3
-; RUN: opt < %s -mcpu=corei7 -Os -S -x86-use-partial-unrolling=0 | FileCheck %s --check-prefix=Os
-; RUN: opt < %s -mcpu=corei7 -Oz -S -x86-use-partial-unrolling=0 | FileCheck %s --check-prefix=Oz
-; RUN: opt < %s -mcpu=corei7 -O1 -vectorize-loops -S -x86-use-partial-unrolling=0 | FileCheck %s --check-prefix=O1VEC
-; RUN: opt < %s -mcpu=corei7 -Oz -vectorize-loops -S -x86-use-partial-unrolling=0 | FileCheck %s --check-prefix=OzVEC
-; RUN: opt < %s -mcpu=corei7 -O1 -loop-vectorize -S -x86-use-partial-unrolling=0 | FileCheck %s --check-prefix=O1VEC2
-; RUN: opt < %s -mcpu=corei7 -Oz -loop-vectorize -S -x86-use-partial-unrolling=0 | FileCheck %s --check-prefix=OzVEC2
-; RUN: opt < %s -mcpu=corei7 -O3 -disable-loop-vectorization -S -x86-use-partial-unrolling=0 | FileCheck %s --check-prefix=O3DIS
+; RUN: opt < %s -mcpu=corei7 -O1 -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O1
+; RUN: opt < %s -mcpu=corei7 -O2 -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O2
+; RUN: opt < %s -mcpu=corei7 -O3 -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O3
+; RUN: opt < %s -mcpu=corei7 -Os -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=Os
+; RUN: opt < %s -mcpu=corei7 -Oz -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=Oz
+; RUN: opt < %s -mcpu=corei7 -O1 -vectorize-loops -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O1VEC
+; RUN: opt < %s -mcpu=corei7 -Oz -vectorize-loops -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=OzVEC
+; RUN: opt < %s -mcpu=corei7 -O1 -loop-vectorize -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O1VEC2
+; RUN: opt < %s -mcpu=corei7 -Oz -loop-vectorize -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=OzVEC2
+; RUN: opt < %s -mcpu=corei7 -O3 -disable-loop-vectorization -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O3DIS
; This file tests the llvm.vectorizer.pragma forcing vectorization even when
; optimization levels are too low, or when vectorization is disabled.
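;
; The pragma is communicated to the vectorizer as loop metadata; in this
; release it takes roughly the form used elsewhere in these tests (a sketch,
; metadata numbering is illustrative):
;
;   br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
;   !0 = metadata !{metadata !0, metadata !1}
;   !1 = metadata !{metadata !"llvm.vectorizer.enable", i1 true}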
diff --git a/test/Transforms/LoopVectorize/X86/vect.omp.force.ll b/test/Transforms/LoopVectorize/X86/vect.omp.force.ll
new file mode 100644
index 0000000..84ffb27
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/vect.omp.force.ll
@@ -0,0 +1,93 @@
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -debug-only=loop-vectorize -stats -S 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+; CHECK: LV: Loop hints: force=enabled
+; CHECK: LV: Loop hints: force=?
+; No more loops in the module
+; CHECK-NOT: LV: Loop hints: force=
+; CHECK: 2 loop-vectorize - Number of loops analyzed for vectorization
+; CHECK: 1 loop-vectorize - Number of loops vectorized
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;
+; The source code for the test:
+;
+; #include <math.h>
+; void foo(float* restrict A, float * restrict B, int size)
+; {
+; for (int i = 0; i < size; ++i) A[i] = sinf(B[i]);
+; }
+;
+
+;
+; This loop will be vectorized even though the scalar cost is lower than any of the vector costs, because vectorization is explicitly forced in the metadata.
+;
+
+define void @vectorized(float* noalias nocapture %A, float* noalias nocapture %B, i32 %size) {
+entry:
+ %cmp6 = icmp sgt i32 %size, 0
+ br i1 %cmp6, label %for.body.preheader, label %for.end
+
+for.body.preheader:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
+ %call = tail call float @llvm.sin.f32(float %0)
+ %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %size
+ br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !1
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
+
+!1 = metadata !{metadata !1, metadata !2}
+!2 = metadata !{metadata !"llvm.vectorizer.enable", i1 true}
+
+;
+; This function will not be vectorized, as the scalar cost is lower than any of the vector costs.
+;
+
+define void @not_vectorized(float* noalias nocapture %A, float* noalias nocapture %B, i32 %size) {
+entry:
+ %cmp6 = icmp sgt i32 %size, 0
+ br i1 %cmp6, label %for.body.preheader, label %for.end
+
+for.body.preheader:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+ %call = tail call float @llvm.sin.f32(float %0)
+ %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %size
+ br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !3
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
+
+declare float @llvm.sin.f32(float) nounwind readnone
+
+; Dummy metadata
+!3 = metadata !{metadata !3}
+
diff --git a/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll b/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
new file mode 100644
index 0000000..1b979e5
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
@@ -0,0 +1,73 @@
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -debug-only=loop-vectorize -stats -S -vectorizer-min-trip-count=21 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+; CHECK: LV: Loop hints: force=enabled
+; CHECK: LV: Loop hints: force=?
+; No more loops in the module
+; CHECK-NOT: LV: Loop hints: force=
+; CHECK: 2 loop-vectorize - Number of loops analyzed for vectorization
+; CHECK: 1 loop-vectorize - Number of loops vectorized
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;
+; The source code for the test:
+;
+; void foo(float* restrict A, float* restrict B)
+; {
+; for (int i = 0; i < 20; ++i) A[i] += B[i];
+; }
+;
+
+;
+; This loop will be vectorized even though the trip count is below the threshold, because vectorization is explicitly forced in the metadata.
+;
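+; Both loops below run exactly 20 iterations, one below the threshold of 21
+; set via -vectorizer-min-trip-count, so only the explicitly forced loop is
+; vectorized.
+;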
+define void @vectorized(float* noalias nocapture %A, float* noalias nocapture readonly %B) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
+ %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ %1 = load float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
+ %add = fadd fast float %0, %1
+ store float %add, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 20
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1
+
+for.end:
+ ret void
+}
+
+!1 = metadata !{metadata !1, metadata !2}
+!2 = metadata !{metadata !"llvm.vectorizer.enable", i1 true}
+
+;
+; This loop will not be vectorized as the trip count is below the threshold.
+;
+define void @not_vectorized(float* noalias nocapture %A, float* noalias nocapture readonly %B) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+ %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ %1 = load float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+ %add = fadd fast float %0, %1
+ store float %add, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 20
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !3
+
+for.end:
+ ret void
+}
+
+!3 = metadata !{metadata !3}
+
diff --git a/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll b/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll
new file mode 100644
index 0000000..685d034
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll
@@ -0,0 +1,67 @@
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-unknown-linux -S -pass-remarks='loop-vectorize' 2>&1 | FileCheck -check-prefix=VECTORIZED %s
+; RUN: opt < %s -loop-vectorize -force-vector-width=1 -force-vector-unroll=4 -mtriple=x86_64-unknown-linux -S -pass-remarks='.*vectorize.*' 2>&1 | FileCheck -check-prefix=UNROLLED %s
+
+; VECTORIZED: remark: {{.*}}.c:17:8: vectorized loop (vectorization factor: 4, unrolling interleave factor: 1)
+; UNROLLED: remark: {{.*}}.c:17:8: unrolled with interleaving factor 4 (vectorization not beneficial)
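+;
+; With -force-vector-width=1 the vectorizer emits no vector code, so the
+; second RUN line exercises the interleave-only remark shown above.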
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define i32 @foo(i32 %n) #0 {
+entry:
+ %diff = alloca i32, align 4
+ %cb = alloca [16 x i8], align 16
+ %cc = alloca [16 x i8], align 16
+ store i32 0, i32* %diff, align 4, !dbg !10, !tbaa !11
+ br label %for.body, !dbg !15
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %add8 = phi i32 [ 0, %entry ], [ %add, %for.body ], !dbg !19
+ %arrayidx = getelementptr inbounds [16 x i8]* %cb, i64 0, i64 %indvars.iv, !dbg !19
+ %0 = load i8* %arrayidx, align 1, !dbg !19, !tbaa !21
+ %conv = sext i8 %0 to i32, !dbg !19
+ %arrayidx2 = getelementptr inbounds [16 x i8]* %cc, i64 0, i64 %indvars.iv, !dbg !19
+ %1 = load i8* %arrayidx2, align 1, !dbg !19, !tbaa !21
+ %conv3 = sext i8 %1 to i32, !dbg !19
+ %sub = sub i32 %conv, %conv3, !dbg !19
+ %add = add nsw i32 %sub, %add8, !dbg !19
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !15
+ %exitcond = icmp eq i64 %indvars.iv.next, 16, !dbg !15
+ br i1 %exitcond, label %for.end, label %for.body, !dbg !15
+
+for.end: ; preds = %for.body
+ store i32 %add, i32* %diff, align 4, !dbg !19, !tbaa !11
+ call void @ibar(i32* %diff) #2, !dbg !22
+ ret i32 0, !dbg !23
+}
+
+declare void @ibar(i32*) #1
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8}
+!llvm.ident = !{!9}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 ", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 2} ; [ DW_TAG_compile_unit ] [./vectorization-remarks.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"vectorization-remarks.c", metadata !"."}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 5, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32)* @foo, null, null, metadata !2, i32 6} ; [ DW_TAG_subprogram ] [line 5] [def] [scope 6] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [./vectorization-remarks.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!8 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!9 = metadata !{metadata !"clang version 3.5.0 "}
+!10 = metadata !{i32 8, i32 3, metadata !4, null} ; [ DW_TAG_imported_declaration ]
+!11 = metadata !{metadata !12, metadata !12, i64 0}
+!12 = metadata !{metadata !"int", metadata !13, i64 0}
+!13 = metadata !{metadata !"omnipotent char", metadata !14, i64 0}
+!14 = metadata !{metadata !"Simple C/C++ TBAA"}
+!15 = metadata !{i32 17, i32 8, metadata !16, null}
+!16 = metadata !{i32 786443, metadata !1, metadata !17, i32 17, i32 8, i32 2, i32 3} ; [ DW_TAG_lexical_block ] [./vectorization-remarks.c]
+!17 = metadata !{i32 786443, metadata !1, metadata !18, i32 17, i32 8, i32 1, i32 2} ; [ DW_TAG_lexical_block ] [./vectorization-remarks.c]
+!18 = metadata !{i32 786443, metadata !1, metadata !4, i32 17, i32 3, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [./vectorization-remarks.c]
+!19 = metadata !{i32 18, i32 5, metadata !20, null}
+!20 = metadata !{i32 786443, metadata !1, metadata !18, i32 17, i32 27, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [./vectorization-remarks.c]
+!21 = metadata !{metadata !13, metadata !13, i64 0}
+!22 = metadata !{i32 20, i32 3, metadata !4, null}
+!23 = metadata !{i32 21, i32 3, metadata !4, null}
diff --git a/test/Transforms/LoopVectorize/store-shuffle-bug.ll b/test/Transforms/LoopVectorize/store-shuffle-bug.ll
index 0ec8010..e53c120 100644
--- a/test/Transforms/LoopVectorize/store-shuffle-bug.ll
+++ b/test/Transforms/LoopVectorize/store-shuffle-bug.ll
@@ -19,18 +19,13 @@ entry:
; CHECK-LABEL: @t(
; CHECK: vector.body:
-; CHECK: load <4 x i32>
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = shufflevector
-; CHECK: load <4 x i32>
-; CHECK: [[VAR2:%[a-zA-Z0-9]+]] = shufflevector
+; CHECK: [[VAR1:%[a-zA-Z0-9.]+]] = load <4 x i32>
+; CHECK: [[VAR2:%[a-zA-Z0-9.]+]] = load <4 x i32>
; CHECK: [[VAR3:%[a-zA-Z0-9]+]] = add nsw <4 x i32> [[VAR2]], [[VAR1]]
-; CHECK: [[VAR4:%[a-zA-Z0-9]+]] = shufflevector <4 x i32> [[VAR3]]
-; CHECK: store <4 x i32> [[VAR4]]
-; CHECK: load <4 x i32>
-; CHECK: [[VAR5:%[a-zA-Z0-9]+]] = shufflevector
-; CHECK-NOT: add nsw <4 x i32> [[VAR4]], [[VAR5]]
-; CHECK-NOT: add nsw <4 x i32> [[VAR5]], [[VAR4]]
-; CHECK: add nsw <4 x i32> [[VAR3]], [[VAR5]]
+; CHECK: store <4 x i32> [[VAR3]]
+; CHECK: [[VAR4:%[a-zA-Z0-9.]+]] = load <4 x i32>
+; CHECK: add nsw <4 x i32> [[VAR3]], [[VAR4]]
+; CHECK-NOT: shufflevector
for.body:
%indvars.iv = phi i64 [ 93, %entry ], [ %indvars.iv.next, %for.body ]
diff --git a/test/Transforms/LoopVectorize/vect.omp.persistence.ll b/test/Transforms/LoopVectorize/vect.omp.persistence.ll
new file mode 100644
index 0000000..dc3df7a
--- /dev/null
+++ b/test/Transforms/LoopVectorize/vect.omp.persistence.ll
@@ -0,0 +1,88 @@
+; RUN: opt < %s -O2 -force-vector-unroll=2 -force-vector-width=4 -debug-only=loop-vectorize -stats -S 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+; Loop from "rotated"
+; CHECK: LV: Loop hints: force=enabled
+; Loop from "nonrotated"
+; CHECK: LV: Loop hints: force=enabled
+; No more loops in the module
+; CHECK-NOT: LV: Loop hints: force=
+; In total only 1 loop should be rotated.
+; CHECK: 1 loop-rotate
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; See http://reviews.llvm.org/D3348 for details.
+
+;
+; Test #1
+;
+; Ensure that "llvm.vectorizer.enable" metadata was not lost prior to LoopVectorize pass.
+; In past LoopRotate was clearing that metadata.
+;
+; The source C code is:
+; void rotated(float *a, int size)
+; {
+; int t = 0;
+; #pragma omp simd
+; for (int i = 0; i < size; ++i) {
+; a[i] = a[i-5] * a[i+2];
+; ++t;
+; }
+;}
+
+define void @rotated(float* nocapture %a, i64 %size) {
+entry:
+ %cmp1 = icmp sgt i64 %size, 0
+ br i1 %cmp1, label %for.header, label %for.end
+
+for.header:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %cmp2 = icmp sgt i64 %indvars.iv, %size
+ br i1 %cmp2, label %for.end, label %for.body
+
+for.body:
+
+ %0 = add nsw i64 %indvars.iv, -5
+ %arrayidx = getelementptr inbounds float* %a, i64 %0
+ %1 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
+ %2 = add nsw i64 %indvars.iv, 2
+ %arrayidx2 = getelementptr inbounds float* %a, i64 %2
+ %3 = load float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
+ %mul = fmul float %1, %3
+ %arrayidx4 = getelementptr inbounds float* %a, i64 %indvars.iv
+ store float %mul, float* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !1
+
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ br label %for.header, !llvm.loop !1
+
+for.end:
+ ret void
+}
+
+!1 = metadata !{metadata !1, metadata !2}
+!2 = metadata !{metadata !"llvm.vectorizer.enable", i1 true}
+
+;
+; Test #2
+;
+; Ensure that "llvm.vectorizer.enable" metadata was not lost even
+; if loop was not rotated (see http://reviews.llvm.org/D3348#comment-4).
+;
+define i32 @nonrotated(i32 %a) {
+entry:
+ br label %loop_cond
+loop_cond:
+ %indx = phi i32 [ 1, %entry ], [ %inc, %loop_inc ]
+ %cmp = icmp ne i32 %indx, %a
+ br i1 %cmp, label %return, label %loop_inc
+loop_inc:
+ %inc = add i32 %indx, 1
+ br label %loop_cond, !llvm.loop !3
+return:
+ ret i32 0
+}
+
+!3 = metadata !{metadata !3, metadata !4}
+!4 = metadata !{metadata !"llvm.vectorizer.enable", i1 true}
diff --git a/test/Transforms/LoopVectorize/vect.stats.ll b/test/Transforms/LoopVectorize/vect.stats.ll
new file mode 100644
index 0000000..92ec24f
--- /dev/null
+++ b/test/Transforms/LoopVectorize/vect.stats.ll
@@ -0,0 +1,65 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=4 -force-vector-width=4 -debug-only=loop-vectorize -stats -S 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+;
+; We have two loops: one is vectorizable and the other is not.
+;
+
+; CHECK: 2 loop-vectorize - Number of loops analyzed for vectorization
+; CHECK: 1 loop-vectorize - Number of loops vectorized
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @vectorized(float* nocapture %a, i64 %size) {
+entry:
+ %cmp1 = icmp sgt i64 %size, 0
+ br i1 %cmp1, label %for.header, label %for.end
+
+for.header:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %cmp2 = icmp sgt i64 %indvars.iv, %size
+ br i1 %cmp2, label %for.end, label %for.body
+
+for.body:
+
+ %arrayidx = getelementptr inbounds float* %a, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %mul = fmul float %0, %0
+ store float %mul, float* %arrayidx, align 4
+
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ br label %for.header
+
+for.end:
+ ret void
+}
+
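+; In @not_vectorized the loads of a[i-5] and a[i+2] overlap the store to
+; a[i] across iterations, so the loop-carried dependences keep the
+; vectorizer from transforming this loop.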
+define void @not_vectorized(float* nocapture %a, i64 %size) {
+entry:
+ %cmp1 = icmp sgt i64 %size, 0
+ br i1 %cmp1, label %for.header, label %for.end
+
+for.header:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %cmp2 = icmp sgt i64 %indvars.iv, %size
+ br i1 %cmp2, label %for.end, label %for.body
+
+for.body:
+
+ %0 = add nsw i64 %indvars.iv, -5
+ %arrayidx = getelementptr inbounds float* %a, i64 %0
+ %1 = load float* %arrayidx, align 4
+ %2 = add nsw i64 %indvars.iv, 2
+ %arrayidx2 = getelementptr inbounds float* %a, i64 %2
+ %3 = load float* %arrayidx2, align 4
+ %mul = fmul float %1, %3
+ %arrayidx4 = getelementptr inbounds float* %a, i64 %indvars.iv
+ store float %mul, float* %arrayidx4, align 4
+
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ br label %for.header
+
+for.end:
+ ret void
+}
\ No newline at end of file
diff --git a/test/Transforms/MergeFunc/crash.ll b/test/Transforms/MergeFunc/crash.ll
index 0897ba2..3475e28 100644
--- a/test/Transforms/MergeFunc/crash.ll
+++ b/test/Transforms/MergeFunc/crash.ll
@@ -8,9 +8,9 @@ target triple = "i386-pc-linux-gnu"
%.qux.2585 = type { i32, i32, i8* }
@g2 = external unnamed_addr constant [9 x i8], align 1
-@g3 = internal hidden unnamed_addr constant [1 x i8*] [i8* bitcast (i8* (%.qux.2585*)* @func35 to i8*)]
+@g3 = internal unnamed_addr constant [1 x i8*] [i8* bitcast (i8* (%.qux.2585*)* @func35 to i8*)]
-define internal hidden i32 @func1(i32* %ptr, { i32, i32 }* nocapture %method) align 2 {
+define internal i32 @func1(i32* %ptr, { i32, i32 }* nocapture %method) align 2 {
br label %1
; <label>:1
@@ -20,26 +20,26 @@ define internal hidden i32 @func1(i32* %ptr, { i32, i32 }* nocapture %method) al
ret i32 undef
}
-define internal hidden i32 @func10(%.qux.2496* nocapture %this) align 2 {
+define internal i32 @func10(%.qux.2496* nocapture %this) align 2 {
%1 = getelementptr inbounds %.qux.2496* %this, i32 0, i32 1, i32 1
%2 = load i32* %1, align 4
ret i32 %2
}
-define internal hidden i8* @func29(i32* nocapture %this) align 2 {
+define internal i8* @func29(i32* nocapture %this) align 2 {
ret i8* getelementptr inbounds ([9 x i8]* @g2, i32 0, i32 0)
}
-define internal hidden i32* @func33(%.qux.2585* nocapture %this) align 2 {
+define internal i32* @func33(%.qux.2585* nocapture %this) align 2 {
ret i32* undef
}
-define internal hidden i32* @func34(%.qux.2585* nocapture %this) align 2 {
+define internal i32* @func34(%.qux.2585* nocapture %this) align 2 {
%1 = getelementptr inbounds %.qux.2585* %this, i32 0
ret i32* undef
}
-define internal hidden i8* @func35(%.qux.2585* nocapture %this) align 2 {
+define internal i8* @func35(%.qux.2585* nocapture %this) align 2 {
%1 = getelementptr inbounds %.qux.2585* %this, i32 0, i32 2
%2 = load i8** %1, align 4
ret i8* %2
diff --git a/test/Transforms/MergeFunc/inttoptr-address-space.ll b/test/Transforms/MergeFunc/inttoptr-address-space.ll
index 0d834bc..2e5e2fc 100644
--- a/test/Transforms/MergeFunc/inttoptr-address-space.ll
+++ b/test/Transforms/MergeFunc/inttoptr-address-space.ll
@@ -6,10 +6,10 @@ target datalayout = "e-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-
%.qux.2585 = type { i32, i32, i8* }
@g2 = external addrspace(1) constant [9 x i8], align 1
-@g3 = internal hidden unnamed_addr constant [1 x i8*] [i8* bitcast (i8* (%.qux.2585 addrspace(1)*)* @func35 to i8*)]
+@g3 = internal unnamed_addr constant [1 x i8*] [i8* bitcast (i8* (%.qux.2585 addrspace(1)*)* @func35 to i8*)]
-define internal hidden i32 @func10(%.qux.2496 addrspace(1)* nocapture %this) align 2 {
+define internal i32 @func10(%.qux.2496 addrspace(1)* nocapture %this) align 2 {
bb:
%tmp = getelementptr inbounds %.qux.2496 addrspace(1)* %this, i32 0, i32 1, i32 1
%tmp1 = load i32 addrspace(1)* %tmp, align 4
@@ -17,7 +17,7 @@ bb:
}
; Check for pointer bitwidth equal assertion failure
-define internal hidden i8* @func35(%.qux.2585 addrspace(1)* nocapture %this) align 2 {
+define internal i8* @func35(%.qux.2585 addrspace(1)* nocapture %this) align 2 {
bb:
; CHECK-LABEL: @func35(
; CHECK: %[[V2:.+]] = bitcast %.qux.2585 addrspace(1)* %{{.*}} to %.qux.2496 addrspace(1)*
diff --git a/test/Transforms/MergeFunc/inttoptr.ll b/test/Transforms/MergeFunc/inttoptr.ll
index 6a69e3f..86c18a0 100644
--- a/test/Transforms/MergeFunc/inttoptr.ll
+++ b/test/Transforms/MergeFunc/inttoptr.ll
@@ -8,9 +8,9 @@ target triple = "i386-pc-linux-gnu"
%.qux.2585 = type { i32, i32, i8* }
@g2 = external unnamed_addr constant [9 x i8], align 1
-@g3 = internal hidden unnamed_addr constant [1 x i8*] [i8* bitcast (i8* (%.qux.2585*)* @func35 to i8*)]
+@g3 = internal unnamed_addr constant [1 x i8*] [i8* bitcast (i8* (%.qux.2585*)* @func35 to i8*)]
-define internal hidden i32 @func1(i32* %ptr, { i32, i32 }* nocapture %method) align 2 {
+define internal i32 @func1(i32* %ptr, { i32, i32 }* nocapture %method) align 2 {
bb:
br label %bb1
@@ -21,30 +21,30 @@ bb2: ; preds = %bb1
ret i32 undef
}
-define internal hidden i32 @func10(%.qux.2496* nocapture %this) align 2 {
+define internal i32 @func10(%.qux.2496* nocapture %this) align 2 {
bb:
%tmp = getelementptr inbounds %.qux.2496* %this, i32 0, i32 1, i32 1
%tmp1 = load i32* %tmp, align 4
ret i32 %tmp1
}
-define internal hidden i8* @func29(i32* nocapture %this) align 2 {
+define internal i8* @func29(i32* nocapture %this) align 2 {
bb:
ret i8* getelementptr inbounds ([9 x i8]* @g2, i32 0, i32 0)
}
-define internal hidden i32* @func33(%.qux.2585* nocapture %this) align 2 {
+define internal i32* @func33(%.qux.2585* nocapture %this) align 2 {
bb:
ret i32* undef
}
-define internal hidden i32* @func34(%.qux.2585* nocapture %this) align 2 {
+define internal i32* @func34(%.qux.2585* nocapture %this) align 2 {
bb:
%tmp = getelementptr inbounds %.qux.2585* %this, i32 0
ret i32* undef
}
-define internal hidden i8* @func35(%.qux.2585* nocapture %this) align 2 {
+define internal i8* @func35(%.qux.2585* nocapture %this) align 2 {
bb:
; CHECK-LABEL: @func35(
; CHECK: %[[V2:.+]] = bitcast %.qux.2585* %{{.*}} to %.qux.2496*
diff --git a/test/Transforms/MergeFunc/mergefunc-struct-return.ll b/test/Transforms/MergeFunc/mergefunc-struct-return.ll
new file mode 100644
index 0000000..d2cbe43
--- /dev/null
+++ b/test/Transforms/MergeFunc/mergefunc-struct-return.ll
@@ -0,0 +1,40 @@
+; RUN: opt -mergefunc -S < %s | FileCheck %s
+
+; This test makes sure that the mergefunc pass uses extractvalue and
+; insertvalue to convert the struct result type, as struct types cannot be
+; bitcast.
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+
+%kv1 = type { i32*, i32* }
+%kv2 = type { i8*, i8* }
+
+declare void @noop()
+
+define %kv1 @fn1() {
+; CHECK-LABEL: @fn1(
+ %tmp = alloca %kv1
+ %v1 = getelementptr %kv1* %tmp, i32 0, i32 0
+ store i32* null, i32** %v1
+ %v2 = getelementptr %kv1* %tmp, i32 0, i32 0
+ store i32* null, i32** %v2
+ call void @noop()
+ %v3 = load %kv1* %tmp
+ ret %kv1 %v3
+}
+
+define %kv2 @fn2() {
+; CHECK-LABEL: @fn2(
+; CHECK: %1 = tail call %kv1 @fn1()
+; CHECK: %2 = extractvalue %kv1 %1, 0
+; CHECK: %3 = bitcast i32* %2 to i8*
+; CHECK: %4 = insertvalue %kv2 undef, i8* %3, 0
+ %tmp = alloca %kv2
+ %v1 = getelementptr %kv2* %tmp, i32 0, i32 0
+ store i8* null, i8** %v1
+ %v2 = getelementptr %kv2* %tmp, i32 0, i32 0
+ store i8* null, i8** %v2
+ call void @noop()
+
+ %v3 = load %kv2* %tmp
+ ret %kv2 %v3
+}
diff --git a/test/Transforms/SLPVectorizer/AArch64/lit.local.cfg b/test/Transforms/SLPVectorizer/AArch64/lit.local.cfg
new file mode 100644
index 0000000..c420349
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/AArch64/lit.local.cfg
@@ -0,0 +1,3 @@
+targets = set(config.root.targets_to_build.split())
+if not 'AArch64' in targets:
+ config.unsupported = True
diff --git a/test/Transforms/SLPVectorizer/ARM64/mismatched-intrinsics.ll b/test/Transforms/SLPVectorizer/AArch64/mismatched-intrinsics.ll
index 3d6da12..3d6da12 100644
--- a/test/Transforms/SLPVectorizer/ARM64/mismatched-intrinsics.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/mismatched-intrinsics.ll
diff --git a/test/Transforms/SLPVectorizer/X86/align.ll b/test/Transforms/SLPVectorizer/X86/align.ll
new file mode 100644
index 0000000..f586573
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/align.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; Simple 2-pair chain with loads and stores
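+; The [3 x double] alloca is 16-byte aligned, but the vectorized store
+; covers elements 1 and 2 (byte offset 8), so it can only assume align 8.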
+; CHECK: test1
+define void @test1(double* %a, double* %b, double* %c) {
+entry:
+ %agg.tmp.i.i.sroa.0 = alloca [3 x double], align 16
+; CHECK: %[[V0:[0-9]+]] = load <2 x double>* %[[V2:[0-9]+]], align 8
+ %i0 = load double* %a
+ %i1 = load double* %b
+ %mul = fmul double %i0, %i1
+ %store1 = getelementptr inbounds [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 1
+ %store2 = getelementptr inbounds [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 2
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8
+ %mul5 = fmul double %i3, %i4
+; CHECK: store <2 x double> %[[V1:[0-9]+]], <2 x double>* %[[V2:[0-9]+]], align 8
+ store double %mul, double* %store1
+ store double %mul5, double* %store2, align 16
+; CHECK: ret
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/call.ll b/test/Transforms/SLPVectorizer/X86/call.ll
new file mode 100644
index 0000000..83d45c0
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/call.ll
@@ -0,0 +1,128 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -slp-threshold=-999 -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+declare double @sin(double)
+declare double @cos(double)
+declare double @pow(double, double)
+declare double @exp2(double)
+declare i64 @round(i64)
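+
+; The libm calls above have matching LLVM vector intrinsics, so pairs of
+; calls can be widened; @round is declared here with a non-libm i64
+; signature, so it has no intrinsic mapping and must stay scalar.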
+
+
+; CHECK: sin_libm
+; CHECK: call <2 x double> @llvm.sin.v2f64
+; CHECK: ret void
+define void @sin_libm(double* %a, double* %b, double* %c) {
+entry:
+ %i0 = load double* %a, align 8
+ %i1 = load double* %b, align 8
+ %mul = fmul double %i0, %i1
+ %call = tail call double @sin(double %mul) nounwind readnone
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8
+ %mul5 = fmul double %i3, %i4
+ %call5 = tail call double @sin(double %mul5) nounwind readnone
+ store double %call, double* %c, align 8
+ %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ store double %call5, double* %arrayidx5, align 8
+ ret void
+}
+
+; CHECK: cos_libm
+; CHECK: call <2 x double> @llvm.cos.v2f64
+; CHECK: ret void
+define void @cos_libm(double* %a, double* %b, double* %c) {
+entry:
+ %i0 = load double* %a, align 8
+ %i1 = load double* %b, align 8
+ %mul = fmul double %i0, %i1
+ %call = tail call double @cos(double %mul) nounwind readnone
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8
+ %mul5 = fmul double %i3, %i4
+ %call5 = tail call double @cos(double %mul5) nounwind readnone
+ store double %call, double* %c, align 8
+ %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ store double %call5, double* %arrayidx5, align 8
+ ret void
+}
+
+; CHECK: pow_libm
+; CHECK: call <2 x double> @llvm.pow.v2f64
+; CHECK: ret void
+define void @pow_libm(double* %a, double* %b, double* %c) {
+entry:
+ %i0 = load double* %a, align 8
+ %i1 = load double* %b, align 8
+ %mul = fmul double %i0, %i1
+ %call = tail call double @pow(double %mul,double %mul) nounwind readnone
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8
+ %mul5 = fmul double %i3, %i4
+ %call5 = tail call double @pow(double %mul5,double %mul5) nounwind readnone
+ store double %call, double* %c, align 8
+ %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ store double %call5, double* %arrayidx5, align 8
+ ret void
+}
+
+
+; CHECK: exp2_libm
+; CHECK: call <2 x double> @llvm.exp2.v2f64
+; CHECK: ret void
+define void @exp2_libm(double* %a, double* %b, double* %c) {
+entry:
+ %i0 = load double* %a, align 8
+ %i1 = load double* %b, align 8
+ %mul = fmul double %i0, %i1
+ %call = tail call double @exp2(double %mul) nounwind readnone
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8
+ %mul5 = fmul double %i3, %i4
+ %call5 = tail call double @exp2(double %mul5) nounwind readnone
+ store double %call, double* %c, align 8
+ %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ store double %call5, double* %arrayidx5, align 8
+ ret void
+}
+
+
+; Negative test case
+; CHECK: round_custom
+; CHECK-NOT: load <4 x i64>
+; CHECK: ret void
+define void @round_custom(i64* %a, i64* %b, i64* %c) {
+entry:
+ %i0 = load i64* %a, align 8
+ %i1 = load i64* %b, align 8
+ %mul = mul i64 %i0, %i1
+ %call = tail call i64 @round(i64 %mul) nounwind readnone
+ %arrayidx3 = getelementptr inbounds i64* %a, i64 1
+ %i3 = load i64* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds i64* %b, i64 1
+ %i4 = load i64* %arrayidx4, align 8
+ %mul5 = mul i64 %i3, %i4
+ %call5 = tail call i64 @round(i64 %mul5) nounwind readnone
+ store i64 %call, i64* %c, align 8
+ %arrayidx5 = getelementptr inbounds i64* %c, i64 1
+ store i64 %call5, i64* %arrayidx5, align 8
+ ret void
+}
+
+
+; CHECK: declare <2 x double> @llvm.sin.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.cos.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>) #0
+; CHECK: declare <2 x double> @llvm.exp2.v2f64(<2 x double>) #0
+
+; CHECK: attributes #0 = { nounwind readnone }
+
diff --git a/test/Transforms/SLPVectorizer/X86/consecutive-access.ll b/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
new file mode 100644
index 0000000..f4f112f
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
@@ -0,0 +1,175 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+@A = common global [2000 x double] zeroinitializer, align 16
+@B = common global [2000 x double] zeroinitializer, align 16
+@C = common global [2000 x float] zeroinitializer, align 16
+@D = common global [2000 x float] zeroinitializer, align 16
+
+; Currently SCEV isn't smart enough to figure out that the accesses
+; A[3*i], A[3*i+1] and A[3*i+2] are consecutive; in the future that will
+; hopefully be fixed. For now, check that this isn't vectorized.
+; CHECK-LABEL: foo_3double
+; CHECK-NOT: x double>
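+;
+; Rough source shape for @foo_3double (a sketch, not the original source):
+;
+;   void foo_3double(int u) {
+;     A[3*u]     += B[3*u];
+;     A[3*u + 1] += B[3*u + 1];
+;     A[3*u + 2] += B[3*u + 2];
+;   }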
+; Function Attrs: nounwind ssp uwtable
+define void @foo_3double(i32 %u) #0 {
+entry:
+ %u.addr = alloca i32, align 4
+ store i32 %u, i32* %u.addr, align 4
+ %mul = mul nsw i32 %u, 3
+ %idxprom = sext i32 %mul to i64
+ %arrayidx = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom
+ %0 = load double* %arrayidx, align 8
+ %arrayidx4 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom
+ %1 = load double* %arrayidx4, align 8
+ %add5 = fadd double %0, %1
+ store double %add5, double* %arrayidx, align 8
+ %add11 = add nsw i32 %mul, 1
+ %idxprom12 = sext i32 %add11 to i64
+ %arrayidx13 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom12
+ %2 = load double* %arrayidx13, align 8
+ %arrayidx17 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom12
+ %3 = load double* %arrayidx17, align 8
+ %add18 = fadd double %2, %3
+ store double %add18, double* %arrayidx13, align 8
+ %add24 = add nsw i32 %mul, 2
+ %idxprom25 = sext i32 %add24 to i64
+ %arrayidx26 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom25
+ %4 = load double* %arrayidx26, align 8
+ %arrayidx30 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom25
+ %5 = load double* %arrayidx30, align 8
+ %add31 = fadd double %4, %5
+ store double %add31, double* %arrayidx26, align 8
+ ret void
+}
+
+; SCEV should be able to tell that the accesses A[C1 + C2*i], A[C1 + C2*i + 1],
+; ... are consecutive if C2 is a power of 2, and C2 > C1 > 0.
+; Thus, the following code should be vectorized.
+; CHECK-LABEL: foo_2double
+; CHECK: x double>
+; Function Attrs: nounwind ssp uwtable
+define void @foo_2double(i32 %u) #0 {
+entry:
+ %u.addr = alloca i32, align 4
+ store i32 %u, i32* %u.addr, align 4
+ %mul = mul nsw i32 %u, 2
+ %idxprom = sext i32 %mul to i64
+ %arrayidx = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom
+ %0 = load double* %arrayidx, align 8
+ %arrayidx4 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom
+ %1 = load double* %arrayidx4, align 8
+ %add5 = fadd double %0, %1
+ store double %add5, double* %arrayidx, align 8
+ %add11 = add nsw i32 %mul, 1
+ %idxprom12 = sext i32 %add11 to i64
+ %arrayidx13 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom12
+ %2 = load double* %arrayidx13, align 8
+ %arrayidx17 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom12
+ %3 = load double* %arrayidx17, align 8
+ %add18 = fadd double %2, %3
+ store double %add18, double* %arrayidx13, align 8
+ ret void
+}
+
+; Similar to the previous test, but with a different data type.
+; CHECK-LABEL: foo_4float
+; CHECK: x float>
+; Function Attrs: nounwind ssp uwtable
+define void @foo_4float(i32 %u) #0 {
+entry:
+ %u.addr = alloca i32, align 4
+ store i32 %u, i32* %u.addr, align 4
+ %mul = mul nsw i32 %u, 4
+ %idxprom = sext i32 %mul to i64
+ %arrayidx = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom
+ %0 = load float* %arrayidx, align 4
+ %arrayidx4 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom
+ %1 = load float* %arrayidx4, align 4
+ %add5 = fadd float %0, %1
+ store float %add5, float* %arrayidx, align 4
+ %add11 = add nsw i32 %mul, 1
+ %idxprom12 = sext i32 %add11 to i64
+ %arrayidx13 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom12
+ %2 = load float* %arrayidx13, align 4
+ %arrayidx17 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom12
+ %3 = load float* %arrayidx17, align 4
+ %add18 = fadd float %2, %3
+ store float %add18, float* %arrayidx13, align 4
+ %add24 = add nsw i32 %mul, 2
+ %idxprom25 = sext i32 %add24 to i64
+ %arrayidx26 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom25
+ %4 = load float* %arrayidx26, align 4
+ %arrayidx30 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom25
+ %5 = load float* %arrayidx30, align 4
+ %add31 = fadd float %4, %5
+ store float %add31, float* %arrayidx26, align 4
+ %add37 = add nsw i32 %mul, 3
+ %idxprom38 = sext i32 %add37 to i64
+ %arrayidx39 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom38
+ %6 = load float* %arrayidx39, align 4
+ %arrayidx43 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom38
+ %7 = load float* %arrayidx43, align 4
+ %add44 = fadd float %6, %7
+ store float %add44, float* %arrayidx39, align 4
+ ret void
+}
+
+; Similar to the previous tests, but now we are dealing with an AddRec SCEV.
+; CHECK-LABEL: foo_loop
+; CHECK: x double>
+; Function Attrs: nounwind ssp uwtable
+define i32 @foo_loop(double* %A, i32 %n) #0 {
+entry:
+ %A.addr = alloca double*, align 8
+ %n.addr = alloca i32, align 4
+ %sum = alloca double, align 8
+ %i = alloca i32, align 4
+ store double* %A, double** %A.addr, align 8
+ store i32 %n, i32* %n.addr, align 4
+ store double 0.000000e+00, double* %sum, align 8
+ store i32 0, i32* %i, align 4
+ %cmp1 = icmp slt i32 0, %n
+ br i1 %cmp1, label %for.body.lr.ph, label %for.end
+
+for.body.lr.ph: ; preds = %entry
+ br label %for.body
+
+for.body: ; preds = %for.body.lr.ph, %for.body
+ %0 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
+ %1 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add7, %for.body ]
+ %mul = mul nsw i32 %0, 2
+ %idxprom = sext i32 %mul to i64
+ %arrayidx = getelementptr inbounds double* %A, i64 %idxprom
+ %2 = load double* %arrayidx, align 8
+ %mul1 = fmul double 7.000000e+00, %2
+ %add = add nsw i32 %mul, 1
+ %idxprom3 = sext i32 %add to i64
+ %arrayidx4 = getelementptr inbounds double* %A, i64 %idxprom3
+ %3 = load double* %arrayidx4, align 8
+ %mul5 = fmul double 7.000000e+00, %3
+ %add6 = fadd double %mul1, %mul5
+ %add7 = fadd double %1, %add6
+ store double %add7, double* %sum, align 8
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %i, align 4
+ %cmp = icmp slt i32 %inc, %n
+ br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
+
+for.cond.for.end_crit_edge: ; preds = %for.body
+ %split = phi double [ %add7, %for.body ]
+ br label %for.end
+
+for.end: ; preds = %for.cond.for.end_crit_edge, %entry
+ %.lcssa = phi double [ %split, %for.cond.for.end_crit_edge ], [ 0.000000e+00, %entry ]
+ %conv = fptosi double %.lcssa to i32
+ ret i32 %conv
+}
+
+attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.5.0 "}
diff --git a/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll b/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
new file mode 100644
index 0000000..ed22574
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
@@ -0,0 +1,31 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; We keep trying to vectorize the basic block even after we have already found a vectorized store in it.
+; CHECK: test1
+; CHECK: store <2 x double>
+; CHECK: ret
+define void @test1(double* %a, double* %b, double* %c, double* %d) {
+entry:
+ %i0 = load double* %a, align 8
+ %i1 = load double* %b, align 8
+ %mul = fmul double %i0, %i1
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8
+ %mul5 = fmul double %i3, %i4
+ store double %mul, double* %c, align 8
+ %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ store double %mul5, double* %arrayidx5, align 8
+ %0 = bitcast double* %a to <4 x i32>*
+ %1 = load <4 x i32>* %0, align 8
+ %2 = bitcast double* %b to <4 x i32>*
+ %3 = load <4 x i32>* %2, align 8
+ %4 = mul <4 x i32> %1, %3
+ %5 = bitcast double* %d to <4 x i32>*
+ store <4 x i32> %4, <4 x i32>* %5, align 8
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/cse.ll b/test/Transforms/SLPVectorizer/X86/cse.ll
index bbfd6f2..d2ad7eb 100644
--- a/test/Transforms/SLPVectorizer/X86/cse.ll
+++ b/test/Transforms/SLPVectorizer/X86/cse.ll
@@ -217,3 +217,33 @@ return: ; preds = %entry, %if.end
ret i32 0
}
+%class.B.53.55 = type { %class.A.52.54, double }
+%class.A.52.54 = type { double, double, double }
+
+@a = external global double, align 8
+
+define void @PR19646(%class.B.53.55* %this) {
+entry:
+ br i1 undef, label %if.end13, label %if.end13
+
+sw.epilog7: ; No predecessors!
+ %.in = getelementptr inbounds %class.B.53.55* %this, i64 0, i32 0, i32 1
+ %0 = load double* %.in, align 8
+ %add = fadd double undef, 0.000000e+00
+ %add6 = fadd double %add, %0
+ %1 = load double* @a, align 8
+ %add8 = fadd double %1, 0.000000e+00
+ %_dy = getelementptr inbounds %class.B.53.55* %this, i64 0, i32 0, i32 2
+ %2 = load double* %_dy, align 8
+ %add10 = fadd double %add8, %2
+ br i1 undef, label %if.then12, label %if.end13
+
+if.then12: ; preds = %sw.epilog7
+ %3 = load double* undef, align 8
+ br label %if.end13
+
+if.end13: ; preds = %if.then12, %sw.epilog7, %entry
+ %x.1 = phi double [ 0.000000e+00, %if.then12 ], [ %add6, %sw.epilog7 ], [ undef, %entry ], [ undef, %entry ]
+ %b.0 = phi double [ %3, %if.then12 ], [ %add10, %sw.epilog7 ], [ undef, %entry], [ undef, %entry ]
+ unreachable
+}
diff --git a/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll b/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll
index 7537ea3..9eda29f 100644
--- a/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll
+++ b/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll
@@ -195,11 +195,35 @@ define <4 x float> @simple_select_partial_vector(<4 x float> %a, <4 x float> %b,
ret <4 x float> %rb
}
+; Make sure that vectorization happens even if the insertelement operations
+; must be rescheduled. This case comes from compiling Julia.
+define <4 x float> @reschedule_extract(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: @reschedule_extract(
+; CHECK: %1 = fadd <4 x float> %a, %b
+ %a0 = extractelement <4 x float> %a, i32 0
+ %b0 = extractelement <4 x float> %b, i32 0
+ %c0 = fadd float %a0, %b0
+ %v0 = insertelement <4 x float> undef, float %c0, i32 0
+ %a1 = extractelement <4 x float> %a, i32 1
+ %b1 = extractelement <4 x float> %b, i32 1
+ %c1 = fadd float %a1, %b1
+ %v1 = insertelement <4 x float> %v0, float %c1, i32 1
+ %a2 = extractelement <4 x float> %a, i32 2
+ %b2 = extractelement <4 x float> %b, i32 2
+ %c2 = fadd float %a2, %b2
+ %v2 = insertelement <4 x float> %v1, float %c2, i32 2
+ %a3 = extractelement <4 x float> %a, i32 3
+ %b3 = extractelement <4 x float> %b, i32 3
+ %c3 = fadd float %a3, %b3
+ %v3 = insertelement <4 x float> %v2, float %c3, i32 3
+ ret <4 x float> %v3
+}
+
; Check that cost model for vectorization takes credit for
; instructions that are erased.
define <4 x float> @take_credit(<4 x float> %a, <4 x float> %b) {
; ZEROTHRESH-LABEL: @take_credit(
-; ZEROTHRESH-CHECK: %1 = fadd <4 x float> %a, %b
+; ZEROTHRESH: %1 = fadd <4 x float> %a, %b
%a0 = extractelement <4 x float> %a, i32 0
%b0 = extractelement <4 x float> %b, i32 0
%c0 = fadd float %a0, %b0
@@ -219,4 +243,40 @@ define <4 x float> @take_credit(<4 x float> %a, <4 x float> %b) {
ret <4 x float> %v3
}
+; Make sure we handle multiple trees that feed one build vector correctly.
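+; The build vector below is fed by two independent <2 x double> fadd trees;
+; the checks that follow verify both trees are vectorized and their lanes are
+; recombined via extractelement/insertelement.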
+define <4 x double> @multi_tree(double %w, double %x, double %y, double %z) {
+entry:
+ %t0 = fadd double %w, 0.000000e+00
+ %t1 = fadd double %x, 1.000000e+00
+ %t2 = fadd double %y, 2.000000e+00
+ %t3 = fadd double %z, 3.000000e+00
+ %t4 = fmul double %t0, 1.000000e+00
+ %i1 = insertelement <4 x double> undef, double %t4, i32 3
+ %t5 = fmul double %t1, 1.000000e+00
+ %i2 = insertelement <4 x double> %i1, double %t5, i32 2
+ %t6 = fmul double %t2, 1.000000e+00
+ %i3 = insertelement <4 x double> %i2, double %t6, i32 1
+ %t7 = fmul double %t3, 1.000000e+00
+ %i4 = insertelement <4 x double> %i3, double %t7, i32 0
+ ret <4 x double> %i4
+}
+; CHECK-LABEL: @multi_tree
+; CHECK-DAG: %[[V0:.+]] = insertelement <2 x double> undef, double %w, i32 0
+; CHECK-DAG: %[[V1:.+]] = insertelement <2 x double> %[[V0]], double %x, i32 1
+; CHECK-DAG: %[[V2:.+]] = fadd <2 x double> %[[V1]], <double 0.000000e+00, double 1.000000e+00>
+; CHECK-DAG: %[[V3:.+]] = insertelement <2 x double> undef, double %y, i32 0
+; CHECK-DAG: %[[V4:.+]] = insertelement <2 x double> %[[V3]], double %z, i32 1
+; CHECK-DAG: %[[V5:.+]] = fadd <2 x double> %[[V4]], <double 2.000000e+00, double 3.000000e+00>
+; CHECK-DAG: %[[V6:.+]] = fmul <2 x double> <double 1.000000e+00, double 1.000000e+00>, %[[V2]]
+; CHECK-DAG: %[[V7:.+]] = extractelement <2 x double> %[[V6]], i32 0
+; CHECK-DAG: %[[I1:.+]] = insertelement <4 x double> undef, double %[[V7]], i32 3
+; CHECK-DAG: %[[V8:.+]] = extractelement <2 x double> %[[V6]], i32 1
+; CHECK-DAG: %[[I2:.+]] = insertelement <4 x double> %[[I1]], double %[[V8]], i32 2
+; CHECK-DAG: %[[V9:.+]] = fmul <2 x double> <double 1.000000e+00, double 1.000000e+00>, %[[V5]]
+; CHECK-DAG: %[[V10:.+]] = extractelement <2 x double> %[[V9]], i32 0
+; CHECK-DAG: %[[I3:.+]] = insertelement <4 x double> %[[I2]], double %[[V10]], i32 1
+; CHECK-DAG: %[[V11:.+]] = extractelement <2 x double> %[[V9]], i32 1
+; CHECK-DAG: %[[I4:.+]] = insertelement <4 x double> %[[I3]], double %[[V11]], i32 0
+; CHECK: ret <4 x double> %[[I4]]
+
attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/Transforms/SLPVectorizer/X86/intrinsic.ll b/test/Transforms/SLPVectorizer/X86/intrinsic.ll
index 2b7ee75..30c5093 100644
--- a/test/Transforms/SLPVectorizer/X86/intrinsic.ll
+++ b/test/Transforms/SLPVectorizer/X86/intrinsic.ll
@@ -71,5 +71,49 @@ entry:
ret void
}
+declare i32 @llvm.bswap.i32(i32) nounwind readnone
+define void @vec_bswap_i32(i32* %a, i32* %b, i32* %c) {
+entry:
+ %i0 = load i32* %a, align 4
+ %i1 = load i32* %b, align 4
+ %add1 = add i32 %i0, %i1
+ %call1 = tail call i32 @llvm.bswap.i32(i32 %add1) nounwind readnone
+
+ %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+ %i2 = load i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+ %i3 = load i32* %arrayidx3, align 4
+ %add2 = add i32 %i2, %i3
+ %call2 = tail call i32 @llvm.bswap.i32(i32 %add2) nounwind readnone
+
+ %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+ %i4 = load i32* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+ %i5 = load i32* %arrayidx5, align 4
+ %add3 = add i32 %i4, %i5
+ %call3 = tail call i32 @llvm.bswap.i32(i32 %add3) nounwind readnone
+
+ %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+ %i6 = load i32* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+ %i7 = load i32* %arrayidx7, align 4
+ %add4 = add i32 %i6, %i7
+ %call4 = tail call i32 @llvm.bswap.i32(i32 %add4) nounwind readnone
+ store i32 %call1, i32* %c, align 4
+ %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ store i32 %call2, i32* %arrayidx8, align 4
+ %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ store i32 %call3, i32* %arrayidx9, align 4
+ %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ store i32 %call4, i32* %arrayidx10, align 4
+ ret void
+
+; CHECK-LABEL: @vec_bswap_i32(
+; CHECK: load <4 x i32>
+; CHECK: load <4 x i32>
+; CHECK: call <4 x i32> @llvm.bswap.v4i32
+; CHECK: store <4 x i32>
+; CHECK: ret
+}
diff --git a/test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll b/test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll
new file mode 100644
index 0000000..b250735
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll
@@ -0,0 +1,36 @@
+; RUN: opt < %s -slp-vectorizer -o - -S -slp-threshold=-1000
+
+target datalayout = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx--nvidiacl"
+
+; CTLZ cannot be vectorized currently because the second argument is a scalar
+; for both the scalar and vector forms of the intrinsic. In the future it
+; should be possible to vectorize such functions.
+; This test causes an assert if LLVM tries to vectorize CTLZ.
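+;
+; For reference, a sketch of the signatures involved (only the scalar form is
+; declared in this file): the scalar intrinsic is
+;   declare i8 @llvm.ctlz.i8(i8, i1)
+; and its vector overload would be
+;   declare <2 x i8> @llvm.ctlz.v2i8(<2 x i8>, i1)
+; note that the second (is_zero_undef) operand is a scalar i1 in both.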
+
+define <2 x i8> @cltz_test(<2 x i8> %x) #0 {
+entry:
+ %0 = extractelement <2 x i8> %x, i32 0
+ %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
+ %vecinit = insertelement <2 x i8> undef, i8 %call.i, i32 0
+ %1 = extractelement <2 x i8> %x, i32 1
+ %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
+ %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
+ ret <2 x i8> %vecinit2
+}
+
+define <2 x i8> @cltz_test2(<2 x i8> %x) #1 {
+entry:
+ %0 = extractelement <2 x i8> %x, i32 0
+ %1 = extractelement <2 x i8> %x, i32 1
+ %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
+ %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
+ %vecinit = insertelement <2 x i8> undef, i8 %call.i, i32 0
+ %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
+ ret <2 x i8> %vecinit2
+}
+
+declare i8 @llvm.ctlz.i8(i8, i1) #1
+
+attributes #0 = { alwaysinline nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
diff --git a/test/Transforms/SLPVectorizer/X86/value-bug.ll b/test/Transforms/SLPVectorizer/X86/value-bug.ll
new file mode 100644
index 0000000..64d2ae1
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/value-bug.ll
@@ -0,0 +1,80 @@
+; RUN: opt -slp-vectorizer < %s -S -mtriple="x86_64-grtev3-linux-gnu" -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-grtev3-linux-gnu"
+
+; We used to crash on this example because we were building a constant
+; expression during vectorization and the vectorizer expects instructions
+; as elements of the vectorized tree.
+; CHECK-LABEL: @test
+; PR19621
+
+define void @test() {
+bb279:
+ br label %bb283
+
+bb283:
+ %Av.sroa.8.0 = phi float [ undef, %bb279 ], [ %tmp315, %exit ]
+ %Av.sroa.5.0 = phi float [ undef, %bb279 ], [ %tmp319, %exit ]
+ %Av.sroa.3.0 = phi float [ undef, %bb279 ], [ %tmp307, %exit ]
+ %Av.sroa.0.0 = phi float [ undef, %bb279 ], [ %tmp317, %exit ]
+ br label %bb284
+
+bb284:
+ %tmp7.i = fpext float %Av.sroa.3.0 to double
+ %tmp8.i = fsub double %tmp7.i, undef
+ %tmp9.i = fsub double %tmp8.i, undef
+ %tmp17.i = fpext float %Av.sroa.8.0 to double
+ %tmp19.i = fsub double %tmp17.i, undef
+ %tmp20.i = fsub double %tmp19.i, undef
+ br label %bb21.i
+
+bb21.i:
+ br i1 undef, label %bb22.i, label %exit
+
+bb22.i:
+ %tmp24.i = fadd double undef, %tmp9.i
+ %tmp26.i = fadd double undef, %tmp20.i
+ br label %bb32.i
+
+bb32.i:
+ %xs.0.i = phi double [ %tmp24.i, %bb22.i ], [ 0.000000e+00, %bb32.i ]
+ %ys.0.i = phi double [ %tmp26.i, %bb22.i ], [ 0.000000e+00, %bb32.i ]
+ br i1 undef, label %bb32.i, label %bb21.i
+
+exit:
+ %tmp303 = fpext float %Av.sroa.0.0 to double
+ %tmp304 = fmul double %tmp303, undef
+ %tmp305 = fadd double undef, %tmp304
+ %tmp306 = fadd double %tmp305, undef
+ %tmp307 = fptrunc double %tmp306 to float
+ %tmp311 = fpext float %Av.sroa.5.0 to double
+ %tmp312 = fmul double %tmp311, 0.000000e+00
+ %tmp313 = fadd double undef, %tmp312
+ %tmp314 = fadd double %tmp313, undef
+ %tmp315 = fptrunc double %tmp314 to float
+ %tmp317 = fptrunc double undef to float
+ %tmp319 = fptrunc double undef to float
+ br label %bb283
+}
+
+; Make sure that we properly handle constant-folded vectorized trees. The
+; vectorizer starts at the pair (%t2, %t3) and will constant fold the tree.
+; The code that handles insertelement instructions must cope with this.
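+; (Constant folding yields %t2 = 1.0 and %t3 = 2.0, which is why the checks
+; below extract both lanes from <double 1.000000e+00, double 2.000000e+00>.)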
+define <4 x double> @constant_folding() {
+entry:
+ %t0 = fadd double 1.000000e+00, 0.000000e+00
+ %t1 = fadd double 1.000000e+00, 1.000000e+00
+ %t2 = fmul double %t0, 1.000000e+00
+ %i1 = insertelement <4 x double> undef, double %t2, i32 1
+ %t3 = fmul double %t1, 1.000000e+00
+ %i2 = insertelement <4 x double> %i1, double %t3, i32 0
+ ret <4 x double> %i2
+}
+
+; CHECK-LABEL: @constant_folding
+; CHECK: %[[V0:.+]] = extractelement <2 x double> <double 1.000000e+00, double 2.000000e+00>, i32 0
+; CHECK: %[[V1:.+]] = insertelement <4 x double> undef, double %[[V0]], i32 1
+; CHECK: %[[V2:.+]] = extractelement <2 x double> <double 1.000000e+00, double 2.000000e+00>, i32 1
+; CHECK: %[[V3:.+]] = insertelement <4 x double> %[[V1]], double %[[V2]], i32 0
+; CHECK: ret <4 x double> %[[V3]]
diff --git a/test/Transforms/SLPVectorizer/ARM64/lit.local.cfg b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
index 84ac981..40532cd 100644
--- a/test/Transforms/SLPVectorizer/ARM64/lit.local.cfg
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
@@ -1,3 +1,4 @@
targets = set(config.root.targets_to_build.split())
-if not 'ARM64' in targets:
+if 'NVPTX' not in targets:
config.unsupported = True
+
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
new file mode 100644
index 0000000..850fc4c
--- /dev/null
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
@@ -0,0 +1,59 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=PTX
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix=PTX
+; RUN: opt < %s -S -separate-const-offset-from-gep -gvn -dce | FileCheck %s --check-prefix=IR
+
+; Verifies the SeparateConstOffsetFromGEP pass.
+; The following code computes
+; *output = array[x][y] + array[x][y+1] + array[x+1][y] + array[x+1][y+1]
+;
+; We expect SeparateConstOffsetFromGEP to transform it to
+;
+; float *base = &array[x][y];
+; *output = base[0] + base[1] + base[32] + base[33];
+;
+; so the backend can emit PTX that uses fewer virtual registers.
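+;
+; (Each row of @array holds 32 floats = 128 bytes, so base[1], base[32], and
+; base[33] sit at byte offsets +4, +128, and +132 in the PTX checks below.)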
+
+target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx64-unknown-unknown"
+
+@array = internal addrspace(3) constant [32 x [32 x float]] zeroinitializer, align 4
+
+define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
+.preheader:
+ %0 = zext i32 %y to i64
+ %1 = zext i32 %x to i64
+ %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+ %3 = addrspacecast float addrspace(3)* %2 to float*
+ %4 = load float* %3, align 4
+ %5 = fadd float %4, 0.000000e+00
+ %6 = add i32 %y, 1
+ %7 = zext i32 %6 to i64
+ %8 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
+ %9 = addrspacecast float addrspace(3)* %8 to float*
+ %10 = load float* %9, align 4
+ %11 = fadd float %5, %10
+ %12 = add i32 %x, 1
+ %13 = zext i32 %12 to i64
+ %14 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
+ %15 = addrspacecast float addrspace(3)* %14 to float*
+ %16 = load float* %15, align 4
+ %17 = fadd float %11, %16
+ %18 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
+ %19 = addrspacecast float addrspace(3)* %18 to float*
+ %20 = load float* %19, align 4
+ %21 = fadd float %17, %20
+ store float %21, float* %output, align 4
+ ret void
+}
+
+; PTX-LABEL: sum_of_array(
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rl|r)[0-9]+]]{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
+
+; IR-LABEL: @sum_of_array(
+; IR: [[BASE_PTR:%[0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i32 %x, i32 %y
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
new file mode 100644
index 0000000..2e50f5f
--- /dev/null
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
@@ -0,0 +1,137 @@
+; RUN: opt < %s -separate-const-offset-from-gep -dce -S | FileCheck %s
+
+; Several unit tests for -separate-const-offset-from-gep. The transformation
+; heavily relies on TargetTransformInfo, so we put these tests under
+; target-specific folders.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+; The target triple is necessary; otherwise TargetTransformInfo rejects any
+; addressing mode.
+target triple = "nvptx64-unknown-unknown"
+
+%struct.S = type { float, double }
+
+@struct_array = global [1024 x %struct.S] zeroinitializer, align 16
+@float_2d_array = global [32 x [32 x float]] zeroinitializer, align 4
+
+; We should not extract any struct field indices, because fields in a struct
+; may have different types.
+define double* @struct(i32 %i) {
+entry:
+ %add = add nsw i32 %i, 5
+ %idxprom = sext i32 %add to i64
+ %p = getelementptr inbounds [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
+ ret double* %p
+}
+; CHECK-LABEL: @struct
+; CHECK: getelementptr [1024 x %struct.S]* @struct_array, i64 0, i32 %i, i32 1
+
+; We should be able to trace into sext/zext if it's directly used as a GEP
+; index.
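+; (Extracting +1 from the row index and +2 from the column index yields a
+; combined offset of 1 * 32 + 2 = 34 elements, as checked below.)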
+define float* @sext_zext(i32 %i, i32 %j) {
+entry:
+ %i1 = add i32 %i, 1
+ %j2 = add i32 %j, 2
+ %i1.ext = sext i32 %i1 to i64
+ %j2.ext = zext i32 %j2 to i64
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i1.ext, i64 %j2.ext
+ ret float* %p
+}
+; CHECK-LABEL: @sext_zext
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i32 %i, i32 %j
+; CHECK: getelementptr float* %{{[0-9]+}}, i64 34
+
+; We should be able to trace into sext/zext if it can be distributed to both
+; operands, e.g., sext (add nsw a, b) == add nsw (sext a), (sext b)
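+;
+; A sketch of the rewrite on the sext side (not literally checked below):
+;   %b1 = add nsw i32 %b, 1
+;   %b2 = sext i32 %b1 to i64
+; may be treated as
+;   %b.ext = sext i32 %b to i64
+;   %b2    = add nsw i64 %b.ext, 1
+; exposing the constant 1; with a row stride of 32 the extracted offset is
+; 1 * 32 + 1 = 33, matching the check.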
+define float* @ext_add_no_overflow(i64 %a, i32 %b, i64 %c, i32 %d) {
+ %b1 = add nsw i32 %b, 1
+ %b2 = sext i32 %b1 to i64
+ %i = add i64 %a, %b2
+ %d1 = add nuw i32 %d, 1
+ %d2 = zext i32 %d1 to i64
+ %j = add i64 %c, %d2
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
+ ret float* %p
+}
+; CHECK-LABEL: @ext_add_no_overflow
+; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[0-9]+}}, i64 %{{[0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR]], i64 33
+
+; Similar to @ext_add_no_overflow, we should be able to trace into sext/zext if
+; its operand is an "or" instruction.
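+; (Since %b1 = %b << 2 has its two low bits clear, or'ing in 1 and 2 acts as
+; addition, so the constants pass through the sext; the combined offset is
+; 1 * 32 + 2 = 34 elements, as checked below.)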
+define float* @ext_or(i64 %a, i32 %b) {
+entry:
+ %b1 = shl i32 %b, 2
+ %b2 = or i32 %b1, 1
+ %b3 = or i32 %b1, 2
+ %b2.ext = sext i32 %b2 to i64
+ %b3.ext = sext i32 %b3 to i64
+ %i = add i64 %a, %b2.ext
+ %j = add i64 %a, %b3.ext
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
+ ret float* %p
+}
+; CHECK-LABEL: @ext_or
+; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[0-9]+}}, i64 %{{[0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR]], i64 34
+
+; We should treat "or" with no common bits (%k) as "add", and leave "or" with
+; potentially common bits (%l) as is.
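+; (%j = %i << 2 has its two low bits clear, so "or %j, 3" adds 3 to the row
+; index, contributing 3 * 32 = 96 elements; "or %j, 4" may overlap bit 2 of
+; %j, so it must stay an "or".)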
+define float* @or(i64 %i) {
+entry:
+ %j = shl i64 %i, 2
+ %k = or i64 %j, 3 ; no common bits
+ %l = or i64 %j, 4 ; potentially common bits
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %k, i64 %l
+ ret float* %p
+}
+; CHECK-LABEL: @or
+; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %j, i64 %l
+; CHECK: getelementptr float* [[BASE_PTR]], i64 96
+
+; The subexpression (b + 5) is used in both "i = a + (b + 5)" and "*out = b +
+; 5". When extracting the constant offset 5, make sure "*out = b + 5" isn't
+; affected.
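+; (Extracting the 5 from the row index contributes 5 * 32 = 160 elements,
+; matching the check, while the store of %b5 keeps the original add.)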
+define float* @expr(i64 %a, i64 %b, i64* %out) {
+entry:
+ %b5 = add i64 %b, 5
+ %i = add i64 %b5, %a
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 0
+ store i64 %b5, i64* %out
+ ret float* %p
+}
+; CHECK-LABEL: @expr
+; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %0, i64 0
+; CHECK: getelementptr float* [[BASE_PTR]], i64 160
+; CHECK: store i64 %b5, i64* %out
+
+; Verifies we handle "sub" correctly.
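+; (%i2 contributes -5 rows = -160 elements, and %j2, rewritten as (0 - %j) + 5,
+; contributes +5 columns, so the expected constant offset is -155.)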
+define float* @sub(i64 %i, i64 %j) {
+ %i2 = sub i64 %i, 5 ; i - 5
+ %j2 = sub i64 5, %j ; 5 - j
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i2, i64 %j2
+ ret float* %p
+}
+; CHECK-LABEL: @sub
+; CHECK: %[[j2:[0-9]+]] = sub i64 0, %j
+; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
+; CHECK: getelementptr float* [[BASE_PTR]], i64 -155
+
+%struct.Packed = type <{ [3 x i32], [8 x i64] }> ; <> means packed
+
+; Verifies we can emit a correct uglygep if the address is not naturally aligned.
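+; (sizeof(%struct.Packed) is 3*4 + 8*8 = 76 bytes, so the extracted constant
+; offset is 1 * 76 + 3 * 8 = 100 bytes; since 100 is not a multiple of 8, the
+; offset cannot be expressed as a typed i64 GEP and an i8 "uglygep" is emitted.)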
+define i64* @packed_struct(i32 %i, i32 %j) {
+entry:
+ %s = alloca [1024 x %struct.Packed], align 16
+ %add = add nsw i32 %j, 3
+ %idxprom = sext i32 %add to i64
+ %add1 = add nsw i32 %i, 1
+ %idxprom2 = sext i32 %add1 to i64
+ %arrayidx3 = getelementptr inbounds [1024 x %struct.Packed]* %s, i64 0, i64 %idxprom2, i32 1, i64 %idxprom
+ ret i64* %arrayidx3
+}
+; CHECK-LABEL: @packed_struct
+; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [1024 x %struct.Packed]* %s, i64 0, i32 %i, i32 1, i32 %j
+; CHECK: [[CASTED_PTR:%[0-9]+]] = bitcast i64* [[BASE_PTR]] to i8*
+; CHECK: %uglygep = getelementptr i8* [[CASTED_PTR]], i64 100
+; CHECK: bitcast i8* %uglygep to i64*
diff --git a/test/Transforms/SimplifyCFG/extract-cost.ll b/test/Transforms/SimplifyCFG/extract-cost.ll
new file mode 100644
index 0000000..9c86725
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/extract-cost.ll
@@ -0,0 +1,22 @@
+; RUN: opt -simplifycfg -S < %s | FileCheck %s
+
+declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) #1
+
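+; The extractvalue in %if.end should be cheap enough to speculate, letting
+; SimplifyCFG fold the diamond into a select, as checked below.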
+define i32 @f(i32 %a, i32 %b) #0 {
+entry:
+ %uadd = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+ %cmp = extractvalue { i32, i1 } %uadd, 1
+ br i1 %cmp, label %return, label %if.end
+
+if.end: ; preds = %entry
+ %0 = extractvalue { i32, i1 } %uadd, 0
+ br label %return
+
+return: ; preds = %entry, %if.end
+ %retval.0 = phi i32 [ %0, %if.end ], [ 0, %entry ]
+ ret i32 %retval.0
+
+; CHECK-LABEL: @f(
+; CHECK-NOT: phi
+; CHECK: select
+}
diff --git a/test/Transforms/TailCallElim/basic.ll b/test/Transforms/TailCallElim/basic.ll
index 35420ab..341736d 100644
--- a/test/Transforms/TailCallElim/basic.ll
+++ b/test/Transforms/TailCallElim/basic.ll
@@ -143,3 +143,34 @@ cond_false:
call void @noarg()
ret i32* null
}
+
+; Don't tail call if a byval arg is captured.
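+; (The byval argument %a is a copy in @test9's own frame; because @use
+; receives a pointer into that frame, the call must not be marked "tail".)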
+define void @test9(i32* byval %a) {
+; CHECK-LABEL: define void @test9(
+; CHECK: {{^ *}}call void @use(
+ call void @use(i32* %a)
+ ret void
+}
+
+%struct.X = type { i8* }
+
+declare void @ctor(%struct.X*)
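+
+; A call that only uses the caller's incoming sret pointer can be marked
+; "tail", but a call passing the address of the local alloca %x cannot.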
+define void @test10(%struct.X* noalias sret %agg.result, i1 zeroext %b) {
+; CHECK-LABEL: @test10
+entry:
+ %x = alloca %struct.X, align 8
+ br i1 %b, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @ctor(%struct.X* %agg.result)
+; CHECK: tail call void @ctor
+ br label %return
+
+if.end:
+ call void @ctor(%struct.X* %x)
+; CHECK: call void @ctor
+ br label %return
+
+return:
+ ret void
+}