Diffstat (limited to 'test/Analysis')
-rw-r--r--  test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll | 8
-rw-r--r--  test/Analysis/BasicAA/2003-03-04-GEPCrash.ll | 4
-rw-r--r--  test/Analysis/BasicAA/2003-04-22-GEPProblem.ll | 8
-rw-r--r--  test/Analysis/BasicAA/2003-04-25-GEPCrash.ll | 4
-rw-r--r--  test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll | 6
-rw-r--r--  test/Analysis/BasicAA/2003-06-01-AliasCrash.ll | 12
-rw-r--r--  test/Analysis/BasicAA/2003-07-03-BasicAACrash.ll | 4
-rw-r--r--  test/Analysis/BasicAA/2003-09-19-LocalArgument.ll | 4
-rw-r--r--  test/Analysis/BasicAA/2003-11-04-SimpleCases.ll | 10
-rw-r--r--  test/Analysis/BasicAA/2003-12-11-ConstExprGEP.ll | 8
-rw-r--r--  test/Analysis/BasicAA/2004-07-28-MustAliasbug.ll | 6
-rw-r--r--  test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll | 12
-rw-r--r--  test/Analysis/BasicAA/2006-11-03-BasicAAVectorCrash.ll | 4
-rw-r--r--  test/Analysis/BasicAA/2007-01-13-BasePointerBadNoAlias.ll | 8
-rw-r--r--  test/Analysis/BasicAA/2007-08-01-NoAliasAndGEP.ll | 8
-rw-r--r--  test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll | 6
-rw-r--r--  test/Analysis/BasicAA/2007-10-24-ArgumentsGlobals.ll | 6
-rw-r--r--  test/Analysis/BasicAA/2007-11-05-SizeCrash.ll | 8
-rw-r--r--  test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll | 8
-rw-r--r--  test/Analysis/BasicAA/2008-04-15-Byval.ll | 4
-rw-r--r--  test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll | 4
-rw-r--r--  test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll | 2
-rw-r--r--  test/Analysis/BasicAA/2009-03-04-GEPNoalias.ll | 6
-rw-r--r--  test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll | 8
-rw-r--r--  test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll | 6
-rw-r--r--  test/Analysis/BasicAA/2010-09-15-GEP-SignedArithmetic.ll | 6
-rw-r--r--  test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll | 22
-rw-r--r--  test/Analysis/BasicAA/aligned-overread.ll | 8
-rw-r--r--  test/Analysis/BasicAA/args-rets-allocas-loads.ll | 4
-rw-r--r--  test/Analysis/BasicAA/byval.ll | 4
-rw-r--r--  test/Analysis/BasicAA/cas.ll | 4
-rw-r--r--  test/Analysis/BasicAA/constant-over-index.ll | 4
-rw-r--r--  test/Analysis/BasicAA/cs-cs.ll | 10
-rw-r--r--  test/Analysis/BasicAA/dag.ll | 2
-rw-r--r--  test/Analysis/BasicAA/featuretest.ll | 50
-rw-r--r--  test/Analysis/BasicAA/full-store-partial-alias.ll | 8
-rw-r--r--  test/Analysis/BasicAA/gcsetest.ll | 10
-rw-r--r--  test/Analysis/BasicAA/gep-alias.ll | 110
-rw-r--r--  test/Analysis/BasicAA/global-size.ll | 16
-rw-r--r--  test/Analysis/BasicAA/intrinsics.ll | 4
-rw-r--r--  test/Analysis/BasicAA/invariant_load.ll | 6
-rw-r--r--  test/Analysis/BasicAA/memset_pattern.ll | 2
-rw-r--r--  test/Analysis/BasicAA/modref.ll | 36
-rw-r--r--  test/Analysis/BasicAA/must-and-partial.ll | 8
-rw-r--r--  test/Analysis/BasicAA/no-escape-call.ll | 10
-rw-r--r--  test/Analysis/BasicAA/noalias-bugs.ll | 10
-rw-r--r--  test/Analysis/BasicAA/noalias-geps.ll | 32
-rw-r--r--  test/Analysis/BasicAA/noalias-param.ll | 4
-rw-r--r--  test/Analysis/BasicAA/nocapture.ll | 10
-rw-r--r--  test/Analysis/BasicAA/phi-aa.ll | 16
-rw-r--r--  test/Analysis/BasicAA/phi-spec-order.ll | 26
-rw-r--r--  test/Analysis/BasicAA/phi-speculation.ll | 28
-rw-r--r--  test/Analysis/BasicAA/pr18573.ll | 8
-rw-r--r--  test/Analysis/BasicAA/store-promote.ll | 14
-rw-r--r--  test/Analysis/BasicAA/struct-geps.ll | 50
-rw-r--r--  test/Analysis/BasicAA/tailcall-modref.ll | 4
-rw-r--r--  test/Analysis/BasicAA/underlying-value.ll | 8
-rw-r--r--  test/Analysis/BasicAA/unreachable-block.ll | 2
-rw-r--r--  test/Analysis/BasicAA/zext.ll | 64
-rw-r--r--  test/Analysis/BlockFrequencyInfo/basic.ll | 4
-rw-r--r--  test/Analysis/BranchProbabilityInfo/basic.ll | 12
-rw-r--r--  test/Analysis/BranchProbabilityInfo/loop.ll | 20
-rw-r--r--  test/Analysis/BranchProbabilityInfo/pr18705.ll | 18
-rw-r--r--  test/Analysis/CFLAliasAnalysis/asm-global-bugfix.ll | 4
-rw-r--r--  test/Analysis/CFLAliasAnalysis/branch-alias.ll | 73
-rw-r--r--  test/Analysis/CFLAliasAnalysis/const-expr-gep.ll | 48
-rw-r--r--  test/Analysis/CFLAliasAnalysis/constant-over-index.ll | 4
-rw-r--r--  test/Analysis/CFLAliasAnalysis/full-store-partial-alias.ll | 8
-rw-r--r--  test/Analysis/CFLAliasAnalysis/gep-signed-arithmetic.ll | 6
-rw-r--r--  test/Analysis/CFLAliasAnalysis/multilevel-combine.ll | 2
-rw-r--r--  test/Analysis/CFLAliasAnalysis/multilevel.ll | 4
-rw-r--r--  test/Analysis/CFLAliasAnalysis/must-and-partial.ll | 16
-rw-r--r--  test/Analysis/CFLAliasAnalysis/simple.ll | 10
-rw-r--r--  test/Analysis/CFLAliasAnalysis/stratified-attrs-indexing.ll | 2
-rw-r--r--  test/Analysis/CostModel/AArch64/store.ll | 4
-rw-r--r--  test/Analysis/CostModel/ARM/gep.ll | 48
-rw-r--r--  test/Analysis/CostModel/ARM/insertelement.ll | 12
-rw-r--r--  test/Analysis/CostModel/PowerPC/load_store.ll | 16
-rw-r--r--  test/Analysis/CostModel/X86/gep.ll | 48
-rw-r--r--  test/Analysis/CostModel/X86/intrinsic-cost.ll | 16
-rw-r--r--  test/Analysis/CostModel/X86/load_store.ll | 34
-rw-r--r--  test/Analysis/CostModel/X86/loop_v2.ll | 12
-rw-r--r--  test/Analysis/CostModel/X86/testshiftlshr.ll | 16
-rw-r--r--  test/Analysis/CostModel/X86/testshiftshl.ll | 16
-rw-r--r--  test/Analysis/CostModel/X86/vectorized-loop.ll | 16
-rw-r--r--  test/Analysis/Delinearization/a.ll | 4
-rw-r--r--  test/Analysis/Delinearization/gcd_multiply_expr.ll | 46
-rw-r--r--  test/Analysis/Delinearization/himeno_1.ll | 28
-rw-r--r--  test/Analysis/Delinearization/himeno_2.ll | 28
-rw-r--r--  test/Analysis/Delinearization/iv_times_constant_in_subscript.ll | 6
-rw-r--r--  test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll | 4
-rw-r--r--  test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll | 4
-rw-r--r--  test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll | 4
-rw-r--r--  test/Analysis/Delinearization/multidim_only_ivs_2d.ll | 12
-rw-r--r--  test/Analysis/Delinearization/multidim_only_ivs_2d_nested.ll | 2
-rw-r--r--  test/Analysis/Delinearization/multidim_only_ivs_3d.ll | 4
-rw-r--r--  test/Analysis/Delinearization/multidim_only_ivs_3d_cast.ll | 2
-rw-r--r--  test/Analysis/Delinearization/multidim_two_accesses_different_delinearization.ll | 4
-rw-r--r--  test/Analysis/Delinearization/undef.ll | 4
-rw-r--r--  test/Analysis/DependenceAnalysis/Banerjee.ll | 130
-rw-r--r--  test/Analysis/DependenceAnalysis/Constraints.ll | 103
-rw-r--r--  test/Analysis/DependenceAnalysis/Coupled.ll | 128
-rw-r--r--  test/Analysis/DependenceAnalysis/ExactRDIV.ll | 112
-rw-r--r--  test/Analysis/DependenceAnalysis/ExactSIV.ll | 112
-rw-r--r--  test/Analysis/DependenceAnalysis/GCD.ll | 100
-rw-r--r--  test/Analysis/DependenceAnalysis/Invariant.ll | 8
-rw-r--r--  test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll | 6
-rw-r--r--  test/Analysis/DependenceAnalysis/Preliminary.ll | 86
-rw-r--r--  test/Analysis/DependenceAnalysis/Propagating.ll | 102
-rw-r--r--  test/Analysis/DependenceAnalysis/Separability.ll | 56
-rw-r--r--  test/Analysis/DependenceAnalysis/StrongSIV.ll | 88
-rw-r--r--  test/Analysis/DependenceAnalysis/SymbolicRDIV.ll | 58
-rw-r--r--  test/Analysis/DependenceAnalysis/SymbolicSIV.ll | 72
-rw-r--r--  test/Analysis/DependenceAnalysis/UsefulGEP.ll | 51
-rw-r--r--  test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll | 56
-rw-r--r--  test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll | 56
-rw-r--r--  test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll | 56
-rw-r--r--  test/Analysis/DependenceAnalysis/ZIV.ll | 18
-rw-r--r--  test/Analysis/Dominators/invoke.ll | 2
-rw-r--r--  test/Analysis/GlobalsModRef/2008-09-03-ReadGlobals.ll | 2
-rw-r--r--  test/Analysis/GlobalsModRef/aliastest.ll | 2
-rw-r--r--  test/Analysis/GlobalsModRef/chaining-analysis.ll | 2
-rw-r--r--  test/Analysis/GlobalsModRef/indirect-global.ll | 8
-rw-r--r--  test/Analysis/GlobalsModRef/modreftest.ll | 2
-rw-r--r--  test/Analysis/GlobalsModRef/pr12351.ll | 6
-rw-r--r--  test/Analysis/GlobalsModRef/volatile-instrs.ll | 4
-rw-r--r--  test/Analysis/LazyCallGraph/basic.ll | 8
-rw-r--r--  test/Analysis/Lint/cppeh-catch-intrinsics-clean.ll | 6
-rw-r--r--  test/Analysis/Lint/cppeh-catch-intrinsics.ll | 28
-rw-r--r--  test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll | 14
-rw-r--r--  test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-no-dbg.ll | 60
-rw-r--r--  test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll | 38
-rw-r--r--  test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll | 6
-rw-r--r--  test/Analysis/ScalarEvolution/2007-07-15-NegativeStride.ll | 2
-rw-r--r--  test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll | 6
-rw-r--r--  test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll | 6
-rw-r--r--  test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll | 4
-rw-r--r--  test/Analysis/ScalarEvolution/2009-05-09-PointerEdgeCount.ll | 8
-rw-r--r--  test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll | 4
-rw-r--r--  test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll | 8
-rw-r--r--  test/Analysis/ScalarEvolution/SolveQuadraticEquation.ll | 2
-rw-r--r--  test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll | 2
-rw-r--r--  test/Analysis/ScalarEvolution/avoid-smax-0.ll | 8
-rw-r--r--  test/Analysis/ScalarEvolution/avoid-smax-1.ll | 30
-rw-r--r--  test/Analysis/ScalarEvolution/ext-antecedent.ll | 45
-rw-r--r--  test/Analysis/ScalarEvolution/fold.ll | 14
-rw-r--r--  test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll | 6
-rw-r--r--  test/Analysis/ScalarEvolution/infer-via-ranges.ll | 30
-rw-r--r--  test/Analysis/ScalarEvolution/load-with-range-metadata.ll | 4
-rw-r--r--  test/Analysis/ScalarEvolution/load.ll | 28
-rw-r--r--  test/Analysis/ScalarEvolution/max-trip-count-address-space.ll | 6
-rw-r--r--  test/Analysis/ScalarEvolution/max-trip-count.ll | 22
-rw-r--r--  test/Analysis/ScalarEvolution/min-max-exprs.ll | 6
-rw-r--r--  test/Analysis/ScalarEvolution/nowrap-preinc-limits.ll | 44
-rw-r--r--  test/Analysis/ScalarEvolution/nsw-offset-assume.ll | 22
-rw-r--r--  test/Analysis/ScalarEvolution/nsw-offset.ll | 22
-rw-r--r--  test/Analysis/ScalarEvolution/nsw.ll | 30
-rw-r--r--  test/Analysis/ScalarEvolution/pr22179.ll | 6
-rw-r--r--  test/Analysis/ScalarEvolution/pr22674.ll | 14
-rw-r--r--  test/Analysis/ScalarEvolution/pr22856.ll | 33
-rw-r--r--  test/Analysis/ScalarEvolution/range-signedness.ll | 39
-rw-r--r--  test/Analysis/ScalarEvolution/scev-aa.ll | 52
-rw-r--r--  test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll | 2
-rw-r--r--  test/Analysis/ScalarEvolution/sext-inreg.ll | 2
-rw-r--r--  test/Analysis/ScalarEvolution/sext-iv-0.ll | 14
-rw-r--r--  test/Analysis/ScalarEvolution/sext-iv-1.ll | 24
-rw-r--r--  test/Analysis/ScalarEvolution/sext-iv-2.ll | 8
-rw-r--r--  test/Analysis/ScalarEvolution/sle.ll | 4
-rw-r--r--  test/Analysis/ScalarEvolution/trip-count.ll | 66
-rw-r--r--  test/Analysis/ScalarEvolution/trip-count11.ll | 12
-rw-r--r--  test/Analysis/ScalarEvolution/trip-count12.ll | 4
-rw-r--r--  test/Analysis/ScalarEvolution/trip-count2.ll | 2
-rw-r--r--  test/Analysis/ScalarEvolution/trip-count3.ll | 6
-rw-r--r--  test/Analysis/ScalarEvolution/trip-count4.ll | 4
-rw-r--r--  test/Analysis/ScalarEvolution/trip-count5.ll | 10
-rw-r--r--  test/Analysis/ScalarEvolution/trip-count6.ll | 4
-rw-r--r--  test/Analysis/ScalarEvolution/trip-count7.ll | 30
-rw-r--r--  test/Analysis/ScalarEvolution/zext-signed-addrec.ll | 8
-rw-r--r--  test/Analysis/ScalarEvolution/zext-wrap.ll | 2
-rw-r--r--  test/Analysis/ScopedNoAliasAA/basic-domains.ll | 30
-rw-r--r--  test/Analysis/ScopedNoAliasAA/basic.ll | 16
-rw-r--r--  test/Analysis/ScopedNoAliasAA/basic2.ll | 22
-rw-r--r--  test/Analysis/TypeBasedAliasAnalysis/PR17620.ll | 8
-rw-r--r--  test/Analysis/TypeBasedAliasAnalysis/aliastest.ll | 16
-rw-r--r--  test/Analysis/TypeBasedAliasAnalysis/argument-promotion.ll | 4
-rw-r--r--  test/Analysis/TypeBasedAliasAnalysis/cyclic.ll | 26
-rw-r--r--  test/Analysis/TypeBasedAliasAnalysis/dse.ll | 16
-rw-r--r--  test/Analysis/TypeBasedAliasAnalysis/dynamic-indices.ll | 50
-rw-r--r--  test/Analysis/TypeBasedAliasAnalysis/gvn-nonlocal-type-mismatch.ll | 16
-rw-r--r--  test/Analysis/TypeBasedAliasAnalysis/licm.ll | 12
-rw-r--r--  test/Analysis/TypeBasedAliasAnalysis/placement-tbaa.ll | 30
-rw-r--r--  test/Analysis/TypeBasedAliasAnalysis/precedence.ll | 6
-rw-r--r--  test/Analysis/TypeBasedAliasAnalysis/sink.ll | 4
-rw-r--r--  test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll | 198
-rw-r--r--  test/Analysis/ValueTracking/memory-dereferenceable.ll | 14
-rw-r--r--  test/Analysis/ValueTracking/pr23011.ll | 15
196 files changed, 2333 insertions, 1826 deletions
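
Context note (not part of the patch itself): the hunks below all appear to apply the same mechanical rewrite, moving the LLVM IR tests to the explicit-pointee-type syntax in which getelementptr and load name the source type as a separate leading operand. A minimal sketch of the before/after syntax, using a hypothetical function that is not taken from any test in this diff:

; Hypothetical example illustrating the syntax change only.
define i32 @syntax_sketch(i32* %p, i64 %i) {
  ; old form:  %q = getelementptr i32* %p, i64 %i
  %q = getelementptr i32, i32* %p, i64 %i
  ; old form:  %v = load i32* %q
  %v = load i32, i32* %q
  ret i32 %v
}
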
diff --git a/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll b/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll
index 45f6088..d712e33 100644
--- a/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll
+++ b/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll
@@ -5,15 +5,15 @@
; RUN: opt < %s -basicaa -gvn -instcombine -S | FileCheck %s
define i32 @test() {
-; CHECK: %Y.DONOTREMOVE = load i32* %A
+; CHECK: %Y.DONOTREMOVE = load i32, i32* %A
; CHECK: %Z = sub i32 0, %Y.DONOTREMOVE
%A = alloca i32
store i32 0, i32* %A
- %X = load i32* %A
+ %X = load i32, i32* %A
%B = bitcast i32* %A to i8*
- %C = getelementptr i8* %B, i64 1
+ %C = getelementptr i8, i8* %B, i64 1
store i8 1, i8* %C ; Aliases %A
- %Y.DONOTREMOVE = load i32* %A
+ %Y.DONOTREMOVE = load i32, i32* %A
%Z = sub i32 %X, %Y.DONOTREMOVE
ret i32 %Z
}
diff --git a/test/Analysis/BasicAA/2003-03-04-GEPCrash.ll b/test/Analysis/BasicAA/2003-03-04-GEPCrash.ll
index 4f8eabb..5a93b3d 100644
--- a/test/Analysis/BasicAA/2003-03-04-GEPCrash.ll
+++ b/test/Analysis/BasicAA/2003-03-04-GEPCrash.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -basicaa -aa-eval -disable-output 2>/dev/null
; Test for a bug in BasicAA which caused a crash when querying equality of P1&P2
define void @test({[2 x i32],[2 x i32]}* %A, i64 %X, i64 %Y) {
- %P1 = getelementptr {[2 x i32],[2 x i32]}* %A, i64 0, i32 0, i64 %X
- %P2 = getelementptr {[2 x i32],[2 x i32]}* %A, i64 0, i32 1, i64 %Y
+ %P1 = getelementptr {[2 x i32],[2 x i32]}, {[2 x i32],[2 x i32]}* %A, i64 0, i32 0, i64 %X
+ %P2 = getelementptr {[2 x i32],[2 x i32]}, {[2 x i32],[2 x i32]}* %A, i64 0, i32 1, i64 %Y
ret void
}
diff --git a/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll b/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll
index 78f74a0..96ca071 100644
--- a/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll
+++ b/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll
@@ -4,11 +4,11 @@
define i32 @test(i32 *%Ptr, i64 %V) {
; CHECK: sub i32 %X, %Y
- %P2 = getelementptr i32* %Ptr, i64 1
- %P1 = getelementptr i32* %Ptr, i64 %V
- %X = load i32* %P1
+ %P2 = getelementptr i32, i32* %Ptr, i64 1
+ %P1 = getelementptr i32, i32* %Ptr, i64 %V
+ %X = load i32, i32* %P1
store i32 5, i32* %P2
- %Y = load i32* %P1
+ %Y = load i32, i32* %P1
%Z = sub i32 %X, %Y
ret i32 %Z
}
diff --git a/test/Analysis/BasicAA/2003-04-25-GEPCrash.ll b/test/Analysis/BasicAA/2003-04-25-GEPCrash.ll
index 97bc38e..ea26c22 100644
--- a/test/Analysis/BasicAA/2003-04-25-GEPCrash.ll
+++ b/test/Analysis/BasicAA/2003-04-25-GEPCrash.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -basicaa -aa-eval -disable-output 2>/dev/null
; Test for a bug in BasicAA which caused a crash when querying equality of P1&P2
define void @test([17 x i16]* %mask_bits) {
- %P1 = getelementptr [17 x i16]* %mask_bits, i64 0, i64 0
- %P2 = getelementptr [17 x i16]* %mask_bits, i64 252645134, i64 0
+ %P1 = getelementptr [17 x i16], [17 x i16]* %mask_bits, i64 0, i64 0
+ %P2 = getelementptr [17 x i16], [17 x i16]* %mask_bits, i64 252645134, i64 0
ret void
}
diff --git a/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll b/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll
index 8ca3469..fb5b3bb 100644
--- a/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll
+++ b/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll
@@ -6,13 +6,13 @@ define void @table_reindex(%struct..apr_table_t* %t.1) { ; No predecessors!
br label %loopentry
loopentry: ; preds = %0, %no_exit
- %tmp.101 = getelementptr %struct..apr_table_t* %t.1, i64 0, i32 0, i32 2
- %tmp.11 = load i32* %tmp.101 ; <i32> [#uses=0]
+ %tmp.101 = getelementptr %struct..apr_table_t, %struct..apr_table_t* %t.1, i64 0, i32 0, i32 2
+ %tmp.11 = load i32, i32* %tmp.101 ; <i32> [#uses=0]
br i1 false, label %no_exit, label %UnifiedExitNode
no_exit: ; preds = %loopentry
%tmp.25 = sext i32 0 to i64 ; <i64> [#uses=1]
- %tmp.261 = getelementptr %struct..apr_table_t* %t.1, i64 0, i32 3, i64 %tmp.25 ; <i32*> [#uses=1]
+ %tmp.261 = getelementptr %struct..apr_table_t, %struct..apr_table_t* %t.1, i64 0, i32 3, i64 %tmp.25 ; <i32*> [#uses=1]
store i32 0, i32* %tmp.261
br label %loopentry
diff --git a/test/Analysis/BasicAA/2003-06-01-AliasCrash.ll b/test/Analysis/BasicAA/2003-06-01-AliasCrash.ll
index 0abd384..ace5982 100644
--- a/test/Analysis/BasicAA/2003-06-01-AliasCrash.ll
+++ b/test/Analysis/BasicAA/2003-06-01-AliasCrash.ll
@@ -1,11 +1,11 @@
; RUN: opt < %s -basicaa -aa-eval -disable-output 2>/dev/null
define i32 @MTConcat([3 x i32]* %a.1) {
- %tmp.961 = getelementptr [3 x i32]* %a.1, i64 0, i64 4
- %tmp.97 = load i32* %tmp.961
- %tmp.119 = getelementptr [3 x i32]* %a.1, i64 1, i64 0
- %tmp.120 = load i32* %tmp.119
- %tmp.1541 = getelementptr [3 x i32]* %a.1, i64 0, i64 4
- %tmp.155 = load i32* %tmp.1541
+ %tmp.961 = getelementptr [3 x i32], [3 x i32]* %a.1, i64 0, i64 4
+ %tmp.97 = load i32, i32* %tmp.961
+ %tmp.119 = getelementptr [3 x i32], [3 x i32]* %a.1, i64 1, i64 0
+ %tmp.120 = load i32, i32* %tmp.119
+ %tmp.1541 = getelementptr [3 x i32], [3 x i32]* %a.1, i64 0, i64 4
+ %tmp.155 = load i32, i32* %tmp.1541
ret i32 0
}
diff --git a/test/Analysis/BasicAA/2003-07-03-BasicAACrash.ll b/test/Analysis/BasicAA/2003-07-03-BasicAACrash.ll
index 3e813fa..7aaae2a 100644
--- a/test/Analysis/BasicAA/2003-07-03-BasicAACrash.ll
+++ b/test/Analysis/BasicAA/2003-07-03-BasicAACrash.ll
@@ -4,7 +4,7 @@
%struct..RefRect = type { %struct..RefPoint, %struct..RefPoint }
define i32 @BMT_CommitPartDrawObj() {
- %tmp.19111 = getelementptr %struct..RefRect* null, i64 0, i32 0, i32 1, i32 2
- %tmp.20311 = getelementptr %struct..RefRect* null, i64 0, i32 1, i32 1, i32 2
+ %tmp.19111 = getelementptr %struct..RefRect, %struct..RefRect* null, i64 0, i32 0, i32 1, i32 2
+ %tmp.20311 = getelementptr %struct..RefRect, %struct..RefRect* null, i64 0, i32 1, i32 1, i32 2
ret i32 0
}
diff --git a/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll b/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll
index fd4c239..1e75d64 100644
--- a/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll
+++ b/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll
@@ -7,9 +7,9 @@
define i32 @test(i32* %P) {
%X = alloca i32
- %V1 = load i32* %P
+ %V1 = load i32, i32* %P
store i32 0, i32* %X
- %V2 = load i32* %P
+ %V2 = load i32, i32* %P
%Diff = sub i32 %V1, %V2
ret i32 %Diff
}
diff --git a/test/Analysis/BasicAA/2003-11-04-SimpleCases.ll b/test/Analysis/BasicAA/2003-11-04-SimpleCases.ll
index f2b06cb..f8d4195 100644
--- a/test/Analysis/BasicAA/2003-11-04-SimpleCases.ll
+++ b/test/Analysis/BasicAA/2003-11-04-SimpleCases.ll
@@ -11,10 +11,10 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; CHECK-NOT: MayAlias:
define void @test(%T* %P) {
- %A = getelementptr %T* %P, i64 0
- %B = getelementptr %T* %P, i64 0, i32 0
- %C = getelementptr %T* %P, i64 0, i32 1
- %D = getelementptr %T* %P, i64 0, i32 1, i64 0
- %E = getelementptr %T* %P, i64 0, i32 1, i64 5
+ %A = getelementptr %T, %T* %P, i64 0
+ %B = getelementptr %T, %T* %P, i64 0, i32 0
+ %C = getelementptr %T, %T* %P, i64 0, i32 1
+ %D = getelementptr %T, %T* %P, i64 0, i32 1, i64 0
+ %E = getelementptr %T, %T* %P, i64 0, i32 1, i64 5
ret void
}
diff --git a/test/Analysis/BasicAA/2003-12-11-ConstExprGEP.ll b/test/Analysis/BasicAA/2003-12-11-ConstExprGEP.ll
index 42512b8..957502f 100644
--- a/test/Analysis/BasicAA/2003-12-11-ConstExprGEP.ll
+++ b/test/Analysis/BasicAA/2003-12-11-ConstExprGEP.ll
@@ -13,10 +13,10 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; CHECK-NOT: MayAlias:
define void @test() {
- %D = getelementptr %T* @G, i64 0, i32 0
- %E = getelementptr %T* @G, i64 0, i32 1, i64 5
- %F = getelementptr i32* getelementptr (%T* @G, i64 0, i32 0), i64 0
- %X = getelementptr [10 x i8]* getelementptr (%T* @G, i64 0, i32 1), i64 0, i64 5
+ %D = getelementptr %T, %T* @G, i64 0, i32 0
+ %E = getelementptr %T, %T* @G, i64 0, i32 1, i64 5
+ %F = getelementptr i32, i32* getelementptr (%T, %T* @G, i64 0, i32 0), i64 0
+ %X = getelementptr [10 x i8], [10 x i8]* getelementptr (%T, %T* @G, i64 0, i32 1), i64 0, i64 5
ret void
}
diff --git a/test/Analysis/BasicAA/2004-07-28-MustAliasbug.ll b/test/Analysis/BasicAA/2004-07-28-MustAliasbug.ll
index 578aa594..16573a7 100644
--- a/test/Analysis/BasicAA/2004-07-28-MustAliasbug.ll
+++ b/test/Analysis/BasicAA/2004-07-28-MustAliasbug.ll
@@ -2,9 +2,9 @@
define void @test({i32,i32 }* %P) {
; CHECK: store i32 0, i32* %X
- %Q = getelementptr {i32,i32}* %P, i32 1
- %X = getelementptr {i32,i32}* %Q, i32 0, i32 1
- %Y = getelementptr {i32,i32}* %Q, i32 1, i32 1
+ %Q = getelementptr {i32,i32}, {i32,i32}* %P, i32 1
+ %X = getelementptr {i32,i32}, {i32,i32}* %Q, i32 0, i32 1
+ %Y = getelementptr {i32,i32}, {i32,i32}* %Q, i32 1, i32 1
store i32 0, i32* %X
store i32 1, i32* %Y
ret void
diff --git a/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll b/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll
index 06a804c..eb05e1e 100644
--- a/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll
+++ b/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll
@@ -12,12 +12,12 @@ entry:
no_exit: ; preds = %no_exit, %entry
%i.0.0 = phi i32 [ 0, %entry ], [ %inc, %no_exit ] ; <i32> [#uses=2]
- %tmp.6 = getelementptr [3 x [3 x i32]]* %X, i32 0, i32 0, i32 %i.0.0 ; <i32*> [#uses=1]
+ %tmp.6 = getelementptr [3 x [3 x i32]], [3 x [3 x i32]]* %X, i32 0, i32 0, i32 %i.0.0 ; <i32*> [#uses=1]
store i32 1, i32* %tmp.6
- %tmp.8 = getelementptr [3 x [3 x i32]]* %X, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp.9 = load i32* %tmp.8 ; <i32> [#uses=1]
- %tmp.11 = getelementptr [3 x [3 x i32]]* %X, i32 0, i32 1, i32 0 ; <i32*> [#uses=1]
- %tmp.12 = load i32* %tmp.11 ; <i32> [#uses=1]
+ %tmp.8 = getelementptr [3 x [3 x i32]], [3 x [3 x i32]]* %X, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp.9 = load i32, i32* %tmp.8 ; <i32> [#uses=1]
+ %tmp.11 = getelementptr [3 x [3 x i32]], [3 x [3 x i32]]* %X, i32 0, i32 1, i32 0 ; <i32*> [#uses=1]
+ %tmp.12 = load i32, i32* %tmp.11 ; <i32> [#uses=1]
%tmp.13 = add i32 %tmp.12, %tmp.9 ; <i32> [#uses=1]
%inc = add i32 %i.0.0, 1 ; <i32> [#uses=2]
%tmp.2 = icmp slt i32 %inc, %N ; <i1> [#uses=1]
@@ -25,7 +25,7 @@ no_exit: ; preds = %no_exit, %entry
loopexit: ; preds = %no_exit, %entry
%Y.0.1 = phi i32 [ 0, %entry ], [ %tmp.13, %no_exit ] ; <i32> [#uses=1]
- %tmp.4 = getelementptr [3 x [3 x i32]]* %X, i32 0, i32 0 ; <[3 x i32]*> [#uses=1]
+ %tmp.4 = getelementptr [3 x [3 x i32]], [3 x [3 x i32]]* %X, i32 0, i32 0 ; <[3 x i32]*> [#uses=1]
%tmp.15 = call i32 (...)* @foo( [3 x i32]* %tmp.4, i32 %Y.0.1 ) ; <i32> [#uses=0]
ret void
}
diff --git a/test/Analysis/BasicAA/2006-11-03-BasicAAVectorCrash.ll b/test/Analysis/BasicAA/2006-11-03-BasicAAVectorCrash.ll
index 0db5815..a331f7e 100644
--- a/test/Analysis/BasicAA/2006-11-03-BasicAAVectorCrash.ll
+++ b/test/Analysis/BasicAA/2006-11-03-BasicAAVectorCrash.ll
@@ -22,7 +22,7 @@ cond_true264.i: ; preds = %bb239.i
ret void
cond_false277.i: ; preds = %bb239.i
- %tmp1062.i = getelementptr [2 x <4 x i32>]* null, i32 0, i32 1 ; <<4 x i32>*> [#uses=1]
+ %tmp1062.i = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* null, i32 0, i32 1 ; <<4 x i32>*> [#uses=1]
store <4 x i32> zeroinitializer, <4 x i32>* %tmp1062.i
br i1 false, label %cond_true1032.i, label %cond_false1063.i85
@@ -33,7 +33,7 @@ bb1013.i: ; preds = %bb205.i
ret void
cond_true1032.i: ; preds = %cond_false277.i
- %tmp1187.i = getelementptr [2 x <4 x i32>]* null, i32 0, i32 0, i32 7 ; <i32*> [#uses=1]
+ %tmp1187.i = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* null, i32 0, i32 0, i32 7 ; <i32*> [#uses=1]
store i32 0, i32* %tmp1187.i
br label %bb2037.i
diff --git a/test/Analysis/BasicAA/2007-01-13-BasePointerBadNoAlias.ll b/test/Analysis/BasicAA/2007-01-13-BasePointerBadNoAlias.ll
index 46b6aaf..86bbd44 100644
--- a/test/Analysis/BasicAA/2007-01-13-BasePointerBadNoAlias.ll
+++ b/test/Analysis/BasicAA/2007-01-13-BasePointerBadNoAlias.ll
@@ -21,14 +21,14 @@ target triple = "i686-apple-darwin8"
; CHECK: ret i32 %Z
define i32 @test(%struct.closure_type* %tmp18169) {
- %tmp18174 = getelementptr %struct.closure_type* %tmp18169, i32 0, i32 4, i32 0, i32 0 ; <i32*> [#uses=2]
+ %tmp18174 = getelementptr %struct.closure_type, %struct.closure_type* %tmp18169, i32 0, i32 4, i32 0, i32 0 ; <i32*> [#uses=2]
%tmp18269 = bitcast i32* %tmp18174 to %struct.STYLE* ; <%struct.STYLE*> [#uses=1]
- %A = load i32* %tmp18174 ; <i32> [#uses=1]
+ %A = load i32, i32* %tmp18174 ; <i32> [#uses=1]
- %tmp18272 = getelementptr %struct.STYLE* %tmp18269, i32 0, i32 0, i32 0, i32 2 ; <i16*> [#uses=1]
+ %tmp18272 = getelementptr %struct.STYLE, %struct.STYLE* %tmp18269, i32 0, i32 0, i32 0, i32 2 ; <i16*> [#uses=1]
store i16 123, i16* %tmp18272
- %Q = load i32* %tmp18174 ; <i32> [#uses=1]
+ %Q = load i32, i32* %tmp18174 ; <i32> [#uses=1]
%Z = sub i32 %A, %Q ; <i32> [#uses=1]
ret i32 %Z
}
diff --git a/test/Analysis/BasicAA/2007-08-01-NoAliasAndGEP.ll b/test/Analysis/BasicAA/2007-08-01-NoAliasAndGEP.ll
index d11e75d..8388d6c 100644
--- a/test/Analysis/BasicAA/2007-08-01-NoAliasAndGEP.ll
+++ b/test/Analysis/BasicAA/2007-08-01-NoAliasAndGEP.ll
@@ -8,10 +8,10 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; CHECK: 6 partial alias responses
define void @foo(i32* noalias %p, i32* noalias %q, i32 %i, i32 %j) {
- %Ipointer = getelementptr i32* %p, i32 %i
- %qi = getelementptr i32* %q, i32 %i
- %Jpointer = getelementptr i32* %p, i32 %j
- %qj = getelementptr i32* %q, i32 %j
+ %Ipointer = getelementptr i32, i32* %p, i32 %i
+ %qi = getelementptr i32, i32* %q, i32 %i
+ %Jpointer = getelementptr i32, i32* %p, i32 %j
+ %qj = getelementptr i32, i32* %q, i32 %j
store i32 0, i32* %p
store i32 0, i32* %Ipointer
store i32 0, i32* %Jpointer
diff --git a/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll b/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll
index ec0e2bd..5f0e117 100644
--- a/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll
+++ b/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll
@@ -5,10 +5,10 @@ declare i16 @llvm.cttz.i16(i16, i1)
define i32 @test(i32* %P, i16* %Q) {
; CHECK: ret i32 0
- %A = load i16* %Q ; <i16> [#uses=1]
- %x = load i32* %P ; <i32> [#uses=1]
+ %A = load i16, i16* %Q ; <i16> [#uses=1]
+ %x = load i32, i32* %P ; <i32> [#uses=1]
%B = call i16 @llvm.cttz.i16( i16 %A, i1 true ) ; <i16> [#uses=1]
- %y = load i32* %P ; <i32> [#uses=1]
+ %y = load i32, i32* %P ; <i32> [#uses=1]
store i16 %B, i16* %Q
%z = sub i32 %x, %y ; <i32> [#uses=1]
ret i32 %z
diff --git a/test/Analysis/BasicAA/2007-10-24-ArgumentsGlobals.ll b/test/Analysis/BasicAA/2007-10-24-ArgumentsGlobals.ll
index 429160e..2d33e94 100644
--- a/test/Analysis/BasicAA/2007-10-24-ArgumentsGlobals.ll
+++ b/test/Analysis/BasicAA/2007-10-24-ArgumentsGlobals.ll
@@ -8,9 +8,9 @@ define i32 @_Z3fooP1A(%struct.A* %b) {
; CHECK: %tmp7 = load
; CHECK: ret i32 %tmp7
entry:
- store i32 1, i32* getelementptr (%struct.B* @a, i32 0, i32 0, i32 0), align 8
- %tmp4 = getelementptr %struct.A* %b, i32 0, i32 0 ;<i32*> [#uses=1]
+ store i32 1, i32* getelementptr (%struct.B, %struct.B* @a, i32 0, i32 0, i32 0), align 8
+ %tmp4 = getelementptr %struct.A, %struct.A* %b, i32 0, i32 0 ;<i32*> [#uses=1]
store i32 0, i32* %tmp4, align 4
- %tmp7 = load i32* getelementptr (%struct.B* @a, i32 0, i32 0, i32 0), align 8 ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* getelementptr (%struct.B, %struct.B* @a, i32 0, i32 0, i32 0), align 8 ; <i32> [#uses=1]
ret i32 %tmp7
}
diff --git a/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll b/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll
index 32d9930..069bd0b 100644
--- a/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll
+++ b/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll
@@ -14,18 +14,18 @@ target triple = "x86_64-unknown-linux-gnu"
define i32 @uhci_suspend(%struct.usb_hcd* %hcd) {
entry:
- %tmp17 = getelementptr %struct.usb_hcd* %hcd, i32 0, i32 2, i64 1
+ %tmp17 = getelementptr %struct.usb_hcd, %struct.usb_hcd* %hcd, i32 0, i32 2, i64 1
; <i64*> [#uses=1]
%tmp1718 = bitcast i64* %tmp17 to i32* ; <i32*> [#uses=1]
- %tmp19 = load i32* %tmp1718, align 4 ; <i32> [#uses=0]
+ %tmp19 = load i32, i32* %tmp1718, align 4 ; <i32> [#uses=0]
br i1 false, label %cond_true34, label %done_okay
cond_true34: ; preds = %entry
- %tmp631 = getelementptr %struct.usb_hcd* %hcd, i32 0, i32 2, i64
+ %tmp631 = getelementptr %struct.usb_hcd, %struct.usb_hcd* %hcd, i32 0, i32 2, i64
2305843009213693950 ; <i64*> [#uses=1]
%tmp70 = bitcast i64* %tmp631 to %struct.device**
- %tmp71 = load %struct.device** %tmp70, align 8
+ %tmp71 = load %struct.device*, %struct.device** %tmp70, align 8
ret i32 undef
diff --git a/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll b/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll
index cd997ea..20be13d 100644
--- a/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll
+++ b/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll
@@ -13,17 +13,17 @@ target triple = "x86_64-unknown-linux-gnu"
define i32 @ehci_pci_setup(%struct.usb_hcd* %hcd) {
entry:
- %tmp14 = getelementptr %struct.usb_hcd* %hcd, i32 0, i32 0, i32 0 ; <%struct.device**> [#uses=1]
- %tmp15 = load %struct.device** %tmp14, align 8 ; <%struct.device*> [#uses=0]
+ %tmp14 = getelementptr %struct.usb_hcd, %struct.usb_hcd* %hcd, i32 0, i32 0, i32 0 ; <%struct.device**> [#uses=1]
+ %tmp15 = load %struct.device*, %struct.device** %tmp14, align 8 ; <%struct.device*> [#uses=0]
br i1 false, label %bb25, label %return
bb25: ; preds = %entry
br i1 false, label %cond_true, label %return
cond_true: ; preds = %bb25
- %tmp601 = getelementptr %struct.usb_hcd* %hcd, i32 0, i32 1, i64 2305843009213693951 ; <i64*> [#uses=1]
+ %tmp601 = getelementptr %struct.usb_hcd, %struct.usb_hcd* %hcd, i32 0, i32 1, i64 2305843009213693951 ; <i64*> [#uses=1]
%tmp67 = bitcast i64* %tmp601 to %struct.device** ; <%struct.device**> [#uses=1]
- %tmp68 = load %struct.device** %tmp67, align 8 ; <%struct.device*> [#uses=0]
+ %tmp68 = load %struct.device*, %struct.device** %tmp67, align 8 ; <%struct.device*> [#uses=0]
ret i32 undef
return: ; preds = %bb25, %entry
diff --git a/test/Analysis/BasicAA/2008-04-15-Byval.ll b/test/Analysis/BasicAA/2008-04-15-Byval.ll
index 2ea0314..9df12bd 100644
--- a/test/Analysis/BasicAA/2008-04-15-Byval.ll
+++ b/test/Analysis/BasicAA/2008-04-15-Byval.ll
@@ -7,8 +7,8 @@ target triple = "i386-apple-darwin8"
define void @foo(%struct.x* byval align 4 %X) nounwind {
; CHECK: store i32 2, i32* %tmp1
entry:
- %tmp = getelementptr %struct.x* %X, i32 0, i32 0 ; <[4 x i32]*> [#uses=1]
- %tmp1 = getelementptr [4 x i32]* %tmp, i32 0, i32 3 ; <i32*> [#uses=1]
+ %tmp = getelementptr %struct.x, %struct.x* %X, i32 0, i32 0 ; <[4 x i32]*> [#uses=1]
+ %tmp1 = getelementptr [4 x i32], [4 x i32]* %tmp, i32 0, i32 3 ; <i32*> [#uses=1]
store i32 2, i32* %tmp1, align 4
%tmp2 = call i32 (...)* @bar( %struct.x* byval align 4 %X ) nounwind ; <i32> [#uses=0]
br label %return
diff --git a/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll b/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll
index 1709144..7d2cde4 100644
--- a/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll
+++ b/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll
@@ -9,7 +9,7 @@ target triple = "i686-pc-linux-gnu"
define void @test291() nounwind {
entry:
- store i32 1138410269, i32* getelementptr ([5 x %struct.S291]* @a291, i32 0, i32 2, i32 1)
- %tmp54 = load i32* bitcast (%struct.S291* getelementptr ([5 x %struct.S291]* @a291, i32 0, i32 2) to i32*), align 4 ; <i32> [#uses=0]
+ store i32 1138410269, i32* getelementptr ([5 x %struct.S291], [5 x %struct.S291]* @a291, i32 0, i32 2, i32 1)
+ %tmp54 = load i32, i32* bitcast (%struct.S291* getelementptr ([5 x %struct.S291], [5 x %struct.S291]* @a291, i32 0, i32 2) to i32*), align 4 ; <i32> [#uses=0]
unreachable
}
diff --git a/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll b/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll
index 3db9a3f..49a742c 100644
--- a/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll
+++ b/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll
@@ -9,6 +9,6 @@ define i32 @foo() {
%B = call i32* @_Znwj(i32 4)
store i32 1, i32* %A
store i32 2, i32* %B
- %C = load i32* %A
+ %C = load i32, i32* %A
ret i32 %C
}
diff --git a/test/Analysis/BasicAA/2009-03-04-GEPNoalias.ll b/test/Analysis/BasicAA/2009-03-04-GEPNoalias.ll
index add7dee..65dcf5c 100644
--- a/test/Analysis/BasicAA/2009-03-04-GEPNoalias.ll
+++ b/test/Analysis/BasicAA/2009-03-04-GEPNoalias.ll
@@ -3,12 +3,12 @@
declare noalias i32* @noalias()
define i32 @test(i32 %x) {
-; CHECK: load i32* %a
+; CHECK: load i32, i32* %a
%a = call i32* @noalias()
store i32 1, i32* %a
- %b = getelementptr i32* %a, i32 %x
+ %b = getelementptr i32, i32* %a, i32 %x
store i32 2, i32* %b
- %c = load i32* %a
+ %c = load i32, i32* %a
ret i32 %c
}
diff --git a/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll b/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll
index 4b6a12e..97a9251 100644
--- a/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll
+++ b/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll
@@ -2,12 +2,12 @@
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
define i8 @foo(i8* %ptr) {
- %P = getelementptr i8* %ptr, i32 0
- %Q = getelementptr i8* %ptr, i32 1
+ %P = getelementptr i8, i8* %ptr, i32 0
+ %Q = getelementptr i8, i8* %ptr, i32 1
; CHECK: getelementptr
- %X = load i8* %P
+ %X = load i8, i8* %P
%Y = atomicrmw add i8* %Q, i8 1 monotonic
- %Z = load i8* %P
+ %Z = load i8, i8* %P
; CHECK-NOT: = load
%A = sub i8 %X, %Z
ret i8 %A
diff --git a/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll b/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll
index c546d68..43ee96c 100644
--- a/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll
+++ b/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll
@@ -15,7 +15,7 @@ entry:
br i1 %tmp, label %bb, label %bb1
bb:
- %b = getelementptr i32* %a, i32 0
+ %b = getelementptr i32, i32* %a, i32 0
br label %bb2
bb1:
@@ -23,9 +23,9 @@ bb1:
bb2:
%P = phi i32* [ %b, %bb ], [ @Y, %bb1 ]
- %tmp1 = load i32* @Z, align 4
+ %tmp1 = load i32, i32* @Z, align 4
store i32 123, i32* %P, align 4
- %tmp2 = load i32* @Z, align 4
+ %tmp2 = load i32, i32* @Z, align 4
br label %return
return:
diff --git a/test/Analysis/BasicAA/2010-09-15-GEP-SignedArithmetic.ll b/test/Analysis/BasicAA/2010-09-15-GEP-SignedArithmetic.ll
index 6656980..b2e7a60 100644
--- a/test/Analysis/BasicAA/2010-09-15-GEP-SignedArithmetic.ll
+++ b/test/Analysis/BasicAA/2010-09-15-GEP-SignedArithmetic.ll
@@ -8,10 +8,10 @@ target datalayout = "e-p:32:32:32"
define i32 @test(i32* %tab, i32 %indvar) nounwind {
%tmp31 = mul i32 %indvar, -2
%tmp32 = add i32 %tmp31, 30
- %t.5 = getelementptr i32* %tab, i32 %tmp32
- %loada = load i32* %tab
+ %t.5 = getelementptr i32, i32* %tab, i32 %tmp32
+ %loada = load i32, i32* %tab
store i32 0, i32* %t.5
- %loadb = load i32* %tab
+ %loadb = load i32, i32* %tab
%rval = add i32 %loada, %loadb
ret i32 %rval
}
diff --git a/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll b/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll
index bc2512e..08db5ec 100644
--- a/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll
+++ b/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll
@@ -10,27 +10,27 @@ target datalayout = "e"
define i32 @main() {
%t = alloca %struct.foo, align 4
- %1 = getelementptr inbounds %struct.foo* %t, i32 0, i32 0
+ %1 = getelementptr inbounds %struct.foo, %struct.foo* %t, i32 0, i32 0
store i32 1, i32* %1, align 4
- %2 = getelementptr inbounds %struct.foo* %t, i64 1
+ %2 = getelementptr inbounds %struct.foo, %struct.foo* %t, i64 1
%3 = bitcast %struct.foo* %2 to i8*
- %4 = getelementptr inbounds i8* %3, i32 -1
+ %4 = getelementptr inbounds i8, i8* %3, i32 -1
store i8 0, i8* %4
- %5 = getelementptr inbounds i8* %4, i32 -1
+ %5 = getelementptr inbounds i8, i8* %4, i32 -1
store i8 0, i8* %5
- %6 = getelementptr inbounds i8* %5, i32 -1
+ %6 = getelementptr inbounds i8, i8* %5, i32 -1
store i8 0, i8* %6
- %7 = getelementptr inbounds i8* %6, i32 -1
+ %7 = getelementptr inbounds i8, i8* %6, i32 -1
store i8 0, i8* %7
- %8 = getelementptr inbounds i8* %7, i32 -1
+ %8 = getelementptr inbounds i8, i8* %7, i32 -1
store i8 0, i8* %8
- %9 = getelementptr inbounds i8* %8, i32 -1
+ %9 = getelementptr inbounds i8, i8* %8, i32 -1
store i8 0, i8* %9
- %10 = getelementptr inbounds i8* %9, i32 -1
+ %10 = getelementptr inbounds i8, i8* %9, i32 -1
store i8 0, i8* %10
- %11 = getelementptr inbounds i8* %10, i32 -1
+ %11 = getelementptr inbounds i8, i8* %10, i32 -1
store i8 0, i8* %11
- %12 = load i32* %1, align 4
+ %12 = load i32, i32* %1, align 4
ret i32 %12
; CHECK: ret i32 %12
}
diff --git a/test/Analysis/BasicAA/aligned-overread.ll b/test/Analysis/BasicAA/aligned-overread.ll
index b05f8eb..aa0a83e 100644
--- a/test/Analysis/BasicAA/aligned-overread.ll
+++ b/test/Analysis/BasicAA/aligned-overread.ll
@@ -9,11 +9,11 @@ target triple = "x86_64-apple-macosx10.8.0"
define i32 @main() nounwind uwtable ssp {
entry:
- %tmp = load i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4
+ %tmp = load i8, i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4
%tmp1 = or i8 %tmp, -128
- store i8 %tmp1, i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4
- %tmp2 = load i64* bitcast ({ i8, i8, i8, i8, i8 }* @a to i64*), align 8
- store i8 11, i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4
+ store i8 %tmp1, i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4
+ %tmp2 = load i64, i64* bitcast ({ i8, i8, i8, i8, i8 }* @a to i64*), align 8
+ store i8 11, i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4
%tmp3 = trunc i64 %tmp2 to i32
ret i32 %tmp3
diff --git a/test/Analysis/BasicAA/args-rets-allocas-loads.ll b/test/Analysis/BasicAA/args-rets-allocas-loads.ll
index 066f46b..05b56a0 100644
--- a/test/Analysis/BasicAA/args-rets-allocas-loads.ll
+++ b/test/Analysis/BasicAA/args-rets-allocas-loads.ll
@@ -22,8 +22,8 @@ define void @caller_a(double* %arg_a0,
%noalias_ret_a0 = call double* @noalias_returner()
%noalias_ret_a1 = call double* @noalias_returner()
- %loaded_a0 = load double** %indirect_a0
- %loaded_a1 = load double** %indirect_a1
+ %loaded_a0 = load double*, double** %indirect_a0
+ %loaded_a1 = load double*, double** %indirect_a1
call void @callee(double* %escape_alloca_a0)
call void @callee(double* %escape_alloca_a1)
diff --git a/test/Analysis/BasicAA/byval.ll b/test/Analysis/BasicAA/byval.ll
index 673fee0..edbe7b3 100644
--- a/test/Analysis/BasicAA/byval.ll
+++ b/test/Analysis/BasicAA/byval.ll
@@ -7,10 +7,10 @@ target triple = "i686-apple-darwin8"
define i32 @foo(%struct.x* byval %a) nounwind {
; CHECK: ret i32 1
%tmp1 = tail call i32 (...)* @bar( %struct.x* %a ) nounwind ; <i32> [#uses=0]
- %tmp2 = getelementptr %struct.x* %a, i32 0, i32 0 ; <i32*> [#uses=2]
+ %tmp2 = getelementptr %struct.x, %struct.x* %a, i32 0, i32 0 ; <i32*> [#uses=2]
store i32 1, i32* %tmp2, align 4
store i32 2, i32* @g, align 4
- %tmp4 = load i32* %tmp2, align 4 ; <i32> [#uses=1]
+ %tmp4 = load i32, i32* %tmp2, align 4 ; <i32> [#uses=1]
ret i32 %tmp4
}
diff --git a/test/Analysis/BasicAA/cas.ll b/test/Analysis/BasicAA/cas.ll
index d0cd9f4..b770cb7 100644
--- a/test/Analysis/BasicAA/cas.ll
+++ b/test/Analysis/BasicAA/cas.ll
@@ -6,9 +6,9 @@
; CHECK: ret i32 0
define i32 @main() {
- %a = load i32* @flag0
+ %a = load i32, i32* @flag0
%b = atomicrmw xchg i32* @turn, i32 1 monotonic
- %c = load i32* @flag0
+ %c = load i32, i32* @flag0
%d = sub i32 %a, %c
ret i32 %d
}
diff --git a/test/Analysis/BasicAA/constant-over-index.ll b/test/Analysis/BasicAA/constant-over-index.ll
index aeb068b..f5e2c7c 100644
--- a/test/Analysis/BasicAA/constant-over-index.ll
+++ b/test/Analysis/BasicAA/constant-over-index.ll
@@ -11,13 +11,13 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define void @foo([3 x [3 x double]]* noalias %p) {
entry:
- %p3 = getelementptr [3 x [3 x double]]* %p, i64 0, i64 0, i64 3
+ %p3 = getelementptr [3 x [3 x double]], [3 x [3 x double]]* %p, i64 0, i64 0, i64 3
br label %loop
loop:
%i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
- %p.0.i.0 = getelementptr [3 x [3 x double]]* %p, i64 0, i64 %i, i64 0
+ %p.0.i.0 = getelementptr [3 x [3 x double]], [3 x [3 x double]]* %p, i64 0, i64 %i, i64 0
store volatile double 0.0, double* %p3
store volatile double 0.1, double* %p.0.i.0
diff --git a/test/Analysis/BasicAA/cs-cs.ll b/test/Analysis/BasicAA/cs-cs.ll
index 693634c..78670b6 100644
--- a/test/Analysis/BasicAA/cs-cs.ll
+++ b/test/Analysis/BasicAA/cs-cs.ll
@@ -12,7 +12,7 @@ declare void @a_readonly_func(i8 *) noinline nounwind readonly
define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
entry:
- %q = getelementptr i8* %p, i64 16
+ %q = getelementptr i8, i8* %p, i64 16
%a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
%b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
@@ -70,7 +70,7 @@ define void @test2a(i8* noalias %P, i8* noalias %Q) nounwind ssp {
define void @test2b(i8* noalias %P, i8* noalias %Q) nounwind ssp {
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
- %R = getelementptr i8* %P, i64 12
+ %R = getelementptr i8, i8* %P, i64 12
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
ret void
@@ -91,7 +91,7 @@ define void @test2b(i8* noalias %P, i8* noalias %Q) nounwind ssp {
define void @test2c(i8* noalias %P, i8* noalias %Q) nounwind ssp {
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
- %R = getelementptr i8* %P, i64 11
+ %R = getelementptr i8, i8* %P, i64 11
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
ret void
@@ -112,7 +112,7 @@ define void @test2c(i8* noalias %P, i8* noalias %Q) nounwind ssp {
define void @test2d(i8* noalias %P, i8* noalias %Q) nounwind ssp {
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
- %R = getelementptr i8* %P, i64 -12
+ %R = getelementptr i8, i8* %P, i64 -12
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
ret void
@@ -133,7 +133,7 @@ define void @test2d(i8* noalias %P, i8* noalias %Q) nounwind ssp {
define void @test2e(i8* noalias %P, i8* noalias %Q) nounwind ssp {
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
- %R = getelementptr i8* %P, i64 -11
+ %R = getelementptr i8, i8* %P, i64 -11
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
ret void
diff --git a/test/Analysis/BasicAA/dag.ll b/test/Analysis/BasicAA/dag.ll
index 1d2f6f1..63e2c1a 100644
--- a/test/Analysis/BasicAA/dag.ll
+++ b/test/Analysis/BasicAA/dag.ll
@@ -36,6 +36,6 @@ xc:
%bigbase = bitcast i8* %base to i16*
store i16 -1, i16* %bigbase
- %loaded = load i8* %phi
+ %loaded = load i8, i8* %phi
ret i8 %loaded
}
diff --git a/test/Analysis/BasicAA/featuretest.ll b/test/Analysis/BasicAA/featuretest.ll
index 47d278f..c621d0e 100644
--- a/test/Analysis/BasicAA/featuretest.ll
+++ b/test/Analysis/BasicAA/featuretest.ll
@@ -18,13 +18,13 @@ define i32 @different_array_test(i64 %A, i64 %B) {
call void @external(i32* %Array1)
call void @external(i32* %Array2)
- %pointer = getelementptr i32* %Array1, i64 %A
- %val = load i32* %pointer
+ %pointer = getelementptr i32, i32* %Array1, i64 %A
+ %val = load i32, i32* %pointer
- %pointer2 = getelementptr i32* %Array2, i64 %B
+ %pointer2 = getelementptr i32, i32* %Array2, i64 %B
store i32 7, i32* %pointer2
- %REMOVE = load i32* %pointer ; redundant with above load
+ %REMOVE = load i32, i32* %pointer ; redundant with above load
%retval = sub i32 %REMOVE, %val
ret i32 %retval
; CHECK: @different_array_test
@@ -38,12 +38,12 @@ define i32 @constant_array_index_test() {
%Array = alloca i32, i32 100
call void @external(i32* %Array)
- %P1 = getelementptr i32* %Array, i64 7
- %P2 = getelementptr i32* %Array, i64 6
+ %P1 = getelementptr i32, i32* %Array, i64 7
+ %P2 = getelementptr i32, i32* %Array, i64 6
- %A = load i32* %P1
+ %A = load i32, i32* %P1
store i32 1, i32* %P2 ; Should not invalidate load
- %BREMOVE = load i32* %P1
+ %BREMOVE = load i32, i32* %P1
%Val = sub i32 %A, %BREMOVE
ret i32 %Val
; CHECK: @constant_array_index_test
@@ -53,10 +53,10 @@ define i32 @constant_array_index_test() {
; Test that if two pointers are spaced out by a constant getelementptr, that
; they cannot alias.
define i32 @gep_distance_test(i32* %A) {
- %REMOVEu = load i32* %A
- %B = getelementptr i32* %A, i64 2 ; Cannot alias A
+ %REMOVEu = load i32, i32* %A
+ %B = getelementptr i32, i32* %A, i64 2 ; Cannot alias A
store i32 7, i32* %B
- %REMOVEv = load i32* %A
+ %REMOVEv = load i32, i32* %A
%r = sub i32 %REMOVEu, %REMOVEv
ret i32 %r
; CHECK: @gep_distance_test
@@ -66,11 +66,11 @@ define i32 @gep_distance_test(i32* %A) {
; Test that if two pointers are spaced out by a constant offset, that they
; cannot alias, even if there is a variable offset between them...
define i32 @gep_distance_test2({i32,i32}* %A, i64 %distance) {
- %A1 = getelementptr {i32,i32}* %A, i64 0, i32 0
- %REMOVEu = load i32* %A1
- %B = getelementptr {i32,i32}* %A, i64 %distance, i32 1
+ %A1 = getelementptr {i32,i32}, {i32,i32}* %A, i64 0, i32 0
+ %REMOVEu = load i32, i32* %A1
+ %B = getelementptr {i32,i32}, {i32,i32}* %A, i64 %distance, i32 1
store i32 7, i32* %B ; B cannot alias A, it's at least 4 bytes away
- %REMOVEv = load i32* %A1
+ %REMOVEv = load i32, i32* %A1
%r = sub i32 %REMOVEu, %REMOVEv
ret i32 %r
; CHECK: @gep_distance_test2
@@ -80,11 +80,11 @@ define i32 @gep_distance_test2({i32,i32}* %A, i64 %distance) {
; Test that we can do funny pointer things and that distance calc will still
; work.
define i32 @gep_distance_test3(i32 * %A) {
- %X = load i32* %A
+ %X = load i32, i32* %A
%B = bitcast i32* %A to i8*
- %C = getelementptr i8* %B, i64 4
+ %C = getelementptr i8, i8* %B, i64 4
store i8 42, i8* %C
- %Y = load i32* %A
+ %Y = load i32, i32* %A
%R = sub i32 %X, %Y
ret i32 %R
; CHECK: @gep_distance_test3
@@ -96,9 +96,9 @@ define i32 @constexpr_test() {
%X = alloca i32
call void @external(i32* %X)
- %Y = load i32* %X
- store i32 5, i32* getelementptr ({ i32 }* @Global, i64 0, i32 0)
- %REMOVE = load i32* %X
+ %Y = load i32, i32* %X
+ store i32 5, i32* getelementptr ({ i32 }, { i32 }* @Global, i64 0, i32 0)
+ %REMOVE = load i32, i32* %X
%retval = sub i32 %Y, %REMOVE
ret i32 %retval
; CHECK: @constexpr_test
@@ -112,13 +112,13 @@ define i32 @constexpr_test() {
define i16 @zext_sext_confusion(i16* %row2col, i5 %j) nounwind{
entry:
%sum5.cast = zext i5 %j to i64 ; <i64> [#uses=1]
- %P1 = getelementptr i16* %row2col, i64 %sum5.cast
- %row2col.load.1.2 = load i16* %P1, align 1 ; <i16> [#uses=1]
+ %P1 = getelementptr i16, i16* %row2col, i64 %sum5.cast
+ %row2col.load.1.2 = load i16, i16* %P1, align 1 ; <i16> [#uses=1]
%sum13.cast31 = sext i5 %j to i6 ; <i6> [#uses=1]
%sum13.cast = zext i6 %sum13.cast31 to i64 ; <i64> [#uses=1]
- %P2 = getelementptr i16* %row2col, i64 %sum13.cast
- %row2col.load.1.6 = load i16* %P2, align 1 ; <i16> [#uses=1]
+ %P2 = getelementptr i16, i16* %row2col, i64 %sum13.cast
+ %row2col.load.1.6 = load i16, i16* %P2, align 1 ; <i16> [#uses=1]
%.ret = sub i16 %row2col.load.1.6, %row2col.load.1.2 ; <i16> [#uses=1]
ret i16 %.ret
diff --git a/test/Analysis/BasicAA/full-store-partial-alias.ll b/test/Analysis/BasicAA/full-store-partial-alias.ll
index 9699d92..341f6ba 100644
--- a/test/Analysis/BasicAA/full-store-partial-alias.ll
+++ b/test/Analysis/BasicAA/full-store-partial-alias.ll
@@ -18,13 +18,13 @@ define i32 @signbit(double %x) nounwind {
; CHECK: ret i32 0
entry:
%u = alloca %union.anon, align 8
- %tmp9 = getelementptr inbounds %union.anon* %u, i64 0, i32 0
+ %tmp9 = getelementptr inbounds %union.anon, %union.anon* %u, i64 0, i32 0
store double %x, double* %tmp9, align 8, !tbaa !0
- %tmp2 = load i32* bitcast (i64* @endianness_test to i32*), align 8, !tbaa !3
+ %tmp2 = load i32, i32* bitcast (i64* @endianness_test to i32*), align 8, !tbaa !3
%idxprom = sext i32 %tmp2 to i64
%tmp4 = bitcast %union.anon* %u to [2 x i32]*
- %arrayidx = getelementptr inbounds [2 x i32]* %tmp4, i64 0, i64 %idxprom
- %tmp5 = load i32* %arrayidx, align 4, !tbaa !3
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %tmp4, i64 0, i64 %idxprom
+ %tmp5 = load i32, i32* %arrayidx, align 4, !tbaa !3
%tmp5.lobit = lshr i32 %tmp5, 31
ret i32 %tmp5.lobit
}
diff --git a/test/Analysis/BasicAA/gcsetest.ll b/test/Analysis/BasicAA/gcsetest.ll
index 64792eb..cf6ab71 100644
--- a/test/Analysis/BasicAA/gcsetest.ll
+++ b/test/Analysis/BasicAA/gcsetest.ll
@@ -12,11 +12,11 @@
; CHECK-NEXT: ret i32 0
define i32 @test() {
- %A1 = load i32* @A
+ %A1 = load i32, i32* @A
store i32 123, i32* @B ; Store cannot alias @A
- %A2 = load i32* @A
+ %A2 = load i32, i32* @A
%X = sub i32 %A1, %A2
ret i32 %X
}
@@ -30,13 +30,13 @@ define i32 @test() {
; CHECK-NEXT: ret i32 0
define i32 @test2() {
- %A1 = load i32* @A
+ %A1 = load i32, i32* @A
br label %Loop
Loop:
%AP = phi i32 [0, %0], [%X, %Loop]
store i32 %AP, i32* @B ; Store cannot alias @A
- %A2 = load i32* @A
+ %A2 = load i32, i32* @A
%X = sub i32 %A1, %A2
%c = icmp eq i32 %X, 0
br i1 %c, label %out, label %Loop
@@ -55,7 +55,7 @@ define i32 @test3() {
%X = alloca i32
store i32 7, i32* %X
call void @external()
- %V = load i32* %X
+ %V = load i32, i32* %X
ret i32 %V
}
diff --git a/test/Analysis/BasicAA/gep-alias.ll b/test/Analysis/BasicAA/gep-alias.ll
index 2c0d467..f686010 100644
--- a/test/Analysis/BasicAA/gep-alias.ll
+++ b/test/Analysis/BasicAA/gep-alias.ll
@@ -6,12 +6,12 @@ target datalayout = "e-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-
define i32 @test1(i8 * %P) {
entry:
%Q = bitcast i8* %P to {i32, i32}*
- %R = getelementptr {i32, i32}* %Q, i32 0, i32 1
- %S = load i32* %R
+ %R = getelementptr {i32, i32}, {i32, i32}* %Q, i32 0, i32 1
+ %S = load i32, i32* %R
%q = bitcast i8* %P to {i32, i32}*
- %r = getelementptr {i32, i32}* %q, i32 0, i32 1
- %s = load i32* %r
+ %r = getelementptr {i32, i32}, {i32, i32}* %q, i32 0, i32 1
+ %s = load i32, i32* %r
%t = sub i32 %S, %s
ret i32 %t
@@ -22,13 +22,13 @@ entry:
define i32 @test2(i8 * %P) {
entry:
%Q = bitcast i8* %P to {i32, i32, i32}*
- %R = getelementptr {i32, i32, i32}* %Q, i32 0, i32 1
- %S = load i32* %R
+ %R = getelementptr {i32, i32, i32}, {i32, i32, i32}* %Q, i32 0, i32 1
+ %S = load i32, i32* %R
- %r = getelementptr {i32, i32, i32}* %Q, i32 0, i32 2
+ %r = getelementptr {i32, i32, i32}, {i32, i32, i32}* %Q, i32 0, i32 2
store i32 42, i32* %r
- %s = load i32* %R
+ %s = load i32, i32* %R
%t = sub i32 %S, %s
ret i32 %t
@@ -40,14 +40,14 @@ entry:
; This was a miscompilation.
define i32 @test3({float, {i32, i32, i32}}* %P) {
entry:
- %P2 = getelementptr {float, {i32, i32, i32}}* %P, i32 0, i32 1
- %R = getelementptr {i32, i32, i32}* %P2, i32 0, i32 1
- %S = load i32* %R
+ %P2 = getelementptr {float, {i32, i32, i32}}, {float, {i32, i32, i32}}* %P, i32 0, i32 1
+ %R = getelementptr {i32, i32, i32}, {i32, i32, i32}* %P2, i32 0, i32 1
+ %S = load i32, i32* %R
- %r = getelementptr {i32, i32, i32}* %P2, i32 0, i32 2
+ %r = getelementptr {i32, i32, i32}, {i32, i32, i32}* %P2, i32 0, i32 2
store i32 42, i32* %r
- %s = load i32* %R
+ %s = load i32, i32* %R
%t = sub i32 %S, %s
ret i32 %t
@@ -62,11 +62,11 @@ entry:
define i32 @test4(%SmallPtrSet64* %P) {
entry:
- %tmp2 = getelementptr inbounds %SmallPtrSet64* %P, i64 0, i32 0, i32 1
+ %tmp2 = getelementptr inbounds %SmallPtrSet64, %SmallPtrSet64* %P, i64 0, i32 0, i32 1
store i32 64, i32* %tmp2, align 8
- %tmp3 = getelementptr inbounds %SmallPtrSet64* %P, i64 0, i32 0, i32 4, i64 64
+ %tmp3 = getelementptr inbounds %SmallPtrSet64, %SmallPtrSet64* %P, i64 0, i32 0, i32 4, i64 64
store i8* null, i8** %tmp3, align 8
- %tmp4 = load i32* %tmp2, align 8
+ %tmp4 = load i32, i32* %tmp2, align 8
ret i32 %tmp4
; CHECK-LABEL: @test4(
; CHECK: ret i32 64
@@ -74,12 +74,12 @@ entry:
; P[i] != p[i+1]
define i32 @test5(i32* %p, i64 %i) {
- %pi = getelementptr i32* %p, i64 %i
+ %pi = getelementptr i32, i32* %p, i64 %i
%i.next = add i64 %i, 1
- %pi.next = getelementptr i32* %p, i64 %i.next
- %x = load i32* %pi
+ %pi.next = getelementptr i32, i32* %p, i64 %i.next
+ %x = load i32, i32* %pi
store i32 42, i32* %pi.next
- %y = load i32* %pi
+ %y = load i32, i32* %pi
%z = sub i32 %x, %y
ret i32 %z
; CHECK-LABEL: @test5(
@@ -87,12 +87,12 @@ define i32 @test5(i32* %p, i64 %i) {
}
define i32 @test5_as1_smaller_size(i32 addrspace(1)* %p, i8 %i) {
- %pi = getelementptr i32 addrspace(1)* %p, i8 %i
+ %pi = getelementptr i32, i32 addrspace(1)* %p, i8 %i
%i.next = add i8 %i, 1
- %pi.next = getelementptr i32 addrspace(1)* %p, i8 %i.next
- %x = load i32 addrspace(1)* %pi
+ %pi.next = getelementptr i32, i32 addrspace(1)* %p, i8 %i.next
+ %x = load i32, i32 addrspace(1)* %pi
store i32 42, i32 addrspace(1)* %pi.next
- %y = load i32 addrspace(1)* %pi
+ %y = load i32, i32 addrspace(1)* %pi
%z = sub i32 %x, %y
ret i32 %z
; CHECK-LABEL: @test5_as1_smaller_size(
@@ -101,12 +101,12 @@ define i32 @test5_as1_smaller_size(i32 addrspace(1)* %p, i8 %i) {
}
define i32 @test5_as1_same_size(i32 addrspace(1)* %p, i16 %i) {
- %pi = getelementptr i32 addrspace(1)* %p, i16 %i
+ %pi = getelementptr i32, i32 addrspace(1)* %p, i16 %i
%i.next = add i16 %i, 1
- %pi.next = getelementptr i32 addrspace(1)* %p, i16 %i.next
- %x = load i32 addrspace(1)* %pi
+ %pi.next = getelementptr i32, i32 addrspace(1)* %p, i16 %i.next
+ %x = load i32, i32 addrspace(1)* %pi
store i32 42, i32 addrspace(1)* %pi.next
- %y = load i32 addrspace(1)* %pi
+ %y = load i32, i32 addrspace(1)* %pi
%z = sub i32 %x, %y
ret i32 %z
; CHECK-LABEL: @test5_as1_same_size(
@@ -116,12 +116,12 @@ define i32 @test5_as1_same_size(i32 addrspace(1)* %p, i16 %i) {
; P[i] != p[(i*4)|1]
define i32 @test6(i32* %p, i64 %i1) {
%i = shl i64 %i1, 2
- %pi = getelementptr i32* %p, i64 %i
+ %pi = getelementptr i32, i32* %p, i64 %i
%i.next = or i64 %i, 1
- %pi.next = getelementptr i32* %p, i64 %i.next
- %x = load i32* %pi
+ %pi.next = getelementptr i32, i32* %p, i64 %i.next
+ %x = load i32, i32* %pi
store i32 42, i32* %pi.next
- %y = load i32* %pi
+ %y = load i32, i32* %pi
%z = sub i32 %x, %y
ret i32 %z
; CHECK-LABEL: @test6(
@@ -130,12 +130,12 @@ define i32 @test6(i32* %p, i64 %i1) {
; P[1] != P[i*4]
define i32 @test7(i32* %p, i64 %i) {
- %pi = getelementptr i32* %p, i64 1
+ %pi = getelementptr i32, i32* %p, i64 1
%i.next = shl i64 %i, 2
- %pi.next = getelementptr i32* %p, i64 %i.next
- %x = load i32* %pi
+ %pi.next = getelementptr i32, i32* %p, i64 %i.next
+ %x = load i32, i32* %pi
store i32 42, i32* %pi.next
- %y = load i32* %pi
+ %y = load i32, i32* %pi
%z = sub i32 %x, %y
ret i32 %z
; CHECK-LABEL: @test7(
@@ -146,13 +146,13 @@ define i32 @test7(i32* %p, i64 %i) {
; PR1143
define i32 @test8(i32* %p, i16 %i) {
%i1 = zext i16 %i to i32
- %pi = getelementptr i32* %p, i32 %i1
+ %pi = getelementptr i32, i32* %p, i32 %i1
%i.next = add i16 %i, 1
%i.next2 = zext i16 %i.next to i32
- %pi.next = getelementptr i32* %p, i32 %i.next2
- %x = load i32* %pi
+ %pi.next = getelementptr i32, i32* %p, i32 %i.next2
+ %x = load i32, i32* %pi
store i32 42, i32* %pi.next
- %y = load i32* %pi
+ %y = load i32, i32* %pi
%z = sub i32 %x, %y
ret i32 %z
; CHECK-LABEL: @test8(
@@ -163,16 +163,16 @@ define i8 @test9([4 x i8] *%P, i32 %i, i32 %j) {
%i2 = shl i32 %i, 2
%i3 = add i32 %i2, 1
; P2 = P + 1 + 4*i
- %P2 = getelementptr [4 x i8] *%P, i32 0, i32 %i3
+ %P2 = getelementptr [4 x i8], [4 x i8] *%P, i32 0, i32 %i3
%j2 = shl i32 %j, 2
; P4 = P + 4*j
- %P4 = getelementptr [4 x i8]* %P, i32 0, i32 %j2
+ %P4 = getelementptr [4 x i8], [4 x i8]* %P, i32 0, i32 %j2
- %x = load i8* %P2
+ %x = load i8, i8* %P2
store i8 42, i8* %P4
- %y = load i8* %P2
+ %y = load i8, i8* %P2
%z = sub i8 %x, %y
ret i8 %z
; CHECK-LABEL: @test9(
@@ -183,14 +183,14 @@ define i8 @test10([4 x i8] *%P, i32 %i) {
%i2 = shl i32 %i, 2
%i3 = add i32 %i2, 4
; P2 = P + 4 + 4*i
- %P2 = getelementptr [4 x i8] *%P, i32 0, i32 %i3
+ %P2 = getelementptr [4 x i8], [4 x i8] *%P, i32 0, i32 %i3
; P4 = P + 4*i
- %P4 = getelementptr [4 x i8]* %P, i32 0, i32 %i2
+ %P4 = getelementptr [4 x i8], [4 x i8]* %P, i32 0, i32 %i2
- %x = load i8* %P2
+ %x = load i8, i8* %P2
store i8 42, i8* %P4
- %y = load i8* %P2
+ %y = load i8, i8* %P2
%z = sub i8 %x, %y
ret i8 %z
; CHECK-LABEL: @test10(
@@ -201,13 +201,13 @@ define i8 @test10([4 x i8] *%P, i32 %i) {
define float @test11(i32 %indvar, [4 x [2 x float]]* %q) nounwind ssp {
%tmp = mul i32 %indvar, -1
%dec = add i32 %tmp, 3
- %scevgep = getelementptr [4 x [2 x float]]* %q, i32 0, i32 %dec
+ %scevgep = getelementptr [4 x [2 x float]], [4 x [2 x float]]* %q, i32 0, i32 %dec
%scevgep35 = bitcast [2 x float]* %scevgep to i64*
- %arrayidx28 = getelementptr inbounds [4 x [2 x float]]* %q, i32 0, i32 0
- %y29 = getelementptr inbounds [2 x float]* %arrayidx28, i32 0, i32 1
+ %arrayidx28 = getelementptr inbounds [4 x [2 x float]], [4 x [2 x float]]* %q, i32 0, i32 0
+ %y29 = getelementptr inbounds [2 x float], [2 x float]* %arrayidx28, i32 0, i32 1
store float 1.0, float* %y29, align 4
store i64 0, i64* %scevgep35, align 4
- %tmp30 = load float* %y29, align 4
+ %tmp30 = load float, float* %y29, align 4
ret float %tmp30
; CHECK-LABEL: @test11(
; CHECK: ret float %tmp30
@@ -216,14 +216,14 @@ define float @test11(i32 %indvar, [4 x [2 x float]]* %q) nounwind ssp {
; (This was a miscompilation.)
define i32 @test12(i32 %x, i32 %y, i8* %p) nounwind {
%a = bitcast i8* %p to [13 x i8]*
- %b = getelementptr [13 x i8]* %a, i32 %x
+ %b = getelementptr [13 x i8], [13 x i8]* %a, i32 %x
%c = bitcast [13 x i8]* %b to [15 x i8]*
- %d = getelementptr [15 x i8]* %c, i32 %y, i32 8
+ %d = getelementptr [15 x i8], [15 x i8]* %c, i32 %y, i32 8
%castd = bitcast i8* %d to i32*
%castp = bitcast i8* %p to i32*
store i32 1, i32* %castp
store i32 0, i32* %castd
- %r = load i32* %castp
+ %r = load i32, i32* %castp
ret i32 %r
; CHECK-LABEL: @test12(
; CHECK: ret i32 %r
diff --git a/test/Analysis/BasicAA/global-size.ll b/test/Analysis/BasicAA/global-size.ll
index f081cb1..bacf3bc 100644
--- a/test/Analysis/BasicAA/global-size.ll
+++ b/test/Analysis/BasicAA/global-size.ll
@@ -8,9 +8,9 @@ target datalayout = "E-p:64:64:64-p1:16:16:16-a0:0:8-f32:32:32-f64:64:64-i1:8:8-
; CHECK-LABEL: @test1(
define i16 @test1(i32* %P) {
- %X = load i16* @B
+ %X = load i16, i16* @B
store i32 7, i32* %P
- %Y = load i16* @B
+ %Y = load i16, i16* @B
%Z = sub i16 %Y, %X
ret i16 %Z
; CHECK: ret i16 0
@@ -21,9 +21,9 @@ define i16 @test1(i32* %P) {
define i16 @test1_as1(i32 addrspace(1)* %P) {
; CHECK-LABEL: @test1_as1(
; CHECK: ret i16 0
- %X = load i16 addrspace(1)* @B_as1
+ %X = load i16, i16 addrspace(1)* @B_as1
store i32 7, i32 addrspace(1)* %P
- %Y = load i16 addrspace(1)* @B_as1
+ %Y = load i16, i16 addrspace(1)* @B_as1
%Z = sub i16 %Y, %X
ret i16 %Z
}
@@ -35,14 +35,14 @@ define i16 @test1_as1(i32 addrspace(1)* %P) {
; CHECK-LABEL: @test2(
define i8 @test2(i32 %tmp79, i32 %w.2, i32 %indvar89) nounwind {
%tmp92 = add i32 %tmp79, %indvar89
- %arrayidx412 = getelementptr [0 x i8]* @window, i32 0, i32 %tmp92
+ %arrayidx412 = getelementptr [0 x i8], [0 x i8]* @window, i32 0, i32 %tmp92
%tmp93 = add i32 %w.2, %indvar89
- %arrayidx416 = getelementptr [0 x i8]* @window, i32 0, i32 %tmp93
+ %arrayidx416 = getelementptr [0 x i8], [0 x i8]* @window, i32 0, i32 %tmp93
- %A = load i8* %arrayidx412, align 1
+ %A = load i8, i8* %arrayidx412, align 1
store i8 4, i8* %arrayidx416, align 1
- %B = load i8* %arrayidx412, align 1
+ %B = load i8, i8* %arrayidx412, align 1
%C = sub i8 %A, %B
ret i8 %C
diff --git a/test/Analysis/BasicAA/intrinsics.ll b/test/Analysis/BasicAA/intrinsics.ll
index c1cf587..8c05587 100644
--- a/test/Analysis/BasicAA/intrinsics.ll
+++ b/test/Analysis/BasicAA/intrinsics.ll
@@ -21,13 +21,13 @@ entry:
; CHECK: define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: %q = getelementptr i8* %p, i64 16
+; CHECK-NEXT: %q = getelementptr i8, i8* %p, i64 16
; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) [[ATTR]]
; CHECK-NEXT: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
; CHECK-NEXT: %c = add <8 x i16> %a, %a
define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
entry:
- %q = getelementptr i8* %p, i64 16
+ %q = getelementptr i8, i8* %p, i64 16
%a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
%b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
diff --git a/test/Analysis/BasicAA/invariant_load.ll b/test/Analysis/BasicAA/invariant_load.ll
index bc629cd..722fb5b 100644
--- a/test/Analysis/BasicAA/invariant_load.ll
+++ b/test/Analysis/BasicAA/invariant_load.ll
@@ -10,15 +10,15 @@
define i32 @foo(i32* nocapture %p, i8* nocapture %q) {
entry:
- %0 = load i32* %p, align 4, !invariant.load !3
+ %0 = load i32, i32* %p, align 4, !invariant.load !3
%conv = trunc i32 %0 to i8
store i8 %conv, i8* %q, align 1
- %1 = load i32* %p, align 4, !invariant.load !3
+ %1 = load i32, i32* %p, align 4, !invariant.load !3
%add = add nsw i32 %1, 1
ret i32 %add
; CHECK: foo
-; CHECK: %0 = load i32* %p
+; CHECK: %0 = load i32, i32* %p
; CHECK: store i8 %conv, i8* %q,
; CHECK: %add = add nsw i32 %0, 1
}
diff --git a/test/Analysis/BasicAA/memset_pattern.ll b/test/Analysis/BasicAA/memset_pattern.ll
index 590664c..25bdb2e 100644
--- a/test/Analysis/BasicAA/memset_pattern.ll
+++ b/test/Analysis/BasicAA/memset_pattern.ll
@@ -13,7 +13,7 @@ entry:
store i32 1, i32* @z
tail call void @memset_pattern16(i8* bitcast (i32* @y to i8*), i8* bitcast (i32* @x to i8*), i64 4) nounwind
; CHECK-NOT: load
- %l = load i32* @z
+ %l = load i32, i32* @z
; CHECK: ret i32 1
ret i32 %l
}
diff --git a/test/Analysis/BasicAA/modref.ll b/test/Analysis/BasicAA/modref.ll
index 0d8bf71..e124d6c 100644
--- a/test/Analysis/BasicAA/modref.ll
+++ b/test/Analysis/BasicAA/modref.ll
@@ -13,7 +13,7 @@ define i32 @test0(i8* %P) {
call void @llvm.memset.p0i8.i32(i8* %P, i8 0, i32 42, i32 1, i1 false)
- %B = load i32* %A
+ %B = load i32, i32* %A
ret i32 %B
; CHECK-LABEL: @test0
@@ -29,30 +29,30 @@ define i8 @test1() {
call void @llvm.memcpy.p0i8.p0i8.i8(i8* %A, i8* %B, i8 -1, i32 0, i1 false)
- %C = load i8* %B
+ %C = load i8, i8* %B
ret i8 %C
; CHECK: ret i8 2
}
define i8 @test2(i8* %P) {
; CHECK-LABEL: @test2
- %P2 = getelementptr i8* %P, i32 127
+ %P2 = getelementptr i8, i8* %P, i32 127
store i8 1, i8* %P2 ;; Not dead across memset
call void @llvm.memset.p0i8.i8(i8* %P, i8 2, i8 127, i32 0, i1 false)
- %A = load i8* %P2
+ %A = load i8, i8* %P2
ret i8 %A
; CHECK: ret i8 1
}
define i8 @test2a(i8* %P) {
; CHECK-LABEL: @test2a
- %P2 = getelementptr i8* %P, i32 126
+ %P2 = getelementptr i8, i8* %P, i32 126
;; FIXME: DSE isn't zapping this dead store.
store i8 1, i8* %P2 ;; Dead, clobbered by memset.
call void @llvm.memset.p0i8.i8(i8* %P, i8 2, i8 127, i32 0, i1 false)
- %A = load i8* %P2
+ %A = load i8, i8* %P2
ret i8 %A
; CHECK-NOT: load
; CHECK: ret i8 2
@@ -64,7 +64,7 @@ define void @test3(i8* %P, i8 %X) {
; CHECK-NOT: %Y
%Y = add i8 %X, 1 ;; Dead, because the only use (the store) is dead.
- %P2 = getelementptr i8* %P, i32 2
+ %P2 = getelementptr i8, i8* %P, i32 2
store i8 %Y, i8* %P2 ;; Not read by lifetime.end, should be removed.
; CHECK: store i8 2, i8* %P2
call void @llvm.lifetime.end(i64 1, i8* %P)
@@ -78,7 +78,7 @@ define void @test3a(i8* %P, i8 %X) {
; CHECK-LABEL: @test3a
%Y = add i8 %X, 1 ;; Dead, because the only use (the store) is dead.
- %P2 = getelementptr i8* %P, i32 2
+ %P2 = getelementptr i8, i8* %P, i32 2
store i8 %Y, i8* %P2
; CHECK-NEXT: call void @llvm.lifetime.end
call void @llvm.lifetime.end(i64 10, i8* %P)
@@ -90,9 +90,9 @@ define void @test3a(i8* %P, i8 %X) {
@G2 = external global [4000 x i32]
define i32 @test4(i8* %P) {
- %tmp = load i32* @G1
+ %tmp = load i32, i32* @G1
call void @llvm.memset.p0i8.i32(i8* bitcast ([4000 x i32]* @G2 to i8*), i8 0, i32 4000, i32 1, i1 false)
- %tmp2 = load i32* @G1
+ %tmp2 = load i32, i32* @G1
%sub = sub i32 %tmp2, %tmp
ret i32 %sub
; CHECK-LABEL: @test4
@@ -105,9 +105,9 @@ define i32 @test4(i8* %P) {
; Verify that basicaa is handling variable length memcpy, knowing it doesn't
; write to G1.
define i32 @test5(i8* %P, i32 %Len) {
- %tmp = load i32* @G1
+ %tmp = load i32, i32* @G1
call void @llvm.memcpy.p0i8.p0i8.i32(i8* bitcast ([4000 x i32]* @G2 to i8*), i8* bitcast (i32* @G1 to i8*), i32 %Len, i32 1, i1 false)
- %tmp2 = load i32* @G1
+ %tmp2 = load i32, i32* @G1
%sub = sub i32 %tmp2, %tmp
ret i32 %sub
; CHECK: @test5
@@ -118,13 +118,13 @@ define i32 @test5(i8* %P, i32 %Len) {
}
define i8 @test6(i8* %p, i8* noalias %a) {
- %x = load i8* %a
+ %x = load i8, i8* %a
%t = va_arg i8* %p, float
- %y = load i8* %a
+ %y = load i8, i8* %a
%z = add i8 %x, %y
ret i8 %z
; CHECK-LABEL: @test6
-; CHECK: load i8* %a
+; CHECK: load i8, i8* %a
; CHECK-NOT: load
; CHECK: ret
}
@@ -135,14 +135,14 @@ define i32 @test7() nounwind uwtable ssp {
entry:
%x = alloca i32, align 4
store i32 0, i32* %x, align 4
- %add.ptr = getelementptr inbounds i32* %x, i64 1
+ %add.ptr = getelementptr inbounds i32, i32* %x, i64 1
call void @test7decl(i32* %add.ptr)
- %tmp = load i32* %x, align 4
+ %tmp = load i32, i32* %x, align 4
ret i32 %tmp
; CHECK-LABEL: @test7(
; CHECK: store i32 0
; CHECK: call void @test7decl
-; CHECK: load i32*
+; CHECK: load i32, i32*
}
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
diff --git a/test/Analysis/BasicAA/must-and-partial.ll b/test/Analysis/BasicAA/must-and-partial.ll
index 58139ff..3b4c84a 100644
--- a/test/Analysis/BasicAA/must-and-partial.ll
+++ b/test/Analysis/BasicAA/must-and-partial.ll
@@ -9,7 +9,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; CHECK: PartialAlias: i16* %bigbase0, i8* %phi
define i8 @test0(i8* %base, i1 %x) {
entry:
- %baseplusone = getelementptr i8* %base, i64 1
+ %baseplusone = getelementptr i8, i8* %base, i64 1
br i1 %x, label %red, label %green
red:
br label %green
@@ -20,20 +20,20 @@ green:
%bigbase0 = bitcast i8* %base to i16*
store i16 -1, i16* %bigbase0
- %loaded = load i8* %phi
+ %loaded = load i8, i8* %phi
ret i8 %loaded
}
; CHECK: PartialAlias: i16* %bigbase1, i8* %sel
define i8 @test1(i8* %base, i1 %x) {
entry:
- %baseplusone = getelementptr i8* %base, i64 1
+ %baseplusone = getelementptr i8, i8* %base, i64 1
%sel = select i1 %x, i8* %baseplusone, i8* %base
store i8 0, i8* %sel
%bigbase1 = bitcast i8* %base to i16*
store i16 -1, i16* %bigbase1
- %loaded = load i8* %sel
+ %loaded = load i8, i8* %sel
ret i8 %loaded
}
diff --git a/test/Analysis/BasicAA/no-escape-call.ll b/test/Analysis/BasicAA/no-escape-call.ll
index b93db6e..ea33532 100644
--- a/test/Analysis/BasicAA/no-escape-call.ll
+++ b/test/Analysis/BasicAA/no-escape-call.ll
@@ -8,13 +8,13 @@ define i1 @foo(i32 %i) nounwind {
entry:
%arr = alloca [10 x i8*] ; <[10 x i8*]*> [#uses=1]
%tmp2 = call i8* @getPtr( ) nounwind ; <i8*> [#uses=2]
- %tmp4 = getelementptr [10 x i8*]* %arr, i32 0, i32 %i ; <i8**> [#uses=2]
+ %tmp4 = getelementptr [10 x i8*], [10 x i8*]* %arr, i32 0, i32 %i ; <i8**> [#uses=2]
store i8* %tmp2, i8** %tmp4, align 4
- %tmp10 = getelementptr i8* %tmp2, i32 10 ; <i8*> [#uses=1]
+ %tmp10 = getelementptr i8, i8* %tmp2, i32 10 ; <i8*> [#uses=1]
store i8 42, i8* %tmp10, align 1
- %tmp14 = load i8** %tmp4, align 4 ; <i8*> [#uses=1]
- %tmp16 = getelementptr i8* %tmp14, i32 10 ; <i8*> [#uses=1]
- %tmp17 = load i8* %tmp16, align 1 ; <i8> [#uses=1]
+ %tmp14 = load i8*, i8** %tmp4, align 4 ; <i8*> [#uses=1]
+ %tmp16 = getelementptr i8, i8* %tmp14, i32 10 ; <i8*> [#uses=1]
+ %tmp17 = load i8, i8* %tmp16, align 1 ; <i8> [#uses=1]
%tmp19 = icmp eq i8 %tmp17, 42 ; <i1> [#uses=1]
ret i1 %tmp19
}
diff --git a/test/Analysis/BasicAA/noalias-bugs.ll b/test/Analysis/BasicAA/noalias-bugs.ll
index 2bcc14f..acb230c 100644
--- a/test/Analysis/BasicAA/noalias-bugs.ll
+++ b/test/Analysis/BasicAA/noalias-bugs.ll
@@ -12,12 +12,12 @@ target triple = "x86_64-unknown-linux-gnu"
define i64 @testcase(%nested * noalias %p1, %nested * noalias %p2,
i32 %a, i32 %b) {
- %ptr = getelementptr inbounds %nested* %p1, i64 -1, i32 0
- %ptr.64 = getelementptr inbounds %nested.i64* %ptr, i64 0, i32 0
- %ptr2= getelementptr inbounds %nested* %p2, i64 0, i32 0
+ %ptr = getelementptr inbounds %nested, %nested* %p1, i64 -1, i32 0
+ %ptr.64 = getelementptr inbounds %nested.i64, %nested.i64* %ptr, i64 0, i32 0
+ %ptr2= getelementptr inbounds %nested, %nested* %p2, i64 0, i32 0
%cmp = icmp ult i32 %a, %b
%either_ptr = select i1 %cmp, %nested.i64* %ptr2, %nested.i64* %ptr
- %either_ptr.64 = getelementptr inbounds %nested.i64* %either_ptr, i64 0, i32 0
+ %either_ptr.64 = getelementptr inbounds %nested.i64, %nested.i64* %either_ptr, i64 0, i32 0
; Because either_ptr.64 and ptr.64 can alias (we used to return noalias)
; elimination of the first store is not valid.
@@ -27,7 +27,7 @@ define i64 @testcase(%nested * noalias %p1, %nested * noalias %p2,
; CHECK: store i64 1
store i64 2, i64* %ptr.64, align 8
- %r = load i64* %either_ptr.64, align 8
+ %r = load i64, i64* %either_ptr.64, align 8
store i64 1, i64* %ptr.64, align 8
ret i64 %r
}
diff --git a/test/Analysis/BasicAA/noalias-geps.ll b/test/Analysis/BasicAA/noalias-geps.ll
index f9ec713..cdec988 100644
--- a/test/Analysis/BasicAA/noalias-geps.ll
+++ b/test/Analysis/BasicAA/noalias-geps.ll
@@ -5,26 +5,26 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
; Check that geps with equal base offsets of noalias base pointers stay noalias.
define i32 @test(i32* %p, i16 %i) {
; CHECK-LABEL: Function: test:
- %pi = getelementptr i32* %p, i32 0
- %pi.next = getelementptr i32* %p, i32 1
+ %pi = getelementptr i32, i32* %p, i32 0
+ %pi.next = getelementptr i32, i32* %p, i32 1
%b = icmp eq i16 %i, 0
br i1 %b, label %bb1, label %bb2
bb1:
- %f = getelementptr i32* %pi, i32 1
- %g = getelementptr i32* %pi.next, i32 1
+ %f = getelementptr i32, i32* %pi, i32 1
+ %g = getelementptr i32, i32* %pi.next, i32 1
br label %bb3
bb2:
- %f2 = getelementptr i32* %pi, i32 1
- %g2 = getelementptr i32* %pi.next, i32 1
+ %f2 = getelementptr i32, i32* %pi, i32 1
+ %g2 = getelementptr i32, i32* %pi.next, i32 1
br label %bb3
bb3:
%ptr_phi = phi i32* [ %f, %bb1 ], [ %f2, %bb2 ]
%ptr_phi2 = phi i32* [ %g, %bb1 ], [ %g2, %bb2 ]
; CHECK: NoAlias: i32* %f1, i32* %g1
- %f1 = getelementptr i32* %ptr_phi , i32 1
- %g1 = getelementptr i32* %ptr_phi2 , i32 1
+ %f1 = getelementptr i32, i32* %ptr_phi , i32 1
+ %g1 = getelementptr i32, i32* %ptr_phi2 , i32 1
ret i32 0
}
@@ -32,25 +32,25 @@ ret i32 0
; Check that geps with equal indices of noalias base pointers stay noalias.
define i32 @test2([2 x i32]* %p, i32 %i) {
; CHECK-LABEL: Function: test2:
- %pi = getelementptr [2 x i32]* %p, i32 0
- %pi.next = getelementptr [2 x i32]* %p, i32 1
+ %pi = getelementptr [2 x i32], [2 x i32]* %p, i32 0
+ %pi.next = getelementptr [2 x i32], [2 x i32]* %p, i32 1
%b = icmp eq i32 %i, 0
br i1 %b, label %bb1, label %bb2
bb1:
- %f = getelementptr [2 x i32]* %pi, i32 1
- %g = getelementptr [2 x i32]* %pi.next, i32 1
+ %f = getelementptr [2 x i32], [2 x i32]* %pi, i32 1
+ %g = getelementptr [2 x i32], [2 x i32]* %pi.next, i32 1
br label %bb3
bb2:
- %f2 = getelementptr [2 x i32]* %pi, i32 1
- %g2 = getelementptr [2 x i32]* %pi.next, i32 1
+ %f2 = getelementptr [2 x i32], [2 x i32]* %pi, i32 1
+ %g2 = getelementptr [2 x i32], [2 x i32]* %pi.next, i32 1
br label %bb3
bb3:
%ptr_phi = phi [2 x i32]* [ %f, %bb1 ], [ %f2, %bb2 ]
%ptr_phi2 = phi [2 x i32]* [ %g, %bb1 ], [ %g2, %bb2 ]
; CHECK: NoAlias: i32* %f1, i32* %g1
- %f1 = getelementptr [2 x i32]* %ptr_phi , i32 1, i32 %i
- %g1 = getelementptr [2 x i32]* %ptr_phi2 , i32 1, i32 %i
+ %f1 = getelementptr [2 x i32], [2 x i32]* %ptr_phi , i32 1, i32 %i
+ %g1 = getelementptr [2 x i32], [2 x i32]* %ptr_phi2 , i32 1, i32 %i
ret i32 0
}
diff --git a/test/Analysis/BasicAA/noalias-param.ll b/test/Analysis/BasicAA/noalias-param.ll
index 6494771..c5b1ebf 100644
--- a/test/Analysis/BasicAA/noalias-param.ll
+++ b/test/Analysis/BasicAA/noalias-param.ll
@@ -6,7 +6,7 @@ define void @no(i32* noalias %a, i32* %b) nounwind {
entry:
store i32 1, i32* %a
%cap = call i32* @captures(i32* %a) nounwind readonly
- %l = load i32* %b
+ %l = load i32, i32* %b
ret void
}
@@ -16,7 +16,7 @@ define void @yes(i32* %c, i32* %d) nounwind {
entry:
store i32 1, i32* %c
%cap = call i32* @captures(i32* %c) nounwind readonly
- %l = load i32* %d
+ %l = load i32, i32* %d
ret void
}
diff --git a/test/Analysis/BasicAA/nocapture.ll b/test/Analysis/BasicAA/nocapture.ll
index ffc0a09..26cb69b 100644
--- a/test/Analysis/BasicAA/nocapture.ll
+++ b/test/Analysis/BasicAA/nocapture.ll
@@ -6,9 +6,9 @@ define i32 @test2() {
; CHECK: ret i32 0
%P = alloca i32
%Q = call i32* @test(i32* %P)
- %a = load i32* %P
+ %a = load i32, i32* %P
store i32 4, i32* %Q ;; cannot clobber P since it is nocapture.
- %b = load i32* %P
+ %b = load i32, i32* %P
%c = sub i32 %a, %b
ret i32 %c
}
@@ -19,7 +19,7 @@ define i32 @test4(i32* noalias nocapture %p) nounwind {
; CHECK: call void @test3
; CHECK: store i32 0, i32* %p
; CHECK: store i32 1, i32* %x
-; CHECK: %y = load i32* %p
+; CHECK: %y = load i32, i32* %p
; CHECK: ret i32 %y
entry:
%q = alloca i32*
@@ -27,10 +27,10 @@ entry:
; attribute since the copy doesn't outlive the function.
call void @test3(i32** %q, i32* %p) nounwind
store i32 0, i32* %p
- %x = load i32** %q
+ %x = load i32*, i32** %q
; This store might write to %p and so we can't eliminate the subsequent
; load
store i32 1, i32* %x
- %y = load i32* %p
+ %y = load i32, i32* %p
ret i32 %y
}
diff --git a/test/Analysis/BasicAA/phi-aa.ll b/test/Analysis/BasicAA/phi-aa.ll
index c1100f1..3944e9e 100644
--- a/test/Analysis/BasicAA/phi-aa.ll
+++ b/test/Analysis/BasicAA/phi-aa.ll
@@ -25,9 +25,9 @@ bb1:
bb2:
%P = phi i32* [ @X, %bb ], [ @Y, %bb1 ]
- %tmp1 = load i32* @Z, align 4
+ %tmp1 = load i32, i32* @Z, align 4
store i32 123, i32* %P, align 4
- %tmp2 = load i32* @Z, align 4
+ %tmp2 = load i32, i32* @Z, align 4
br label %return
return:
@@ -52,23 +52,23 @@ codeRepl:
br i1 %targetBlock, label %for.body, label %bye
for.body:
- %1 = load i32* %jj7, align 4
+ %1 = load i32, i32* %jj7, align 4
%idxprom4 = zext i32 %1 to i64
- %arrayidx5 = getelementptr inbounds [100 x i32]* %oa5, i64 0, i64 %idxprom4
- %2 = load i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %oa5, i64 0, i64 %idxprom4
+ %2 = load i32, i32* %arrayidx5, align 4
%sub6 = sub i32 %2, 6
store i32 %sub6, i32* %arrayidx5, align 4
; %0 and %arrayidx5 can alias! It is not safe to DSE the above store.
- %3 = load i32* %0, align 4
+ %3 = load i32, i32* %0, align 4
store i32 %3, i32* %arrayidx5, align 4
%sub11 = add i32 %1, -1
%idxprom12 = zext i32 %sub11 to i64
- %arrayidx13 = getelementptr inbounds [100 x i32]* %oa5, i64 0, i64 %idxprom12
+ %arrayidx13 = getelementptr inbounds [100 x i32], [100 x i32]* %oa5, i64 0, i64 %idxprom12
call void @inc(i32* %jj7)
br label %codeRepl
bye:
- %.reload = load i32* %jj7, align 4
+ %.reload = load i32, i32* %jj7, align 4
ret i32 %.reload
}
diff --git a/test/Analysis/BasicAA/phi-spec-order.ll b/test/Analysis/BasicAA/phi-spec-order.ll
index 4172d09..b326dc3 100644
--- a/test/Analysis/BasicAA/phi-spec-order.ll
+++ b/test/Analysis/BasicAA/phi-spec-order.ll
@@ -14,7 +14,7 @@ for.cond2.preheader: ; preds = %for.end, %entry
br label %for.body4
for.body4: ; preds = %for.body4, %for.cond2.preheader
- %lsr.iv4 = phi [16000 x double]* [ %i11, %for.body4 ], [ bitcast (double* getelementptr inbounds ([16000 x double]* @Y, i64 0, i64 8)
+ %lsr.iv4 = phi [16000 x double]* [ %i11, %for.body4 ], [ bitcast (double* getelementptr inbounds ([16000 x double], [16000 x double]* @Y, i64 0, i64 8)
to [16000 x double]*), %for.cond2.preheader ]
%lsr.iv1 = phi [16000 x double]* [ %i10, %for.body4 ], [ @X, %for.cond2.preheader ]
@@ -23,23 +23,23 @@ for.body4: ; preds = %for.body4, %for.con
%lsr.iv = phi i32 [ %lsr.iv.next, %for.body4 ], [ 16000, %for.cond2.preheader ]
%lsr.iv46 = bitcast [16000 x double]* %lsr.iv4 to <4 x double>*
%lsr.iv12 = bitcast [16000 x double]* %lsr.iv1 to <4 x double>*
- %scevgep11 = getelementptr <4 x double>* %lsr.iv46, i64 -2
- %i6 = load <4 x double>* %scevgep11, align 32
+ %scevgep11 = getelementptr <4 x double>, <4 x double>* %lsr.iv46, i64 -2
+ %i6 = load <4 x double>, <4 x double>* %scevgep11, align 32
%add = fadd <4 x double> %i6, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
store <4 x double> %add, <4 x double>* %lsr.iv12, align 32
- %scevgep10 = getelementptr <4 x double>* %lsr.iv46, i64 -1
- %i7 = load <4 x double>* %scevgep10, align 32
+ %scevgep10 = getelementptr <4 x double>, <4 x double>* %lsr.iv46, i64 -1
+ %i7 = load <4 x double>, <4 x double>* %scevgep10, align 32
%add.4 = fadd <4 x double> %i7, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
- %scevgep9 = getelementptr <4 x double>* %lsr.iv12, i64 1
+ %scevgep9 = getelementptr <4 x double>, <4 x double>* %lsr.iv12, i64 1
store <4 x double> %add.4, <4 x double>* %scevgep9, align 32
- %i8 = load <4 x double>* %lsr.iv46, align 32
+ %i8 = load <4 x double>, <4 x double>* %lsr.iv46, align 32
%add.8 = fadd <4 x double> %i8, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
- %scevgep8 = getelementptr <4 x double>* %lsr.iv12, i64 2
+ %scevgep8 = getelementptr <4 x double>, <4 x double>* %lsr.iv12, i64 2
store <4 x double> %add.8, <4 x double>* %scevgep8, align 32
- %scevgep7 = getelementptr <4 x double>* %lsr.iv46, i64 1
- %i9 = load <4 x double>* %scevgep7, align 32
+ %scevgep7 = getelementptr <4 x double>, <4 x double>* %lsr.iv46, i64 1
+ %i9 = load <4 x double>, <4 x double>* %scevgep7, align 32
%add.12 = fadd <4 x double> %i9, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
- %scevgep3 = getelementptr <4 x double>* %lsr.iv12, i64 3
+ %scevgep3 = getelementptr <4 x double>, <4 x double>* %lsr.iv12, i64 3
store <4 x double> %add.12, <4 x double>* %scevgep3, align 32
; CHECK: NoAlias:{{[ \t]+}}<4 x double>* %scevgep11, <4 x double>* %scevgep7
@@ -50,9 +50,9 @@ for.body4: ; preds = %for.body4, %for.con
; CHECK: NoAlias:{{[ \t]+}}<4 x double>* %scevgep3, <4 x double>* %scevgep9
%lsr.iv.next = add i32 %lsr.iv, -16
- %scevgep = getelementptr [16000 x double]* %lsr.iv1, i64 0, i64 16
+ %scevgep = getelementptr [16000 x double], [16000 x double]* %lsr.iv1, i64 0, i64 16
%i10 = bitcast double* %scevgep to [16000 x double]*
- %scevgep5 = getelementptr [16000 x double]* %lsr.iv4, i64 0, i64 16
+ %scevgep5 = getelementptr [16000 x double], [16000 x double]* %lsr.iv4, i64 0, i64 16
%i11 = bitcast double* %scevgep5 to [16000 x double]*
%exitcond.15 = icmp eq i32 %lsr.iv.next, 0
br i1 %exitcond.15, label %for.end, label %for.body4
diff --git a/test/Analysis/BasicAA/phi-speculation.ll b/test/Analysis/BasicAA/phi-speculation.ll
index 5e1e118..ed0d49b 100644
--- a/test/Analysis/BasicAA/phi-speculation.ll
+++ b/test/Analysis/BasicAA/phi-speculation.ll
@@ -8,7 +8,7 @@ target datalayout =
; CHECK: NoAlias: i32* %ptr2_phi, i32* %ptr_phi
define i32 @test_noalias_1(i32* %ptr2, i32 %count, i32* %coeff) {
entry:
- %ptr = getelementptr inbounds i32* %ptr2, i64 1
+ %ptr = getelementptr inbounds i32, i32* %ptr2, i64 1
br label %while.body
while.body:
@@ -17,15 +17,15 @@ while.body:
%ptr2_phi = phi i32* [ %ptr2, %entry ], [ %ptr2_inc, %while.body ]
%result.09 = phi i32 [ 0 , %entry ], [ %add, %while.body ]
%dec = add nsw i32 %num, -1
- %0 = load i32* %ptr_phi, align 4
+ %0 = load i32, i32* %ptr_phi, align 4
store i32 %0, i32* %ptr2_phi, align 4
- %1 = load i32* %coeff, align 4
- %2 = load i32* %ptr_phi, align 4
+ %1 = load i32, i32* %coeff, align 4
+ %2 = load i32, i32* %ptr_phi, align 4
%mul = mul nsw i32 %1, %2
%add = add nsw i32 %mul, %result.09
%tobool = icmp eq i32 %dec, 0
- %ptr_inc = getelementptr inbounds i32* %ptr_phi, i64 1
- %ptr2_inc = getelementptr inbounds i32* %ptr2_phi, i64 1
+ %ptr_inc = getelementptr inbounds i32, i32* %ptr_phi, i64 1
+ %ptr2_inc = getelementptr inbounds i32, i32* %ptr2_phi, i64 1
br i1 %tobool, label %the_exit, label %while.body
the_exit:
@@ -37,7 +37,7 @@ the_exit:
; CHECK: NoAlias: i32* %ptr2_phi, i32* %ptr_phi
define i32 @test_noalias_2(i32* %ptr2, i32 %count, i32* %coeff) {
entry:
- %ptr = getelementptr inbounds i32* %ptr2, i64 1
+ %ptr = getelementptr inbounds i32, i32* %ptr2, i64 1
br label %outer.while.header
outer.while.header:
@@ -52,20 +52,20 @@ while.body:
%ptr2_phi = phi i32* [ %ptr_outer_phi2, %outer.while.header ], [ %ptr2_inc, %while.body ]
%result.09 = phi i32 [ 0 , %outer.while.header ], [ %add, %while.body ]
%dec = add nsw i32 %num, -1
- %0 = load i32* %ptr_phi, align 4
+ %0 = load i32, i32* %ptr_phi, align 4
store i32 %0, i32* %ptr2_phi, align 4
- %1 = load i32* %coeff, align 4
- %2 = load i32* %ptr_phi, align 4
+ %1 = load i32, i32* %coeff, align 4
+ %2 = load i32, i32* %ptr_phi, align 4
%mul = mul nsw i32 %1, %2
%add = add nsw i32 %mul, %result.09
%tobool = icmp eq i32 %dec, 0
- %ptr_inc = getelementptr inbounds i32* %ptr_phi, i64 1
- %ptr2_inc = getelementptr inbounds i32* %ptr2_phi, i64 1
+ %ptr_inc = getelementptr inbounds i32, i32* %ptr_phi, i64 1
+ %ptr2_inc = getelementptr inbounds i32, i32* %ptr2_phi, i64 1
br i1 %tobool, label %outer.while.backedge, label %while.body
outer.while.backedge:
- %ptr_inc_outer = getelementptr inbounds i32* %ptr_phi, i64 1
- %ptr2_inc_outer = getelementptr inbounds i32* %ptr2_phi, i64 1
+ %ptr_inc_outer = getelementptr inbounds i32, i32* %ptr_phi, i64 1
+ %ptr2_inc_outer = getelementptr inbounds i32, i32* %ptr2_phi, i64 1
%dec.outer = add nsw i32 %num.outer, -1
%br.cond = icmp eq i32 %dec.outer, 0
br i1 %br.cond, label %the_exit, label %outer.while.header
diff --git a/test/Analysis/BasicAA/pr18573.ll b/test/Analysis/BasicAA/pr18573.ll
index 1d2a316..ea5e4a2 100644
--- a/test/Analysis/BasicAA/pr18573.ll
+++ b/test/Analysis/BasicAA/pr18573.ll
@@ -10,8 +10,8 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*, <8 x i32>,
; Function Attrs: nounwind
define <8 x float> @foo1(i8* noalias readonly %arr.ptr, <8 x i32>* noalias readonly %vix.ptr, i8* noalias %t2.ptr) #1 {
allocas:
- %vix = load <8 x i32>* %vix.ptr, align 4
- %t1.ptr = getelementptr i8* %arr.ptr, i8 4
+ %vix = load <8 x i32>, <8 x i32>* %vix.ptr, align 4
+ %t1.ptr = getelementptr i8, i8* %arr.ptr, i8 4
%v1 = tail call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8* %arr.ptr, <8 x i32> %vix, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, i8 1) #2
store i8 1, i8* %t1.ptr, align 4
@@ -31,8 +31,8 @@ allocas:
; Function Attrs: nounwind
define <8 x float> @foo2(i8* noalias readonly %arr.ptr, <8 x i32>* noalias readonly %vix.ptr, i8* noalias %t2.ptr) #1 {
allocas:
- %vix = load <8 x i32>* %vix.ptr, align 4
- %t1.ptr = getelementptr i8* %arr.ptr, i8 4
+ %vix = load <8 x i32>, <8 x i32>* %vix.ptr, align 4
+ %t1.ptr = getelementptr i8, i8* %arr.ptr, i8 4
%v1 = tail call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8* %arr.ptr, <8 x i32> %vix, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, i8 1) #2
store i8 1, i8* %t2.ptr, align 4
diff --git a/test/Analysis/BasicAA/store-promote.ll b/test/Analysis/BasicAA/store-promote.ll
index 0db805c..afe11c2 100644
--- a/test/Analysis/BasicAA/store-promote.ll
+++ b/test/Analysis/BasicAA/store-promote.ll
@@ -10,11 +10,11 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1
@C = global [2 x i32] [ i32 4, i32 8 ] ; <[2 x i32]*> [#uses=2]
define i32 @test1(i1 %c) {
- %Atmp = load i32* @A ; <i32> [#uses=2]
+ %Atmp = load i32, i32* @A ; <i32> [#uses=2]
br label %Loop
Loop: ; preds = %Loop, %0
- %ToRemove = load i32* @A ; <i32> [#uses=1]
+ %ToRemove = load i32, i32* @A ; <i32> [#uses=1]
store i32 %Atmp, i32* @B
br i1 %c, label %Out, label %Loop
@@ -24,7 +24,7 @@ Out: ; preds = %Loop
; The Loop block should be empty after the load/store are promoted.
; CHECK: @test1
-; CHECK: load i32* @A
+; CHECK: load i32, i32* @A
; CHECK: Loop:
; CHECK-NEXT: br i1 %c, label %Out, label %Loop
; CHECK: Out:
@@ -35,11 +35,11 @@ define i32 @test2(i1 %c) {
br label %Loop
Loop: ; preds = %Loop, %0
- %AVal = load i32* @A ; <i32> [#uses=2]
- %C0 = getelementptr [2 x i32]* @C, i64 0, i64 0 ; <i32*> [#uses=1]
+ %AVal = load i32, i32* @A ; <i32> [#uses=2]
+ %C0 = getelementptr [2 x i32], [2 x i32]* @C, i64 0, i64 0 ; <i32*> [#uses=1]
store i32 %AVal, i32* %C0
- %BVal = load i32* @B ; <i32> [#uses=2]
- %C1 = getelementptr [2 x i32]* @C, i64 0, i64 1 ; <i32*> [#uses=1]
+ %BVal = load i32, i32* @B ; <i32> [#uses=2]
+ %C1 = getelementptr [2 x i32], [2 x i32]* @C, i64 0, i64 1 ; <i32*> [#uses=1]
store i32 %BVal, i32* %C1
br i1 %c, label %Out, label %Loop
diff --git a/test/Analysis/BasicAA/struct-geps.ll b/test/Analysis/BasicAA/struct-geps.ll
index 3764d48..d63c71a 100644
--- a/test/Analysis/BasicAA/struct-geps.ll
+++ b/test/Analysis/BasicAA/struct-geps.ll
@@ -27,9 +27,9 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; CHECK-DAG: MustAlias: i32* %y, i80* %y_10
define void @test_simple(%struct* %st, i64 %i, i64 %j, i64 %k) {
- %x = getelementptr %struct* %st, i64 %i, i32 0
- %y = getelementptr %struct* %st, i64 %j, i32 1
- %z = getelementptr %struct* %st, i64 %k, i32 2
+ %x = getelementptr %struct, %struct* %st, i64 %i, i32 0
+ %y = getelementptr %struct, %struct* %st, i64 %j, i32 1
+ %z = getelementptr %struct, %struct* %st, i64 %k, i32 2
%y_12 = bitcast i32* %y to %struct*
%y_10 = bitcast i32* %y to i80*
%y_8 = bitcast i32* %y to i64*
@@ -59,9 +59,9 @@ define void @test_simple(%struct* %st, i64 %i, i64 %j, i64 %k) {
; CHECK-DAG: MustAlias: i32* %y, i80* %y_10
define void @test_in_array([1 x %struct]* %st, i64 %i, i64 %j, i64 %k, i64 %i1, i64 %j1, i64 %k1) {
- %x = getelementptr [1 x %struct]* %st, i64 %i, i64 %i1, i32 0
- %y = getelementptr [1 x %struct]* %st, i64 %j, i64 %j1, i32 1
- %z = getelementptr [1 x %struct]* %st, i64 %k, i64 %k1, i32 2
+ %x = getelementptr [1 x %struct], [1 x %struct]* %st, i64 %i, i64 %i1, i32 0
+ %y = getelementptr [1 x %struct], [1 x %struct]* %st, i64 %j, i64 %j1, i32 1
+ %z = getelementptr [1 x %struct], [1 x %struct]* %st, i64 %k, i64 %k1, i32 2
%y_12 = bitcast i32* %y to %struct*
%y_10 = bitcast i32* %y to i80*
%y_8 = bitcast i32* %y to i64*
@@ -91,9 +91,9 @@ define void @test_in_array([1 x %struct]* %st, i64 %i, i64 %j, i64 %k, i64 %i1,
; CHECK-DAG: MustAlias: i32* %y, i80* %y_10
define void @test_in_3d_array([1 x [1 x [1 x %struct]]]* %st, i64 %i, i64 %j, i64 %k, i64 %i1, i64 %j1, i64 %k1, i64 %i2, i64 %j2, i64 %k2, i64 %i3, i64 %j3, i64 %k3) {
- %x = getelementptr [1 x [1 x [1 x %struct]]]* %st, i64 %i, i64 %i1, i64 %i2, i64 %i3, i32 0
- %y = getelementptr [1 x [1 x [1 x %struct]]]* %st, i64 %j, i64 %j1, i64 %j2, i64 %j3, i32 1
- %z = getelementptr [1 x [1 x [1 x %struct]]]* %st, i64 %k, i64 %k1, i64 %k2, i64 %k3, i32 2
+ %x = getelementptr [1 x [1 x [1 x %struct]]], [1 x [1 x [1 x %struct]]]* %st, i64 %i, i64 %i1, i64 %i2, i64 %i3, i32 0
+ %y = getelementptr [1 x [1 x [1 x %struct]]], [1 x [1 x [1 x %struct]]]* %st, i64 %j, i64 %j1, i64 %j2, i64 %j3, i32 1
+ %z = getelementptr [1 x [1 x [1 x %struct]]], [1 x [1 x [1 x %struct]]]* %st, i64 %k, i64 %k1, i64 %k2, i64 %k3, i32 2
%y_12 = bitcast i32* %y to %struct*
%y_10 = bitcast i32* %y to i80*
%y_8 = bitcast i32* %y to i64*
@@ -116,13 +116,13 @@ define void @test_in_3d_array([1 x [1 x [1 x %struct]]]* %st, i64 %i, i64 %j, i6
; CHECK-DAG: PartialAlias: i32* %y2, i32* %z
define void @test_same_underlying_object_same_indices(%struct* %st, i64 %i, i64 %j, i64 %k) {
- %st2 = getelementptr %struct* %st, i32 10
- %x2 = getelementptr %struct* %st2, i64 %i, i32 0
- %y2 = getelementptr %struct* %st2, i64 %j, i32 1
- %z2 = getelementptr %struct* %st2, i64 %k, i32 2
- %x = getelementptr %struct* %st, i64 %i, i32 0
- %y = getelementptr %struct* %st, i64 %j, i32 1
- %z = getelementptr %struct* %st, i64 %k, i32 2
+ %st2 = getelementptr %struct, %struct* %st, i32 10
+ %x2 = getelementptr %struct, %struct* %st2, i64 %i, i32 0
+ %y2 = getelementptr %struct, %struct* %st2, i64 %j, i32 1
+ %z2 = getelementptr %struct, %struct* %st2, i64 %k, i32 2
+ %x = getelementptr %struct, %struct* %st, i64 %i, i32 0
+ %y = getelementptr %struct, %struct* %st, i64 %j, i32 1
+ %z = getelementptr %struct, %struct* %st, i64 %k, i32 2
ret void
}
@@ -142,13 +142,13 @@ define void @test_same_underlying_object_same_indices(%struct* %st, i64 %i, i64
; CHECK-DAG: PartialAlias: i32* %y2, i32* %z
define void @test_same_underlying_object_different_indices(%struct* %st, i64 %i1, i64 %j1, i64 %k1, i64 %i2, i64 %k2, i64 %j2) {
- %st2 = getelementptr %struct* %st, i32 10
- %x2 = getelementptr %struct* %st2, i64 %i2, i32 0
- %y2 = getelementptr %struct* %st2, i64 %j2, i32 1
- %z2 = getelementptr %struct* %st2, i64 %k2, i32 2
- %x = getelementptr %struct* %st, i64 %i1, i32 0
- %y = getelementptr %struct* %st, i64 %j1, i32 1
- %z = getelementptr %struct* %st, i64 %k1, i32 2
+ %st2 = getelementptr %struct, %struct* %st, i32 10
+ %x2 = getelementptr %struct, %struct* %st2, i64 %i2, i32 0
+ %y2 = getelementptr %struct, %struct* %st2, i64 %j2, i32 1
+ %z2 = getelementptr %struct, %struct* %st2, i64 %k2, i32 2
+ %x = getelementptr %struct, %struct* %st, i64 %i1, i32 0
+ %y = getelementptr %struct, %struct* %st, i64 %j1, i32 1
+ %z = getelementptr %struct, %struct* %st, i64 %k1, i32 2
ret void
}
@@ -158,7 +158,7 @@ define void @test_same_underlying_object_different_indices(%struct* %st, i64 %i1
; CHECK-LABEL: test_struct_in_array
; CHECK-DAG: MustAlias: i32* %x, i32* %y
define void @test_struct_in_array(%struct2* %st, i64 %i, i64 %j, i64 %k) {
- %x = getelementptr %struct2* %st, i32 0, i32 1, i32 1, i32 0
- %y = getelementptr %struct2* %st, i32 0, i32 0, i32 1, i32 1
+ %x = getelementptr %struct2, %struct2* %st, i32 0, i32 1, i32 1, i32 0
+ %y = getelementptr %struct2, %struct2* %st, i32 0, i32 0, i32 1, i32 1
ret void
}
diff --git a/test/Analysis/BasicAA/tailcall-modref.ll b/test/Analysis/BasicAA/tailcall-modref.ll
index ebeb28c..5857e68 100644
--- a/test/Analysis/BasicAA/tailcall-modref.ll
+++ b/test/Analysis/BasicAA/tailcall-modref.ll
@@ -4,9 +4,9 @@ define i32 @test() {
; CHECK: ret i32 0
%A = alloca i32 ; <i32*> [#uses=3]
call void @foo( i32* %A )
- %X = load i32* %A ; <i32> [#uses=1]
+ %X = load i32, i32* %A ; <i32> [#uses=1]
tail call void @bar( )
- %Y = load i32* %A ; <i32> [#uses=1]
+ %Y = load i32, i32* %A ; <i32> [#uses=1]
%Z = sub i32 %X, %Y ; <i32> [#uses=1]
ret i32 %Z
}
diff --git a/test/Analysis/BasicAA/underlying-value.ll b/test/Analysis/BasicAA/underlying-value.ll
index 0671c82..0cfbdb8 100644
--- a/test/Analysis/BasicAA/underlying-value.ll
+++ b/test/Analysis/BasicAA/underlying-value.ll
@@ -14,10 +14,10 @@ for.cond2: ; preds = %for.body5, %for.con
br i1 false, label %for.body5, label %for.cond
for.body5: ; preds = %for.cond2
- %arrayidx = getelementptr inbounds [2 x i64]* undef, i32 0, i64 0
- %tmp7 = load i64* %arrayidx, align 8
- %arrayidx9 = getelementptr inbounds [2 x i64]* undef, i32 0, i64 undef
- %tmp10 = load i64* %arrayidx9, align 8
+ %arrayidx = getelementptr inbounds [2 x i64], [2 x i64]* undef, i32 0, i64 0
+ %tmp7 = load i64, i64* %arrayidx, align 8
+ %arrayidx9 = getelementptr inbounds [2 x i64], [2 x i64]* undef, i32 0, i64 undef
+ %tmp10 = load i64, i64* %arrayidx9, align 8
br label %for.cond2
for.end22: ; preds = %for.cond
diff --git a/test/Analysis/BasicAA/unreachable-block.ll b/test/Analysis/BasicAA/unreachable-block.ll
index 1ca1e66..551d18e 100644
--- a/test/Analysis/BasicAA/unreachable-block.ll
+++ b/test/Analysis/BasicAA/unreachable-block.ll
@@ -11,6 +11,6 @@ bb:
%t = select i1 undef, i32* %t, i32* undef
%p = select i1 undef, i32* %p, i32* %p
%q = select i1 undef, i32* undef, i32* %p
- %a = getelementptr i8* %a, i32 0
+ %a = getelementptr i8, i8* %a, i32 0
unreachable
}
diff --git a/test/Analysis/BasicAA/zext.ll b/test/Analysis/BasicAA/zext.ll
index b59d16c..ed35656 100644
--- a/test/Analysis/BasicAA/zext.ll
+++ b/test/Analysis/BasicAA/zext.ll
@@ -7,10 +7,10 @@ target triple = "x86_64-unknown-linux-gnu"
define void @test_with_zext() {
%1 = tail call i8* @malloc(i64 120)
- %a = getelementptr inbounds i8* %1, i64 8
- %2 = getelementptr inbounds i8* %1, i64 16
+ %a = getelementptr inbounds i8, i8* %1, i64 8
+ %2 = getelementptr inbounds i8, i8* %1, i64 16
%3 = zext i32 3 to i64
- %b = getelementptr inbounds i8* %2, i64 %3
+ %b = getelementptr inbounds i8, i8* %2, i64 %3
ret void
}
@@ -19,10 +19,10 @@ define void @test_with_zext() {
define void @test_with_lshr(i64 %i) {
%1 = tail call i8* @malloc(i64 120)
- %a = getelementptr inbounds i8* %1, i64 8
- %2 = getelementptr inbounds i8* %1, i64 16
+ %a = getelementptr inbounds i8, i8* %1, i64 8
+ %2 = getelementptr inbounds i8, i8* %1, i64 16
%3 = lshr i64 %i, 2
- %b = getelementptr inbounds i8* %2, i64 %3
+ %b = getelementptr inbounds i8, i8* %2, i64 %3
ret void
}
@@ -34,10 +34,10 @@ define void @test_with_a_loop(i8* %mem) {
for.loop:
%i = phi i32 [ 0, %0 ], [ %i.plus1, %for.loop ]
- %a = getelementptr inbounds i8* %mem, i64 8
- %a.plus1 = getelementptr inbounds i8* %mem, i64 16
+ %a = getelementptr inbounds i8, i8* %mem, i64 8
+ %a.plus1 = getelementptr inbounds i8, i8* %mem, i64 16
%i.64 = zext i32 %i to i64
- %b = getelementptr inbounds i8* %a.plus1, i64 %i.64
+ %b = getelementptr inbounds i8, i8* %a.plus1, i64 %i.64
%i.plus1 = add nuw nsw i32 %i, 1
%cmp = icmp eq i32 %i.plus1, 10
br i1 %cmp, label %for.loop.exit, label %for.loop
@@ -55,12 +55,12 @@ define void @test_with_varying_base_pointer_in_loop(i8* %mem.orig) {
for.loop:
%mem = phi i8* [ %mem.orig, %0 ], [ %mem.plus1, %for.loop ]
%i = phi i32 [ 0, %0 ], [ %i.plus1, %for.loop ]
- %a = getelementptr inbounds i8* %mem, i64 8
- %a.plus1 = getelementptr inbounds i8* %mem, i64 16
+ %a = getelementptr inbounds i8, i8* %mem, i64 8
+ %a.plus1 = getelementptr inbounds i8, i8* %mem, i64 16
%i.64 = zext i32 %i to i64
- %b = getelementptr inbounds i8* %a.plus1, i64 %i.64
+ %b = getelementptr inbounds i8, i8* %a.plus1, i64 %i.64
%i.plus1 = add nuw nsw i32 %i, 1
- %mem.plus1 = getelementptr inbounds i8* %mem, i64 8
+ %mem.plus1 = getelementptr inbounds i8, i8* %mem, i64 8
%cmp = icmp eq i32 %i.plus1, 10
br i1 %cmp, label %for.loop.exit, label %for.loop
@@ -74,10 +74,10 @@ for.loop.exit:
define void @test_sign_extension(i32 %p) {
%1 = tail call i8* @malloc(i64 120)
%p.64 = zext i32 %p to i64
- %a = getelementptr inbounds i8* %1, i64 %p.64
+ %a = getelementptr inbounds i8, i8* %1, i64 %p.64
%p.minus1 = add i32 %p, -1
%p.minus1.64 = zext i32 %p.minus1 to i64
- %b.i8 = getelementptr inbounds i8* %1, i64 %p.minus1.64
+ %b.i8 = getelementptr inbounds i8, i8* %1, i64 %p.minus1.64
%b.i64 = bitcast i8* %b.i8 to i64*
ret void
}
@@ -91,13 +91,13 @@ define void @test_fe_tools([8 x i32]* %values) {
for.loop:
%i = phi i32 [ 0, %reorder ], [ %i.next, %for.loop ]
%idxprom = zext i32 %i to i64
- %b = getelementptr inbounds [8 x i32]* %values, i64 0, i64 %idxprom
+ %b = getelementptr inbounds [8 x i32], [8 x i32]* %values, i64 0, i64 %idxprom
%i.next = add nuw nsw i32 %i, 1
%1 = icmp eq i32 %i.next, 10
br i1 %1, label %for.loop.exit, label %for.loop
reorder:
- %a = getelementptr inbounds [8 x i32]* %values, i64 0, i64 1
+ %a = getelementptr inbounds [8 x i32], [8 x i32]* %values, i64 0, i64 1
br label %for.loop
for.loop.exit:
@@ -112,7 +112,7 @@ for.loop.exit:
define void @test_spec2006() {
%h = alloca [1 x [2 x i32*]], align 16
- %d.val = load i32* @d, align 4
+ %d.val = load i32, i32* @d, align 4
%d.promoted = sext i32 %d.val to i64
%1 = icmp slt i32 %d.val, 2
br i1 %1, label %.lr.ph, label %3
@@ -123,13 +123,13 @@ define void @test_spec2006() {
; <label>:2 ; preds = %.lr.ph, %2
%i = phi i32 [ %d.val, %.lr.ph ], [ %i.plus1, %2 ]
%i.promoted = sext i32 %i to i64
- %x = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 %d.promoted, i64 %i.promoted
+ %x = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 %d.promoted, i64 %i.promoted
%i.plus1 = add nsw i32 %i, 1
%cmp = icmp slt i32 %i.plus1, 2
br i1 %cmp, label %2, label %3
; <label>:3 ; preds = %._crit_edge, %0
- %y = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
+ %y = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
ret void
}
@@ -138,8 +138,8 @@ define void @test_spec2006() {
define void @test_modulo_analysis_easy_case(i64 %i) {
%h = alloca [1 x [2 x i32*]], align 16
- %x = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 %i, i64 0
- %y = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
+ %x = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 %i, i64 0
+ %y = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
ret void
}
@@ -153,8 +153,8 @@ define void @test_modulo_analysis_in_loop() {
for.loop:
%i = phi i32 [ 0, %0 ], [ %i.plus1, %for.loop ]
%i.promoted = sext i32 %i to i64
- %x = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 %i.promoted, i64 0
- %y = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
+ %x = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 %i.promoted, i64 0
+ %y = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
%i.plus1 = add nsw i32 %i, 1
%cmp = icmp slt i32 %i.plus1, 2
br i1 %cmp, label %for.loop, label %for.loop.exit
@@ -168,15 +168,15 @@ for.loop.exit:
define void @test_modulo_analysis_with_global() {
%h = alloca [1 x [2 x i32*]], align 16
- %b = load i32* @b, align 4
+ %b = load i32, i32* @b, align 4
%b.promoted = sext i32 %b to i64
br label %for.loop
for.loop:
%i = phi i32 [ 0, %0 ], [ %i.plus1, %for.loop ]
%i.promoted = sext i32 %i to i64
- %x = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 %i.promoted, i64 %b.promoted
- %y = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
+ %x = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 %i.promoted, i64 %b.promoted
+ %y = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
%i.plus1 = add nsw i32 %i, 1
%cmp = icmp slt i32 %i.plus1, 2
br i1 %cmp, label %for.loop, label %for.loop.exit
@@ -188,10 +188,10 @@ for.loop.exit:
; CHECK-LABEL: test_const_eval
; CHECK: NoAlias: i8* %a, i8* %b
define void @test_const_eval(i8* %ptr, i64 %offset) {
- %a = getelementptr inbounds i8* %ptr, i64 %offset
- %a.dup = getelementptr inbounds i8* %ptr, i64 %offset
+ %a = getelementptr inbounds i8, i8* %ptr, i64 %offset
+ %a.dup = getelementptr inbounds i8, i8* %ptr, i64 %offset
%three = zext i32 3 to i64
- %b = getelementptr inbounds i8* %a.dup, i64 %three
+ %b = getelementptr inbounds i8, i8* %a.dup, i64 %three
ret void
}
@@ -200,8 +200,8 @@ define void @test_const_eval(i8* %ptr, i64 %offset) {
define void @test_const_eval_scaled(i8* %ptr) {
%three = zext i32 3 to i64
%six = mul i64 %three, 2
- %a = getelementptr inbounds i8* %ptr, i64 %six
- %b = getelementptr inbounds i8* %ptr, i64 6
+ %a = getelementptr inbounds i8, i8* %ptr, i64 %six
+ %b = getelementptr inbounds i8, i8* %ptr, i64 6
ret void
}
diff --git a/test/Analysis/BlockFrequencyInfo/basic.ll b/test/Analysis/BlockFrequencyInfo/basic.ll
index 1c24176..728adf0 100644
--- a/test/Analysis/BlockFrequencyInfo/basic.ll
+++ b/test/Analysis/BlockFrequencyInfo/basic.ll
@@ -12,8 +12,8 @@ entry:
body:
%iv = phi i32 [ 0, %entry ], [ %next, %body ]
%base = phi i32 [ 0, %entry ], [ %sum, %body ]
- %arrayidx = getelementptr inbounds i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
%exitcond = icmp eq i32 %next, %i
diff --git a/test/Analysis/BranchProbabilityInfo/basic.ll b/test/Analysis/BranchProbabilityInfo/basic.ll
index 5915ed1..0f66911 100644
--- a/test/Analysis/BranchProbabilityInfo/basic.ll
+++ b/test/Analysis/BranchProbabilityInfo/basic.ll
@@ -9,8 +9,8 @@ entry:
body:
%iv = phi i32 [ 0, %entry ], [ %next, %body ]
%base = phi i32 [ 0, %entry ], [ %sum, %body ]
- %arrayidx = getelementptr inbounds i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
%exitcond = icmp eq i32 %next, %i
@@ -153,8 +153,8 @@ define i32 @test_cold_call_sites(i32* %a) {
; CHECK: edge entry -> else probability is 64 / 68 = 94.1176% [HOT edge]
entry:
- %gep1 = getelementptr i32* %a, i32 1
- %val1 = load i32* %gep1
+ %gep1 = getelementptr i32, i32* %a, i32 1
+ %val1 = load i32, i32* %gep1
%cond1 = icmp ugt i32 %val1, 1
br i1 %cond1, label %then, label %else
@@ -164,8 +164,8 @@ then:
br label %exit
else:
- %gep2 = getelementptr i32* %a, i32 2
- %val2 = load i32* %gep2
+ %gep2 = getelementptr i32, i32* %a, i32 2
+ %val2 = load i32, i32* %gep2
%val3 = call i32 @regular_function(i32 %val2)
br label %exit
diff --git a/test/Analysis/BranchProbabilityInfo/loop.ll b/test/Analysis/BranchProbabilityInfo/loop.ll
index 40f1111..e792790 100644
--- a/test/Analysis/BranchProbabilityInfo/loop.ll
+++ b/test/Analysis/BranchProbabilityInfo/loop.ll
@@ -88,7 +88,7 @@ entry:
do.body:
%i.0 = phi i32 [ 0, %entry ], [ %inc4, %if.end ]
call void @g1()
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp = icmp slt i32 %0, 42
br i1 %cmp, label %do.body1, label %if.end
; CHECK: edge do.body -> do.body1 probability is 16 / 32 = 50%
@@ -124,7 +124,7 @@ entry:
do.body:
%i.0 = phi i32 [ 0, %entry ], [ %inc4, %do.end ]
call void @g1()
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp = icmp slt i32 %0, 42
br i1 %cmp, label %return, label %do.body1
; CHECK: edge do.body -> return probability is 4 / 128
@@ -169,7 +169,7 @@ do.body:
do.body1:
%j.0 = phi i32 [ 0, %do.body ], [ %inc, %if.end ]
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp = icmp slt i32 %0, 42
br i1 %cmp, label %return, label %if.end
; CHECK: edge do.body1 -> return probability is 4 / 128
@@ -214,7 +214,7 @@ do.body:
do.body1:
%j.0 = phi i32 [ 0, %do.body ], [ %inc, %do.cond ]
call void @g2()
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp = icmp slt i32 %0, 42
br i1 %cmp, label %return, label %do.cond
; CHECK: edge do.body1 -> return probability is 4 / 128
@@ -258,7 +258,7 @@ for.body.lr.ph:
for.body:
%i.011 = phi i32 [ 0, %for.body.lr.ph ], [ %inc6, %for.inc5 ]
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp1 = icmp eq i32 %0, %i.011
br i1 %cmp1, label %for.inc5, label %if.end
; CHECK: edge for.body -> for.inc5 probability is 16 / 32 = 50%
@@ -305,8 +305,8 @@ entry:
for.body.lr.ph:
%cmp216 = icmp sgt i32 %b, 0
- %arrayidx5 = getelementptr inbounds i32* %c, i64 1
- %arrayidx9 = getelementptr inbounds i32* %c, i64 2
+ %arrayidx5 = getelementptr inbounds i32, i32* %c, i64 1
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i64 2
br label %for.body
; CHECK: edge for.body.lr.ph -> for.body probability is 16 / 16 = 100%
@@ -319,21 +319,21 @@ for.body:
for.body3:
%j.017 = phi i32 [ 0, %for.body ], [ %inc, %for.inc ]
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp4 = icmp eq i32 %0, %j.017
br i1 %cmp4, label %for.inc, label %if.end
; CHECK: edge for.body3 -> for.inc probability is 16 / 32 = 50%
; CHECK: edge for.body3 -> if.end probability is 16 / 32 = 50%
if.end:
- %1 = load i32* %arrayidx5, align 4
+ %1 = load i32, i32* %arrayidx5, align 4
%cmp6 = icmp eq i32 %1, %j.017
br i1 %cmp6, label %for.inc, label %if.end8
; CHECK: edge if.end -> for.inc probability is 16 / 32 = 50%
; CHECK: edge if.end -> if.end8 probability is 16 / 32 = 50%
if.end8:
- %2 = load i32* %arrayidx9, align 4
+ %2 = load i32, i32* %arrayidx9, align 4
%cmp10 = icmp eq i32 %2, %j.017
br i1 %cmp10, label %for.inc, label %if.end12
; CHECK: edge if.end8 -> for.inc probability is 16 / 32 = 50%
diff --git a/test/Analysis/BranchProbabilityInfo/pr18705.ll b/test/Analysis/BranchProbabilityInfo/pr18705.ll
index 9f239b4..aff08a6 100644
--- a/test/Analysis/BranchProbabilityInfo/pr18705.ll
+++ b/test/Analysis/BranchProbabilityInfo/pr18705.ll
@@ -22,23 +22,23 @@ while.body:
%d.addr.010 = phi i32* [ %d, %while.body.lr.ph ], [ %incdec.ptr4, %if.end ]
%c.addr.09 = phi i32* [ %c, %while.body.lr.ph ], [ %c.addr.1, %if.end ]
%indvars.iv.next = add nsw i64 %indvars.iv, -1
- %arrayidx = getelementptr inbounds float* %f0, i64 %indvars.iv.next
- %1 = load float* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds float* %f1, i64 %indvars.iv.next
- %2 = load float* %arrayidx2, align 4
+ %arrayidx = getelementptr inbounds float, float* %f0, i64 %indvars.iv.next
+ %1 = load float, float* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds float, float* %f1, i64 %indvars.iv.next
+ %2 = load float, float* %arrayidx2, align 4
%cmp = fcmp une float %1, %2
br i1 %cmp, label %if.then, label %if.else
if.then:
- %incdec.ptr = getelementptr inbounds i32* %b.addr.011, i64 1
- %3 = load i32* %b.addr.011, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %b.addr.011, i64 1
+ %3 = load i32, i32* %b.addr.011, align 4
%add = add nsw i32 %3, 12
store i32 %add, i32* %b.addr.011, align 4
br label %if.end
if.else:
- %incdec.ptr3 = getelementptr inbounds i32* %c.addr.09, i64 1
- %4 = load i32* %c.addr.09, align 4
+ %incdec.ptr3 = getelementptr inbounds i32, i32* %c.addr.09, i64 1
+ %4 = load i32, i32* %c.addr.09, align 4
%sub = add nsw i32 %4, -13
store i32 %sub, i32* %c.addr.09, align 4
br label %if.end
@@ -46,7 +46,7 @@ if.else:
if.end:
%c.addr.1 = phi i32* [ %c.addr.09, %if.then ], [ %incdec.ptr3, %if.else ]
%b.addr.1 = phi i32* [ %incdec.ptr, %if.then ], [ %b.addr.011, %if.else ]
- %incdec.ptr4 = getelementptr inbounds i32* %d.addr.010, i64 1
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %d.addr.010, i64 1
store i32 14, i32* %d.addr.010, align 4
%5 = trunc i64 %indvars.iv.next to i32
%tobool = icmp eq i32 %5, 0
diff --git a/test/Analysis/CFLAliasAnalysis/asm-global-bugfix.ll b/test/Analysis/CFLAliasAnalysis/asm-global-bugfix.ll
index d8ee94b..ec2de54 100644
--- a/test/Analysis/CFLAliasAnalysis/asm-global-bugfix.ll
+++ b/test/Analysis/CFLAliasAnalysis/asm-global-bugfix.ll
@@ -7,10 +7,10 @@
@G = private unnamed_addr constant [1 x i8] c"\00", align 1
; CHECK: Function: test_no_crash
-; CHECK: 1 no alias responses
+; CHECK: 0 no alias responses
define void @test_no_crash() #0 {
entry:
call i8* asm "nop", "=r,r"(
- i8* getelementptr inbounds ([1 x i8]* @G, i64 0, i64 0))
+ i8* getelementptr inbounds ([1 x i8], [1 x i8]* @G, i64 0, i64 0))
ret void
}
diff --git a/test/Analysis/CFLAliasAnalysis/branch-alias.ll b/test/Analysis/CFLAliasAnalysis/branch-alias.ll
new file mode 100644
index 0000000..8307462
--- /dev/null
+++ b/test/Analysis/CFLAliasAnalysis/branch-alias.ll
@@ -0,0 +1,73 @@
+; Makes sure that we give up on some pathological cases with inttoptr/ptrtoint
+;
+; @ptr_test was generated from the following C code:
+; void ptr_test() {
+; int* A;
+; unsigned long RefCopy = 0;
+; for (int i = 0; i < 8*sizeof(&A); ++i) {
+; if ((unsigned long)&A & (1UL << i))
+; RefCopy |= 1UL << i;
+; }
+;
+; int** AliasA1 = (int**)RefCopy;
+; int* ShouldAliasA = *AliasA1;
+; }
+
+; RUN: opt < %s -cfl-aa -aa-eval -print-may-aliases -disable-output 2>&1 | FileCheck %s
+
+; CHECK: Function: ptr_test
+define void @ptr_test() #0 {
+ ; CHECK: MayAlias: i32** %A, i32** %ShouldAliasA
+ ; CHECK-NOT: %AliasA1
+entry:
+ %A = alloca i32*, align 8
+ %RefCopy = alloca i64, align 8
+ %i = alloca i32, align 4
+ %AliasA1 = alloca i32**, align 8
+ %ShouldAliasA = alloca i32*, align 8
+ store i64 0, i64* %RefCopy, align 8
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32, i32* %i, align 4
+ %conv = sext i32 %0 to i64
+ %cmp = icmp ult i64 %conv, 64
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = ptrtoint i32** %A to i64
+ %2 = load i32, i32* %i, align 4
+ %sh_prom = zext i32 %2 to i64
+ %shl = shl i64 1, %sh_prom
+ %and = and i64 %1, %shl
+ %tobool = icmp ne i64 %and, 0
+ br i1 %tobool, label %if.then, label %if.end
+
+if.then: ; preds = %for.body
+ %3 = load i32, i32* %i, align 4
+ %sh_prom2 = zext i32 %3 to i64
+ %shl3 = shl i64 1, %sh_prom2
+ %4 = load i64, i64* %RefCopy, align 8
+ %or = or i64 %4, %shl3
+ store i64 %or, i64* %RefCopy, align 8
+ br label %if.end
+
+if.end: ; preds = %if.then, %for.body
+ br label %for.inc
+
+for.inc: ; preds = %if.end
+ %5 = load i32, i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i64, i64* %RefCopy, align 8
+ %7 = inttoptr i64 %6 to i32**
+ store i32** %7, i32*** %AliasA1, align 8
+ %8 = load i32**, i32*** %AliasA1, align 8
+ %9 = load i32*, i32** %8, align 8
+ store i32* %9, i32** %ShouldAliasA, align 8
+ ret void
+}
diff --git a/test/Analysis/CFLAliasAnalysis/const-expr-gep.ll b/test/Analysis/CFLAliasAnalysis/const-expr-gep.ll
index 9ae200b..c7ff407 100644
--- a/test/Analysis/CFLAliasAnalysis/const-expr-gep.ll
+++ b/test/Analysis/CFLAliasAnalysis/const-expr-gep.ll
@@ -7,15 +7,51 @@
%T = type { i32, [10 x i8] }
@G = external global %T
+@G2 = external global %T
-; CHECK: Function: test
-; CHECK-NOT: May:
+; TODO: Quite a few of these are MayAlias because we don't yet consider
+; constant offsets in CFLAA. If we start doing so, then we'll need to
+; change these test cases
+; CHECK: Function: test
+; CHECK: MayAlias: i32* %D, i32* %F
+; CHECK: MayAlias: i32* %D, i8* %X
+; CHECK: MayAlias: i32* %F, i8* %X
define void @test() {
- %D = getelementptr %T* @G, i64 0, i32 0
- %E = getelementptr %T* @G, i64 0, i32 1, i64 5
- %F = getelementptr i32* getelementptr (%T* @G, i64 0, i32 0), i64 0
- %X = getelementptr [10 x i8]* getelementptr (%T* @G, i64 0, i32 1), i64 0, i64 5
+ %D = getelementptr %T, %T* @G, i64 0, i32 0
+ %F = getelementptr i32, i32* getelementptr (%T, %T* @G, i64 0, i32 0), i64 0
+ %X = getelementptr [10 x i8], [10 x i8]* getelementptr (%T, %T* @G, i64 0, i32 1), i64 0, i64 5
+
+ ret void
+}
+
+; CHECK: Function: simplecheck
+; CHECK: MayAlias: i32* %F, i32* %arg0
+; CHECK: MayAlias: i32* %H, i32* %arg0
+; CHECK: MayAlias: i32* %F, i32* %H
+define void @simplecheck(i32* %arg0) {
+ %F = getelementptr i32, i32* getelementptr (%T, %T* @G, i64 0, i32 0), i64 0
+ %H = getelementptr %T, %T* @G2, i64 0, i32 0
+
+ ret void
+}
+
+; Ensure that CFLAA properly identifies and handles escaping variables (i.e.
+; globals) in nested ConstantExprs
+
+; CHECK: Function: checkNesting
+; CHECK: MayAlias: i32* %A, i32* %arg0
+
+%NestedT = type { [1 x [1 x i32]] }
+@NT = external global %NestedT
+define void @checkNesting(i32* %arg0) {
+ %A = getelementptr [1 x i32],
+ [1 x i32]* getelementptr
+ ([1 x [1 x i32]], [1 x [1 x i32]]* getelementptr (%NestedT, %NestedT* @NT, i64 0, i32 0),
+ i64 0,
+ i32 0),
+ i64 0,
+ i32 0
ret void
}
diff --git a/test/Analysis/CFLAliasAnalysis/constant-over-index.ll b/test/Analysis/CFLAliasAnalysis/constant-over-index.ll
index fb44b95..a8e00aa 100644
--- a/test/Analysis/CFLAliasAnalysis/constant-over-index.ll
+++ b/test/Analysis/CFLAliasAnalysis/constant-over-index.ll
@@ -10,13 +10,13 @@
define void @foo([3 x [3 x double]]* noalias %p) {
entry:
- %p3 = getelementptr [3 x [3 x double]]* %p, i64 0, i64 0, i64 3
+ %p3 = getelementptr [3 x [3 x double]], [3 x [3 x double]]* %p, i64 0, i64 0, i64 3
br label %loop
loop:
%i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
- %p.0.i.0 = getelementptr [3 x [3 x double]]* %p, i64 0, i64 %i, i64 0
+ %p.0.i.0 = getelementptr [3 x [3 x double]], [3 x [3 x double]]* %p, i64 0, i64 %i, i64 0
store volatile double 0.0, double* %p3
store volatile double 0.1, double* %p.0.i.0
diff --git a/test/Analysis/CFLAliasAnalysis/full-store-partial-alias.ll b/test/Analysis/CFLAliasAnalysis/full-store-partial-alias.ll
index 21edfc2..adacf04 100644
--- a/test/Analysis/CFLAliasAnalysis/full-store-partial-alias.ll
+++ b/test/Analysis/CFLAliasAnalysis/full-store-partial-alias.ll
@@ -20,13 +20,13 @@ define i32 @signbit(double %x) nounwind {
; CHECK: ret i32 0
entry:
%u = alloca %union.anon, align 8
- %tmp9 = getelementptr inbounds %union.anon* %u, i64 0, i32 0
+ %tmp9 = getelementptr inbounds %union.anon, %union.anon* %u, i64 0, i32 0
store double %x, double* %tmp9, align 8, !tbaa !0
- %tmp2 = load i32* bitcast (i64* @endianness_test to i32*), align 8, !tbaa !3
+ %tmp2 = load i32, i32* bitcast (i64* @endianness_test to i32*), align 8, !tbaa !3
%idxprom = sext i32 %tmp2 to i64
%tmp4 = bitcast %union.anon* %u to [2 x i32]*
- %arrayidx = getelementptr inbounds [2 x i32]* %tmp4, i64 0, i64 %idxprom
- %tmp5 = load i32* %arrayidx, align 4, !tbaa !3
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %tmp4, i64 0, i64 %idxprom
+ %tmp5 = load i32, i32* %arrayidx, align 4, !tbaa !3
%tmp5.lobit = lshr i32 %tmp5, 31
ret i32 %tmp5.lobit
}
diff --git a/test/Analysis/CFLAliasAnalysis/gep-signed-arithmetic.ll b/test/Analysis/CFLAliasAnalysis/gep-signed-arithmetic.ll
index 19d251c..c2fcf32 100644
--- a/test/Analysis/CFLAliasAnalysis/gep-signed-arithmetic.ll
+++ b/test/Analysis/CFLAliasAnalysis/gep-signed-arithmetic.ll
@@ -10,10 +10,10 @@ define i32 @test(i32 %indvar) nounwind {
%tab = alloca i32, align 4
%tmp31 = mul i32 %indvar, -2
%tmp32 = add i32 %tmp31, 30
- %t.5 = getelementptr i32* %tab, i32 %tmp32
- %loada = load i32* %tab
+ %t.5 = getelementptr i32, i32* %tab, i32 %tmp32
+ %loada = load i32, i32* %tab
store i32 0, i32* %t.5
- %loadb = load i32* %tab
+ %loadb = load i32, i32* %tab
%rval = add i32 %loada, %loadb
ret i32 %rval
}
diff --git a/test/Analysis/CFLAliasAnalysis/multilevel-combine.ll b/test/Analysis/CFLAliasAnalysis/multilevel-combine.ll
index 9bbc721..e997374 100644
--- a/test/Analysis/CFLAliasAnalysis/multilevel-combine.ll
+++ b/test/Analysis/CFLAliasAnalysis/multilevel-combine.ll
@@ -25,7 +25,7 @@ define void @test(i1 %C) {
store %T* %MS, %T** %M
- %AP = load %T** %M ; PartialAlias with %A, %B
+ %AP = load %T*, %T** %M ; PartialAlias with %A, %B
ret void
}
diff --git a/test/Analysis/CFLAliasAnalysis/multilevel.ll b/test/Analysis/CFLAliasAnalysis/multilevel.ll
index 9c9eb9a..d42dca4 100644
--- a/test/Analysis/CFLAliasAnalysis/multilevel.ll
+++ b/test/Analysis/CFLAliasAnalysis/multilevel.ll
@@ -23,8 +23,8 @@ define void @test() {
store %T* %A, %T** %M
store %T* %B, %T** %N
- %AP = load %T** %M ; PartialAlias with %A
- %BP = load %T** %N ; PartialAlias with %B
+ %AP = load %T*, %T** %M ; PartialAlias with %A
+ %BP = load %T*, %T** %N ; PartialAlias with %B
ret void
}
diff --git a/test/Analysis/CFLAliasAnalysis/must-and-partial.ll b/test/Analysis/CFLAliasAnalysis/must-and-partial.ll
index 163a6c3..9deacf8 100644
--- a/test/Analysis/CFLAliasAnalysis/must-and-partial.ll
+++ b/test/Analysis/CFLAliasAnalysis/must-and-partial.ll
@@ -10,7 +10,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define i8 @test0(i1 %x) {
entry:
%base = alloca i8, align 4
- %baseplusone = getelementptr i8* %base, i64 1
+ %baseplusone = getelementptr i8, i8* %base, i64 1
br i1 %x, label %red, label %green
red:
br label %green
@@ -21,7 +21,7 @@ green:
%bigbase0 = bitcast i8* %base to i16*
store i16 -1, i16* %bigbase0
- %loaded = load i8* %phi
+ %loaded = load i8, i8* %phi
ret i8 %loaded
}
@@ -30,14 +30,14 @@ green:
define i8 @test1(i1 %x) {
entry:
%base = alloca i8, align 4
- %baseplusone = getelementptr i8* %base, i64 1
+ %baseplusone = getelementptr i8, i8* %base, i64 1
%sel = select i1 %x, i8* %baseplusone, i8* %base
store i8 0, i8* %sel
%bigbase1 = bitcast i8* %base to i16*
store i16 -1, i16* %bigbase1
- %loaded = load i8* %sel
+ %loaded = load i8, i8* %sel
ret i8 %loaded
}
@@ -45,10 +45,10 @@ entry:
; even if they are nocapture
; CHECK: MayAlias: double* %A, double* %Index
define void @testr2(double* nocapture readonly %A, double* nocapture readonly %Index) {
- %arrayidx22 = getelementptr inbounds double* %Index, i64 2
- %1 = load double* %arrayidx22
- %arrayidx25 = getelementptr inbounds double* %A, i64 2
- %2 = load double* %arrayidx25
+ %arrayidx22 = getelementptr inbounds double, double* %Index, i64 2
+ %1 = load double, double* %arrayidx22
+ %arrayidx25 = getelementptr inbounds double, double* %A, i64 2
+ %2 = load double, double* %arrayidx25
%mul26 = fmul double %1, %2
ret void
}
diff --git a/test/Analysis/CFLAliasAnalysis/simple.ll b/test/Analysis/CFLAliasAnalysis/simple.ll
index 7bc455a..adc7186 100644
--- a/test/Analysis/CFLAliasAnalysis/simple.ll
+++ b/test/Analysis/CFLAliasAnalysis/simple.ll
@@ -9,10 +9,10 @@
; CHECK-NOT: May:
define void @test(%T* %P) {
- %A = getelementptr %T* %P, i64 0
- %B = getelementptr %T* %P, i64 0, i32 0
- %C = getelementptr %T* %P, i64 0, i32 1
- %D = getelementptr %T* %P, i64 0, i32 1, i64 0
- %E = getelementptr %T* %P, i64 0, i32 1, i64 5
+ %A = getelementptr %T, %T* %P, i64 0
+ %B = getelementptr %T, %T* %P, i64 0, i32 0
+ %C = getelementptr %T, %T* %P, i64 0, i32 1
+ %D = getelementptr %T, %T* %P, i64 0, i32 1, i64 0
+ %E = getelementptr %T, %T* %P, i64 0, i32 1, i64 5
ret void
}
diff --git a/test/Analysis/CFLAliasAnalysis/stratified-attrs-indexing.ll b/test/Analysis/CFLAliasAnalysis/stratified-attrs-indexing.ll
index 8afedf2..3475285 100644
--- a/test/Analysis/CFLAliasAnalysis/stratified-attrs-indexing.ll
+++ b/test/Analysis/CFLAliasAnalysis/stratified-attrs-indexing.ll
@@ -18,7 +18,7 @@ define void @test(i1 %cond,
i32* %arg31, i32* %arg32, i32* %arg33, i32* %arg34, i32* %arg35) {
; CHECK: 946 Total Alias Queries Performed
- ; CHECK: 810 no alias responses (85.6%)
+ ; CHECK: 43 no alias responses (4.5%)
%a = alloca i32, align 4
%b = select i1 %cond, i32* %arg35, i32* %arg34
%c = select i1 %cond, i32* %arg34, i32* %arg33
diff --git a/test/Analysis/CostModel/AArch64/store.ll b/test/Analysis/CostModel/AArch64/store.ll
index 0c9883c..307f8f8 100644
--- a/test/Analysis/CostModel/AArch64/store.ll
+++ b/test/Analysis/CostModel/AArch64/store.ll
@@ -14,9 +14,9 @@ define void @store() {
; CHECK: cost of 64 {{.*}} store
store <4 x i8> undef, <4 x i8> * undef
; CHECK: cost of 16 {{.*}} load
- load <2 x i8> * undef
+ load <2 x i8> , <2 x i8> * undef
; CHECK: cost of 64 {{.*}} load
- load <4 x i8> * undef
+ load <4 x i8> , <4 x i8> * undef
ret void
}
diff --git a/test/Analysis/CostModel/ARM/gep.ll b/test/Analysis/CostModel/ARM/gep.ll
index a63b87d..624ca11 100644
--- a/test/Analysis/CostModel/ARM/gep.ll
+++ b/test/Analysis/CostModel/ARM/gep.ll
@@ -6,37 +6,37 @@ target triple = "thumbv7-apple-ios6.0.0"
define void @test_geps() {
; Cost of scalar integer geps should be one. We can't always expect it to be
; folded into the instruction addressing mode.
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i8*
- %a0 = getelementptr inbounds i8* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i16*
- %a1 = getelementptr inbounds i16* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i32*
- %a2 = getelementptr inbounds i32* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i8, i8*
+ %a0 = getelementptr inbounds i8, i8* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i16, i16*
+ %a1 = getelementptr inbounds i16, i16* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i32, i32*
+ %a2 = getelementptr inbounds i32, i32* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i64*
- %a3 = getelementptr inbounds i64* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i64, i64*
+ %a3 = getelementptr inbounds i64, i64* undef, i32 0
; Cost of scalar floating point geps should be one. We cannot fold the address
; computation.
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds float*
- %a4 = getelementptr inbounds float* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds double*
- %a5 = getelementptr inbounds double* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds float, float*
+ %a4 = getelementptr inbounds float, float* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds double, double*
+ %a5 = getelementptr inbounds double, double* undef, i32 0
; Cost of vector geps should be one. We cannot fold the address computation.
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i8>*
- %a7 = getelementptr inbounds <4 x i8>* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i16>*
- %a8 = getelementptr inbounds <4 x i16>* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i32>*
- %a9 = getelementptr inbounds <4 x i32>* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i64>*
- %a10 = getelementptr inbounds <4 x i64>* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x float>*
- %a11 = getelementptr inbounds <4 x float>* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x double>*
- %a12 = getelementptr inbounds <4 x double>* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i8>, <4 x i8>*
+ %a7 = getelementptr inbounds <4 x i8>, <4 x i8>* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i16>, <4 x i16>*
+ %a8 = getelementptr inbounds <4 x i16>, <4 x i16>* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i32>, <4 x i32>*
+ %a9 = getelementptr inbounds <4 x i32>, <4 x i32>* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i64>, <4 x i64>*
+ %a10 = getelementptr inbounds <4 x i64>, <4 x i64>* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x float>, <4 x float>*
+ %a11 = getelementptr inbounds <4 x float>, <4 x float>* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x double>, <4 x double>*
+ %a12 = getelementptr inbounds <4 x double>, <4 x double>* undef, i32 0
ret void
diff --git a/test/Analysis/CostModel/ARM/insertelement.ll b/test/Analysis/CostModel/ARM/insertelement.ll
index f951b08..bd1467e 100644
--- a/test/Analysis/CostModel/ARM/insertelement.ll
+++ b/test/Analysis/CostModel/ARM/insertelement.ll
@@ -10,8 +10,8 @@ target triple = "thumbv7-apple-ios6.0.0"
; CHECK: insertelement_i8
define void @insertelement_i8(%T_i8* %saddr,
%T_i8v* %vaddr) {
- %v0 = load %T_i8v* %vaddr
- %v1 = load %T_i8* %saddr
+ %v0 = load %T_i8v, %T_i8v* %vaddr
+ %v1 = load %T_i8, %T_i8* %saddr
;CHECK: estimated cost of 3 for {{.*}} insertelement <8 x i8>
%v2 = insertelement %T_i8v %v0, %T_i8 %v1, i32 1
store %T_i8v %v2, %T_i8v* %vaddr
@@ -24,8 +24,8 @@ define void @insertelement_i8(%T_i8* %saddr,
; CHECK: insertelement_i16
define void @insertelement_i16(%T_i16* %saddr,
%T_i16v* %vaddr) {
- %v0 = load %T_i16v* %vaddr
- %v1 = load %T_i16* %saddr
+ %v0 = load %T_i16v, %T_i16v* %vaddr
+ %v1 = load %T_i16, %T_i16* %saddr
;CHECK: estimated cost of 3 for {{.*}} insertelement <4 x i16>
%v2 = insertelement %T_i16v %v0, %T_i16 %v1, i32 1
store %T_i16v %v2, %T_i16v* %vaddr
@@ -37,8 +37,8 @@ define void @insertelement_i16(%T_i16* %saddr,
; CHECK: insertelement_i32
define void @insertelement_i32(%T_i32* %saddr,
%T_i32v* %vaddr) {
- %v0 = load %T_i32v* %vaddr
- %v1 = load %T_i32* %saddr
+ %v0 = load %T_i32v, %T_i32v* %vaddr
+ %v1 = load %T_i32, %T_i32* %saddr
;CHECK: estimated cost of 3 for {{.*}} insertelement <2 x i32>
%v2 = insertelement %T_i32v %v0, %T_i32 %v1, i32 1
store %T_i32v %v2, %T_i32v* %vaddr
diff --git a/test/Analysis/CostModel/PowerPC/load_store.ll b/test/Analysis/CostModel/PowerPC/load_store.ll
index 368f0a7..1e50f16 100644
--- a/test/Analysis/CostModel/PowerPC/load_store.ll
+++ b/test/Analysis/CostModel/PowerPC/load_store.ll
@@ -19,26 +19,26 @@ define i32 @stores(i32 %arg) {
}
define i32 @loads(i32 %arg) {
; CHECK: cost of 1 {{.*}} load
- load i8* undef, align 4
+ load i8, i8* undef, align 4
; CHECK: cost of 1 {{.*}} load
- load i16* undef, align 4
+ load i16, i16* undef, align 4
; CHECK: cost of 1 {{.*}} load
- load i32* undef, align 4
+ load i32, i32* undef, align 4
; CHECK: cost of 2 {{.*}} load
- load i64* undef, align 4
+ load i64, i64* undef, align 4
; CHECK: cost of 4 {{.*}} load
- load i128* undef, align 4
+ load i128, i128* undef, align 4
; FIXME: There actually are sub-vector Altivec loads, and so we could handle
; this with a small expense, but we don't currently.
; CHECK: cost of 48 {{.*}} load
- load <4 x i16>* undef, align 2
+ load <4 x i16>, <4 x i16>* undef, align 2
; CHECK: cost of 1 {{.*}} load
- load <4 x i32>* undef, align 4
+ load <4 x i32>, <4 x i32>* undef, align 4
; CHECK: cost of 46 {{.*}} load
- load <3 x float>* undef, align 1
+ load <3 x float>, <3 x float>* undef, align 1
ret i32 undef
}
diff --git a/test/Analysis/CostModel/X86/gep.ll b/test/Analysis/CostModel/X86/gep.ll
index 877184a..a4488ba 100644
--- a/test/Analysis/CostModel/X86/gep.ll
+++ b/test/Analysis/CostModel/X86/gep.ll
@@ -7,33 +7,33 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @test_geps() {
 ; Cost should be zero. We expect it to be folded into
; the instruction addressing mode.
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8*
- %a0 = getelementptr inbounds i8* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i16*
- %a1 = getelementptr inbounds i16* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i32*
- %a2 = getelementptr inbounds i32* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i64*
- %a3 = getelementptr inbounds i64* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
+ %a0 = getelementptr inbounds i8, i8* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i16, i16*
+ %a1 = getelementptr inbounds i16, i16* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i32, i32*
+ %a2 = getelementptr inbounds i32, i32* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i64, i64*
+ %a3 = getelementptr inbounds i64, i64* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds float*
- %a4 = getelementptr inbounds float* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds double*
- %a5 = getelementptr inbounds double* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds float, float*
+ %a4 = getelementptr inbounds float, float* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds double, double*
+ %a5 = getelementptr inbounds double, double* undef, i32 0
; Vector geps should also have zero cost.
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i8>*
- %a7 = getelementptr inbounds <4 x i8>* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i16>*
- %a8 = getelementptr inbounds <4 x i16>* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i32>*
- %a9 = getelementptr inbounds <4 x i32>* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i64>*
- %a10 = getelementptr inbounds <4 x i64>* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x float>*
- %a11 = getelementptr inbounds <4 x float>* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x double>*
- %a12 = getelementptr inbounds <4 x double>* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i8>, <4 x i8>*
+ %a7 = getelementptr inbounds <4 x i8>, <4 x i8>* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i16>, <4 x i16>*
+ %a8 = getelementptr inbounds <4 x i16>, <4 x i16>* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i32>, <4 x i32>*
+ %a9 = getelementptr inbounds <4 x i32>, <4 x i32>* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i64>, <4 x i64>*
+ %a10 = getelementptr inbounds <4 x i64>, <4 x i64>* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x float>, <4 x float>*
+ %a11 = getelementptr inbounds <4 x float>, <4 x float>* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x double>, <4 x double>*
+ %a12 = getelementptr inbounds <4 x double>, <4 x double>* undef, i32 0
ret void
diff --git a/test/Analysis/CostModel/X86/intrinsic-cost.ll b/test/Analysis/CostModel/X86/intrinsic-cost.ll
index 3b27b52..efc1263 100644
--- a/test/Analysis/CostModel/X86/intrinsic-cost.ll
+++ b/test/Analysis/CostModel/X86/intrinsic-cost.ll
@@ -9,9 +9,9 @@ vector.ph:
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds float* %f, i64 %index
+ %0 = getelementptr inbounds float, float* %f, i64 %index
%1 = bitcast float* %0 to <4 x float>*
- %wide.load = load <4 x float>* %1, align 4
+ %wide.load = load <4 x float>, <4 x float>* %1, align 4
%2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
store <4 x float> %2, <4 x float>* %1, align 4
%index.next = add i64 %index, 4
@@ -22,7 +22,7 @@ for.end: ; preds = %vector.body
ret void
; CORE2: Printing analysis 'Cost Model Analysis' for function 'test1':
-; CORE2: Cost Model: Found an estimated cost of 400 for instruction: %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
+; CORE2: Cost Model: Found an estimated cost of 46 for instruction: %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
; COREI7: Printing analysis 'Cost Model Analysis' for function 'test1':
; COREI7: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
@@ -37,9 +37,9 @@ vector.ph:
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds float* %f, i64 %index
+ %0 = getelementptr inbounds float, float* %f, i64 %index
%1 = bitcast float* %0 to <4 x float>*
- %wide.load = load <4 x float>* %1, align 4
+ %wide.load = load <4 x float>, <4 x float>* %1, align 4
%2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
store <4 x float> %2, <4 x float>* %1, align 4
%index.next = add i64 %index, 4
@@ -50,7 +50,7 @@ for.end: ; preds = %vector.body
ret void
; CORE2: Printing analysis 'Cost Model Analysis' for function 'test2':
-; CORE2: Cost Model: Found an estimated cost of 400 for instruction: %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
+; CORE2: Cost Model: Found an estimated cost of 46 for instruction: %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
; COREI7: Printing analysis 'Cost Model Analysis' for function 'test2':
; COREI7: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
@@ -65,9 +65,9 @@ vector.ph:
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds float* %f, i64 %index
+ %0 = getelementptr inbounds float, float* %f, i64 %index
%1 = bitcast float* %0 to <4 x float>*
- %wide.load = load <4 x float>* %1, align 4
+ %wide.load = load <4 x float>, <4 x float>* %1, align 4
%2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %wide.load, <4 x float> %b, <4 x float> %c)
store <4 x float> %2, <4 x float>* %1, align 4
%index.next = add i64 %index, 4
diff --git a/test/Analysis/CostModel/X86/load_store.ll b/test/Analysis/CostModel/X86/load_store.ll
index a53d0bd..ccf110a 100644
--- a/test/Analysis/CostModel/X86/load_store.ll
+++ b/test/Analysis/CostModel/X86/load_store.ll
@@ -34,49 +34,49 @@ define i32 @stores(i32 %arg) {
}
define i32 @loads(i32 %arg) {
;CHECK: cost of 1 {{.*}} load
- load i8* undef, align 4
+ load i8, i8* undef, align 4
;CHECK: cost of 1 {{.*}} load
- load i16* undef, align 4
+ load i16, i16* undef, align 4
;CHECK: cost of 1 {{.*}} load
- load i32* undef, align 4
+ load i32, i32* undef, align 4
;CHECK: cost of 1 {{.*}} load
- load i64* undef, align 4
+ load i64, i64* undef, align 4
;CHECK: cost of 2 {{.*}} load
- load i128* undef, align 4
+ load i128, i128* undef, align 4
;CHECK: cost of 1 {{.*}} load
- load <2 x i32>* undef, align 4
+ load <2 x i32>, <2 x i32>* undef, align 4
;CHECK: cost of 1 {{.*}} load
- load <4 x i32>* undef, align 4
+ load <4 x i32>, <4 x i32>* undef, align 4
;CHECK: cost of 2 {{.*}} load
- load <8 x i32>* undef, align 4
+ load <8 x i32>, <8 x i32>* undef, align 4
;CHECK: cost of 1 {{.*}} load
- load <2 x i64>* undef, align 4
+ load <2 x i64>, <2 x i64>* undef, align 4
;CHECK: cost of 2 {{.*}} load
- load <4 x i64>* undef, align 4
+ load <4 x i64>, <4 x i64>* undef, align 4
;CHECK: cost of 4 {{.*}} load
- load <8 x i64>* undef, align 4
+ load <8 x i64>, <8 x i64>* undef, align 4
;CHECK: cost of 3 {{.*}} load
- load <3 x float>* undef, align 4
+ load <3 x float>, <3 x float>* undef, align 4
;CHECK: cost of 3 {{.*}} load
- load <3 x double>* undef, align 4
+ load <3 x double>, <3 x double>* undef, align 4
;CHECK: cost of 3 {{.*}} load
- load <3 x i32>* undef, align 4
+ load <3 x i32>, <3 x i32>* undef, align 4
;CHECK: cost of 3 {{.*}} load
- load <3 x i64>* undef, align 4
+ load <3 x i64>, <3 x i64>* undef, align 4
;CHECK: cost of 10 {{.*}} load
- load <5 x i32>* undef, align 4
+ load <5 x i32>, <5 x i32>* undef, align 4
;CHECK: cost of 10 {{.*}} load
- load <5 x i64>* undef, align 4
+ load <5 x i64>, <5 x i64>* undef, align 4
ret i32 undef
}
diff --git a/test/Analysis/CostModel/X86/loop_v2.ll b/test/Analysis/CostModel/X86/loop_v2.ll
index 348444e..9283310 100644
--- a/test/Analysis/CostModel/X86/loop_v2.ll
+++ b/test/Analysis/CostModel/X86/loop_v2.ll
@@ -10,20 +10,20 @@ vector.ph:
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
%vec.phi = phi <2 x i32> [ zeroinitializer, %vector.ph ], [ %12, %vector.body ]
- %0 = getelementptr inbounds i32* %A, i64 %index
+ %0 = getelementptr inbounds i32, i32* %A, i64 %index
%1 = bitcast i32* %0 to <2 x i32>*
- %2 = load <2 x i32>* %1, align 4
+ %2 = load <2 x i32>, <2 x i32>* %1, align 4
%3 = sext <2 x i32> %2 to <2 x i64>
;CHECK: cost of 1 {{.*}} extract
%4 = extractelement <2 x i64> %3, i32 0
- %5 = getelementptr inbounds i32* %A, i64 %4
+ %5 = getelementptr inbounds i32, i32* %A, i64 %4
;CHECK: cost of 1 {{.*}} extract
%6 = extractelement <2 x i64> %3, i32 1
- %7 = getelementptr inbounds i32* %A, i64 %6
- %8 = load i32* %5, align 4
+ %7 = getelementptr inbounds i32, i32* %A, i64 %6
+ %8 = load i32, i32* %5, align 4
;CHECK: cost of 1 {{.*}} insert
%9 = insertelement <2 x i32> undef, i32 %8, i32 0
- %10 = load i32* %7, align 4
+ %10 = load i32, i32* %7, align 4
;CHECK: cost of 1 {{.*}} insert
%11 = insertelement <2 x i32> %9, i32 %10, i32 1
%12 = add nsw <2 x i32> %11, %vec.phi
diff --git a/test/Analysis/CostModel/X86/testshiftlshr.ll b/test/Analysis/CostModel/X86/testshiftlshr.ll
index 7bc8d89..78bf0a6 100644
--- a/test/Analysis/CostModel/X86/testshiftlshr.ll
+++ b/test/Analysis/CostModel/X86/testshiftlshr.ll
@@ -7,7 +7,7 @@ entry:
; SSE2: shift2i16
; SSE2: cost of 20 {{.*}} lshr
; SSE2-CODEGEN: shift2i16
- ; SSE2-CODEGEN: shrq %cl
+ ; SSE2-CODEGEN: psrlq
%0 = lshr %shifttype %a , %b
ret %shifttype %0
@@ -67,7 +67,7 @@ entry:
; SSE2: shift2i32
; SSE2: cost of 20 {{.*}} lshr
; SSE2-CODEGEN: shift2i32
- ; SSE2-CODEGEN: shrq %cl
+ ; SSE2-CODEGEN: psrlq
%0 = lshr %shifttype2i32 %a , %b
ret %shifttype2i32 %0
@@ -127,7 +127,7 @@ entry:
; SSE2: shift2i64
; SSE2: cost of 20 {{.*}} lshr
; SSE2-CODEGEN: shift2i64
- ; SSE2-CODEGEN: shrq %cl
+ ; SSE2-CODEGEN: psrlq
%0 = lshr %shifttype2i64 %a , %b
ret %shifttype2i64 %0
@@ -139,7 +139,7 @@ entry:
; SSE2: shift4i64
; SSE2: cost of 40 {{.*}} lshr
; SSE2-CODEGEN: shift4i64
- ; SSE2-CODEGEN: shrq %cl
+ ; SSE2-CODEGEN: psrlq
%0 = lshr %shifttype4i64 %a , %b
ret %shifttype4i64 %0
@@ -151,7 +151,7 @@ entry:
; SSE2: shift8i64
; SSE2: cost of 80 {{.*}} lshr
; SSE2-CODEGEN: shift8i64
- ; SSE2-CODEGEN: shrq %cl
+ ; SSE2-CODEGEN: psrlq
%0 = lshr %shifttype8i64 %a , %b
ret %shifttype8i64 %0
@@ -163,7 +163,7 @@ entry:
; SSE2: shift16i64
; SSE2: cost of 160 {{.*}} lshr
; SSE2-CODEGEN: shift16i64
- ; SSE2-CODEGEN: shrq %cl
+ ; SSE2-CODEGEN: psrlq
%0 = lshr %shifttype16i64 %a , %b
ret %shifttype16i64 %0
@@ -175,7 +175,7 @@ entry:
; SSE2: shift32i64
; SSE2: cost of 320 {{.*}} lshr
; SSE2-CODEGEN: shift32i64
- ; SSE2-CODEGEN: shrq %cl
+ ; SSE2-CODEGEN: psrlq
%0 = lshr %shifttype32i64 %a , %b
ret %shifttype32i64 %0
@@ -187,7 +187,7 @@ entry:
; SSE2: shift2i8
; SSE2: cost of 20 {{.*}} lshr
; SSE2-CODEGEN: shift2i8
- ; SSE2-CODEGEN: shrq %cl
+ ; SSE2-CODEGEN: psrlq
%0 = lshr %shifttype2i8 %a , %b
ret %shifttype2i8 %0
diff --git a/test/Analysis/CostModel/X86/testshiftshl.ll b/test/Analysis/CostModel/X86/testshiftshl.ll
index 40effd0..c36e0f5 100644
--- a/test/Analysis/CostModel/X86/testshiftshl.ll
+++ b/test/Analysis/CostModel/X86/testshiftshl.ll
@@ -7,7 +7,7 @@ entry:
; SSE2: shift2i16
; SSE2: cost of 20 {{.*}} shl
; SSE2-CODEGEN: shift2i16
- ; SSE2-CODEGEN: shlq %cl
+ ; SSE2-CODEGEN: psllq
%0 = shl %shifttype %a , %b
ret %shifttype %0
@@ -67,7 +67,7 @@ entry:
; SSE2: shift2i32
; SSE2: cost of 20 {{.*}} shl
; SSE2-CODEGEN: shift2i32
- ; SSE2-CODEGEN: shlq %cl
+ ; SSE2-CODEGEN: psllq
%0 = shl %shifttype2i32 %a , %b
ret %shifttype2i32 %0
@@ -127,7 +127,7 @@ entry:
; SSE2: shift2i64
; SSE2: cost of 20 {{.*}} shl
; SSE2-CODEGEN: shift2i64
- ; SSE2-CODEGEN: shlq %cl
+ ; SSE2-CODEGEN: psllq
%0 = shl %shifttype2i64 %a , %b
ret %shifttype2i64 %0
@@ -139,7 +139,7 @@ entry:
; SSE2: shift4i64
; SSE2: cost of 40 {{.*}} shl
; SSE2-CODEGEN: shift4i64
- ; SSE2-CODEGEN: shlq %cl
+ ; SSE2-CODEGEN: psllq
%0 = shl %shifttype4i64 %a , %b
ret %shifttype4i64 %0
@@ -151,7 +151,7 @@ entry:
; SSE2: shift8i64
; SSE2: cost of 80 {{.*}} shl
; SSE2-CODEGEN: shift8i64
- ; SSE2-CODEGEN: shlq %cl
+ ; SSE2-CODEGEN: psllq
%0 = shl %shifttype8i64 %a , %b
ret %shifttype8i64 %0
@@ -163,7 +163,7 @@ entry:
; SSE2: shift16i64
; SSE2: cost of 160 {{.*}} shl
; SSE2-CODEGEN: shift16i64
- ; SSE2-CODEGEN: shlq %cl
+ ; SSE2-CODEGEN: psllq
%0 = shl %shifttype16i64 %a , %b
ret %shifttype16i64 %0
@@ -175,7 +175,7 @@ entry:
; SSE2: shift32i64
; SSE2: cost of 320 {{.*}} shl
; SSE2-CODEGEN: shift32i64
- ; SSE2-CODEGEN: shlq %cl
+ ; SSE2-CODEGEN: psllq
%0 = shl %shifttype32i64 %a , %b
ret %shifttype32i64 %0
@@ -187,7 +187,7 @@ entry:
; SSE2: shift2i8
; SSE2: cost of 20 {{.*}} shl
; SSE2-CODEGEN: shift2i8
- ; SSE2-CODEGEN: shlq %cl
+ ; SSE2-CODEGEN: psllq
%0 = shl %shifttype2i8 %a , %b
ret %shifttype2i8 %0
diff --git a/test/Analysis/CostModel/X86/vectorized-loop.ll b/test/Analysis/CostModel/X86/vectorized-loop.ll
index af7d1df..2dd52a0 100644
--- a/test/Analysis/CostModel/X86/vectorized-loop.ll
+++ b/test/Analysis/CostModel/X86/vectorized-loop.ll
@@ -25,17 +25,17 @@ for.body.lr.ph: ; preds = %entry
vector.body: ; preds = %for.body.lr.ph, %vector.body
%index = phi i64 [ %index.next, %vector.body ], [ %0, %for.body.lr.ph ]
%3 = add i64 %index, 2
- %4 = getelementptr inbounds i32* %B, i64 %3
+ %4 = getelementptr inbounds i32, i32* %B, i64 %3
;CHECK: cost of 0 {{.*}} bitcast
%5 = bitcast i32* %4 to <8 x i32>*
;CHECK: cost of 2 {{.*}} load
- %6 = load <8 x i32>* %5, align 4
+ %6 = load <8 x i32>, <8 x i32>* %5, align 4
;CHECK: cost of 4 {{.*}} mul
%7 = mul nsw <8 x i32> %6, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
- %8 = getelementptr inbounds i32* %A, i64 %index
+ %8 = getelementptr inbounds i32, i32* %A, i64 %index
%9 = bitcast i32* %8 to <8 x i32>*
;CHECK: cost of 2 {{.*}} load
- %10 = load <8 x i32>* %9, align 4
+ %10 = load <8 x i32>, <8 x i32>* %9, align 4
;CHECK: cost of 4 {{.*}} add
%11 = add nsw <8 x i32> %10, %7
;CHECK: cost of 2 {{.*}} store
@@ -52,14 +52,14 @@ middle.block: ; preds = %vector.body, %for.b
for.body: ; preds = %middle.block, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %end.idx.rnd.down, %middle.block ]
%13 = add nsw i64 %indvars.iv, 2
- %arrayidx = getelementptr inbounds i32* %B, i64 %13
+ %arrayidx = getelementptr inbounds i32, i32* %B, i64 %13
;CHECK: cost of 1 {{.*}} load
- %14 = load i32* %arrayidx, align 4
+ %14 = load i32, i32* %arrayidx, align 4
;CHECK: cost of 1 {{.*}} mul
%mul = mul nsw i32 %14, 5
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
;CHECK: cost of 1 {{.*}} load
- %15 = load i32* %arrayidx2, align 4
+ %15 = load i32, i32* %arrayidx2, align 4
%add3 = add nsw i32 %15, %mul
store i32 %add3, i32* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/test/Analysis/Delinearization/a.ll b/test/Analysis/Delinearization/a.ll
index efebcc4..78bbfcf 100644
--- a/test/Analysis/Delinearization/a.ll
+++ b/test/Analysis/Delinearization/a.ll
@@ -9,7 +9,7 @@
; AddRec: {{{(28 + (4 * (-4 + (3 * %m)) * %o) + %A),+,(8 * %m * %o)}<%for.i>,+,(12 * %o)}<%for.j>,+,20}<%for.k>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(i32) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of 4 bytes.
; CHECK: ArrayRef[{3,+,2}<%for.i>][{-4,+,3}<%for.j>][{7,+,5}<%for.k>]
define void @foo(i64 %n, i64 %m, i64 %o, i32* nocapture %A) #0 {
@@ -52,7 +52,7 @@ for.k: ; preds = %for.k, %for.j
%mul.us.us = mul nsw i64 %k.029.us.us, 5
%arrayidx.sum.us.us = add i64 %mul.us.us, 7
%arrayidx10.sum.us.us = add i64 %arrayidx.sum.us.us, %tmp27.us.us
- %arrayidx11.us.us = getelementptr inbounds i32* %A, i64 %arrayidx10.sum.us.us
+ %arrayidx11.us.us = getelementptr inbounds i32, i32* %A, i64 %arrayidx10.sum.us.us
store i32 1, i32* %arrayidx11.us.us, align 4
%inc.us.us = add nsw i64 %k.029.us.us, 1
%exitcond = icmp eq i64 %inc.us.us, %o
diff --git a/test/Analysis/Delinearization/gcd_multiply_expr.ll b/test/Analysis/Delinearization/gcd_multiply_expr.ll
index f962f6d..c30a672 100644
--- a/test/Analysis/Delinearization/gcd_multiply_expr.ll
+++ b/test/Analysis/Delinearization/gcd_multiply_expr.ll
@@ -27,7 +27,7 @@
define i32 @fn2() {
entry:
- %.pr = load i32* @d, align 4
+ %.pr = load i32, i32* @d, align 4
%phitmp = icmp eq i32 %.pr, 0
br label %for.cond
@@ -36,11 +36,11 @@ for.cond:
br i1 %0, label %for.cond, label %for.cond2thread-pre-split.preheader.i
for.cond2thread-pre-split.preheader.i:
- %1 = load i32* @g, align 4
- %2 = load i32* @h, align 4
+ %1 = load i32, i32* @g, align 4
+ %2 = load i32, i32* @h, align 4
%mul = mul nsw i32 %2, %1
- %3 = load i8** @f, align 4
- %.pr.pre.i = load i32* @b, align 4
+ %3 = load i8*, i8** @f, align 4
+ %.pr.pre.i = load i32, i32* @b, align 4
br label %for.cond2thread-pre-split.i
for.cond2thread-pre-split.i:
@@ -64,57 +64,57 @@ for.body4.i.preheader:
for.body4.i:
%8 = phi i32 [ %inc.7.i, %for.body4.i ], [ %.pr.i, %for.body4.i.preheader ]
%arrayidx.sum1 = add i32 %add.i, %8
- %arrayidx.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum1
- %9 = load i8* %arrayidx.i, align 1
+ %arrayidx.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum1
+ %9 = load i8, i8* %arrayidx.i, align 1
%conv.i = sext i8 %9 to i32
store i32 %conv.i, i32* @c, align 4
%inc.i = add nsw i32 %8, 1
store i32 %inc.i, i32* @b, align 4
%arrayidx.sum2 = add i32 %add.i, %inc.i
- %arrayidx.1.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum2
- %10 = load i8* %arrayidx.1.i, align 1
+ %arrayidx.1.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum2
+ %10 = load i8, i8* %arrayidx.1.i, align 1
%conv.1.i = sext i8 %10 to i32
store i32 %conv.1.i, i32* @c, align 4
%inc.1.i = add nsw i32 %8, 2
store i32 %inc.1.i, i32* @b, align 4
%arrayidx.sum3 = add i32 %add.i, %inc.1.i
- %arrayidx.2.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum3
- %11 = load i8* %arrayidx.2.i, align 1
+ %arrayidx.2.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum3
+ %11 = load i8, i8* %arrayidx.2.i, align 1
%conv.2.i = sext i8 %11 to i32
store i32 %conv.2.i, i32* @c, align 4
%inc.2.i = add nsw i32 %8, 3
store i32 %inc.2.i, i32* @b, align 4
%arrayidx.sum4 = add i32 %add.i, %inc.2.i
- %arrayidx.3.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum4
- %12 = load i8* %arrayidx.3.i, align 1
+ %arrayidx.3.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum4
+ %12 = load i8, i8* %arrayidx.3.i, align 1
%conv.3.i = sext i8 %12 to i32
store i32 %conv.3.i, i32* @c, align 4
%inc.3.i = add nsw i32 %8, 4
store i32 %inc.3.i, i32* @b, align 4
%arrayidx.sum5 = add i32 %add.i, %inc.3.i
- %arrayidx.4.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum5
- %13 = load i8* %arrayidx.4.i, align 1
+ %arrayidx.4.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum5
+ %13 = load i8, i8* %arrayidx.4.i, align 1
%conv.4.i = sext i8 %13 to i32
store i32 %conv.4.i, i32* @c, align 4
%inc.4.i = add nsw i32 %8, 5
store i32 %inc.4.i, i32* @b, align 4
%arrayidx.sum6 = add i32 %add.i, %inc.4.i
- %arrayidx.5.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum6
- %14 = load i8* %arrayidx.5.i, align 1
+ %arrayidx.5.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum6
+ %14 = load i8, i8* %arrayidx.5.i, align 1
%conv.5.i = sext i8 %14 to i32
store i32 %conv.5.i, i32* @c, align 4
%inc.5.i = add nsw i32 %8, 6
store i32 %inc.5.i, i32* @b, align 4
%arrayidx.sum7 = add i32 %add.i, %inc.5.i
- %arrayidx.6.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum7
- %15 = load i8* %arrayidx.6.i, align 1
+ %arrayidx.6.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum7
+ %15 = load i8, i8* %arrayidx.6.i, align 1
%conv.6.i = sext i8 %15 to i32
store i32 %conv.6.i, i32* @c, align 4
%inc.6.i = add nsw i32 %8, 7
store i32 %inc.6.i, i32* @b, align 4
%arrayidx.sum8 = add i32 %add.i, %inc.6.i
- %arrayidx.7.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum8
- %16 = load i8* %arrayidx.7.i, align 1
+ %arrayidx.7.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum8
+ %16 = load i8, i8* %arrayidx.7.i, align 1
%conv.7.i = sext i8 %16 to i32
store i32 %conv.7.i, i32* @c, align 4
%inc.7.i = add nsw i32 %8, 8
@@ -135,8 +135,8 @@ for.body4.ur.i.preheader:
for.body4.ur.i:
%20 = phi i32 [ %inc.ur.i, %for.body4.ur.i ], [ %.ph, %for.body4.ur.i.preheader ]
%arrayidx.sum = add i32 %add.i, %20
- %arrayidx.ur.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum
- %21 = load i8* %arrayidx.ur.i, align 1
+ %arrayidx.ur.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum
+ %21 = load i8, i8* %arrayidx.ur.i, align 1
%conv.ur.i = sext i8 %21 to i32
store i32 %conv.ur.i, i32* @c, align 4
%inc.ur.i = add nsw i32 %20, 1
diff --git a/test/Analysis/Delinearization/himeno_1.ll b/test/Analysis/Delinearization/himeno_1.ll
index c94ca7a..5c86f57 100644
--- a/test/Analysis/Delinearization/himeno_1.ll
+++ b/test/Analysis/Delinearization/himeno_1.ll
@@ -28,31 +28,31 @@
; AddRec: {{{(4 + (4 * (sext i32 %a.deps to i64) * (1 + (sext i32 %a.cols to i64))) + %a.base),+,(4 * (sext i32 %a.deps to i64) * (sext i32 %a.cols to i64))}<%for.i>,+,(4 * (sext i32 %a.deps to i64))}<%for.j>,+,4}<%for.k>
; CHECK: Base offset: %a.base
-; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of sizeof(float) bytes.
+; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of 4 bytes.
; CHECK: ArrayRef[{1,+,1}<nuw><nsw><%for.i>][{1,+,1}<nuw><nsw><%for.j>][{1,+,1}<nuw><nsw><%for.k>]
%struct.Mat = type { float*, i32, i32, i32, i32 }
define void @jacobi(i32 %nn, %struct.Mat* nocapture %a, %struct.Mat* nocapture %p) nounwind uwtable {
entry:
- %p.rows.ptr = getelementptr inbounds %struct.Mat* %p, i64 0, i32 2
- %p.rows = load i32* %p.rows.ptr
+ %p.rows.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 2
+ %p.rows = load i32, i32* %p.rows.ptr
%p.rows.sub = add i32 %p.rows, -1
%p.rows.sext = sext i32 %p.rows.sub to i64
- %p.cols.ptr = getelementptr inbounds %struct.Mat* %p, i64 0, i32 3
- %p.cols = load i32* %p.cols.ptr
+ %p.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 3
+ %p.cols = load i32, i32* %p.cols.ptr
%p.cols.sub = add i32 %p.cols, -1
%p.cols.sext = sext i32 %p.cols.sub to i64
- %p.deps.ptr = getelementptr inbounds %struct.Mat* %p, i64 0, i32 4
- %p.deps = load i32* %p.deps.ptr
+ %p.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 4
+ %p.deps = load i32, i32* %p.deps.ptr
%p.deps.sub = add i32 %p.deps, -1
%p.deps.sext = sext i32 %p.deps.sub to i64
- %a.cols.ptr = getelementptr inbounds %struct.Mat* %a, i64 0, i32 3
- %a.cols = load i32* %a.cols.ptr
- %a.deps.ptr = getelementptr inbounds %struct.Mat* %a, i64 0, i32 4
- %a.deps = load i32* %a.deps.ptr
- %a.base.ptr = getelementptr inbounds %struct.Mat* %a, i64 0, i32 0
- %a.base = load float** %a.base.ptr, align 8
+ %a.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 3
+ %a.cols = load i32, i32* %a.cols.ptr
+ %a.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 4
+ %a.deps = load i32, i32* %a.deps.ptr
+ %a.base.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 0
+ %a.base = load float*, float** %a.base.ptr, align 8
br label %for.i
for.i: ; preds = %for.i.inc, %entry
@@ -71,7 +71,7 @@ for.k: ; preds = %for.k, %for.j
%tmp2 = add i64 %tmp1, %j
%tmp3 = mul i64 %tmp2, %a.deps.sext
%tmp4 = add nsw i64 %k, %tmp3
- %arrayidx = getelementptr inbounds float* %a.base, i64 %tmp4
+ %arrayidx = getelementptr inbounds float, float* %a.base, i64 %tmp4
store float 1.000000e+00, float* %arrayidx
%k.inc = add nsw i64 %k, 1
%k.exitcond = icmp eq i64 %k.inc, %p.deps.sext
diff --git a/test/Analysis/Delinearization/himeno_2.ll b/test/Analysis/Delinearization/himeno_2.ll
index c256384..e1e7560 100644
--- a/test/Analysis/Delinearization/himeno_2.ll
+++ b/test/Analysis/Delinearization/himeno_2.ll
@@ -28,33 +28,33 @@
; AddRec: {{{(4 + (4 * (sext i32 %a.deps to i64) * (1 + (sext i32 %a.cols to i64))) + %a.base),+,(4 * (sext i32 %a.deps to i64) * (sext i32 %a.cols to i64))}<%for.i>,+,(4 * (sext i32 %a.deps to i64))}<%for.j>,+,4}<%for.k>
; CHECK: Base offset: %a.base
-; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of sizeof(float) bytes.
+; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of 4 bytes.
; CHECK: ArrayRef[{1,+,1}<nuw><nsw><%for.i>][{1,+,1}<nuw><nsw><%for.j>][{1,+,1}<nuw><nsw><%for.k>]
%struct.Mat = type { float*, i32, i32, i32, i32 }
define void @jacobi(i32 %nn, %struct.Mat* nocapture %a, %struct.Mat* nocapture %p) nounwind uwtable {
entry:
- %p.rows.ptr = getelementptr inbounds %struct.Mat* %p, i64 0, i32 2
- %p.rows = load i32* %p.rows.ptr
+ %p.rows.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 2
+ %p.rows = load i32, i32* %p.rows.ptr
%p.rows.sub = add i32 %p.rows, -1
%p.rows.sext = sext i32 %p.rows.sub to i64
- %p.cols.ptr = getelementptr inbounds %struct.Mat* %p, i64 0, i32 3
- %p.cols = load i32* %p.cols.ptr
+ %p.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 3
+ %p.cols = load i32, i32* %p.cols.ptr
%p.cols.sub = add i32 %p.cols, -1
%p.cols.sext = sext i32 %p.cols.sub to i64
- %p.deps.ptr = getelementptr inbounds %struct.Mat* %p, i64 0, i32 4
- %p.deps = load i32* %p.deps.ptr
+ %p.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 4
+ %p.deps = load i32, i32* %p.deps.ptr
%p.deps.sub = add i32 %p.deps, -1
%p.deps.sext = sext i32 %p.deps.sub to i64
- %a.cols.ptr = getelementptr inbounds %struct.Mat* %a, i64 0, i32 3
- %a.cols = load i32* %a.cols.ptr
+ %a.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 3
+ %a.cols = load i32, i32* %a.cols.ptr
%a.cols.sext = sext i32 %a.cols to i64
- %a.deps.ptr = getelementptr inbounds %struct.Mat* %a, i64 0, i32 4
- %a.deps = load i32* %a.deps.ptr
+ %a.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 4
+ %a.deps = load i32, i32* %a.deps.ptr
%a.deps.sext = sext i32 %a.deps to i64
- %a.base.ptr = getelementptr inbounds %struct.Mat* %a, i64 0, i32 0
- %a.base = load float** %a.base.ptr, align 8
+ %a.base.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 0
+ %a.base = load float*, float** %a.base.ptr, align 8
br label %for.i
for.i: ; preds = %for.i.inc, %entry
@@ -71,7 +71,7 @@ for.k: ; preds = %for.k, %for.j
%tmp2 = add i64 %tmp1, %j
%tmp3 = mul i64 %tmp2, %a.deps.sext
%tmp4 = add nsw i64 %k, %tmp3
- %arrayidx = getelementptr inbounds float* %a.base, i64 %tmp4
+ %arrayidx = getelementptr inbounds float, float* %a.base, i64 %tmp4
store float 1.000000e+00, float* %arrayidx
%k.inc = add nsw i64 %k, 1
%k.exitcond = icmp eq i64 %k.inc, %p.deps.sext
diff --git a/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll b/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll
index 01a4b96..0c893bf 100644
--- a/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll
+++ b/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll
@@ -8,9 +8,9 @@
; A[2i+b][2j] = 1.0;
; }
-; AddRec: {{((%m * %b * sizeof(double)) + %A),+,(2 * %m * sizeof(double))}<%for.i>,+,(2 * sizeof(double))}<%for.j>
+; AddRec: {{((%m * %b * 8) + %A),+,(2 * %m * 8)}<%for.i>,+,(2 * 8)}<%for.j>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m] with elements of 8 bytes.
; CHECK: ArrayRef[{%b,+,2}<%for.i>][{0,+,2}<%for.j>]
@@ -29,7 +29,7 @@ for.j:
%j = phi i64 [ 0, %for.i ], [ %j.inc, %for.j ]
%prodj = mul i64 %j, 2
%vlaarrayidx.sum = add i64 %prodj, %tmp
- %arrayidx = getelementptr inbounds double* %A, i64 %vlaarrayidx.sum
+ %arrayidx = getelementptr inbounds double, double* %A, i64 %vlaarrayidx.sum
store double 1.0, double* %arrayidx
%j.inc = add nsw i64 %j, 1
%j.exitcond = icmp eq i64 %j.inc, %m
diff --git a/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll b/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll
index ae80ebc..317e62c 100644
--- a/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll
+++ b/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll
@@ -10,7 +10,7 @@
; AddRec: {{{(56 + (8 * (-4 + (3 * %m)) * %o) + %A),+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of 8 bytes.
; CHECK: ArrayRef[{3,+,1}<nw><%for.i>][{-4,+,1}<nw><%for.j>][{7,+,1}<nw><%for.k>]
define void @foo(i64 %n, i64 %m, i64 %o, double* %A) {
@@ -34,7 +34,7 @@ for.k:
%subscript2 = mul i64 %subscript1, %o
%offset2 = add nsw i64 %k, 7
%subscript = add i64 %subscript2, %offset2
- %idx = getelementptr inbounds double* %A, i64 %subscript
+ %idx = getelementptr inbounds double, double* %A, i64 %subscript
store double 1.0, double* %idx
br label %for.k.inc
diff --git a/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll b/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll
index 75080da..ada7758 100644
--- a/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll
+++ b/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll
@@ -10,7 +10,7 @@
; AddRec: {{{(56 + (8 * (-4 + (3 * %m)) * (%o + %p)) + %A),+,(8 * (%o + %p) * %m)}<%for.cond4.preheader.lr.ph.us>,+,(8 * (%o + %p))}<%for.body6.lr.ph.us.us>,+,8}<%for.body6.us.us>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][(%o + %p)] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][(%o + %p)] with elements of 8 bytes.
; CHECK: ArrayRef[{3,+,1}<nw><%for.cond4.preheader.lr.ph.us>][{-4,+,1}<nw><%for.body6.lr.ph.us.us>][{7,+,1}<nw><%for.body6.us.us>]
define void @foo(i64 %n, i64 %m, i64 %o, i64 %p, double* nocapture %A) nounwind uwtable {
@@ -51,7 +51,7 @@ for.body6.us.us: ; preds = %for.body6.us.us, %f
%k.019.us.us = phi i64 [ 0, %for.body6.lr.ph.us.us ], [ %inc.us.us, %for.body6.us.us ]
%arrayidx.sum.us.us = add i64 %k.019.us.us, 7
%arrayidx9.sum.us.us = add i64 %arrayidx.sum.us.us, %tmp17.us.us
- %arrayidx10.us.us = getelementptr inbounds double* %A, i64 %arrayidx9.sum.us.us
+ %arrayidx10.us.us = getelementptr inbounds double, double* %A, i64 %arrayidx9.sum.us.us
store double 1.000000e+00, double* %arrayidx10.us.us, align 8
%inc.us.us = add nsw i64 %k.019.us.us, 1
%exitcond = icmp eq i64 %inc.us.us, %o
diff --git a/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll b/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll
index e921444..9e37b76 100644
--- a/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll
+++ b/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll
@@ -10,7 +10,7 @@
; AddRec: {{{((8 * ((((%m * %p) + %q) * %o) + %r)) + %A),+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of 8 bytes.
; CHECK: ArrayRef[{%p,+,1}<nw><%for.i>][{%q,+,1}<nw><%for.j>][{%r,+,1}<nw><%for.k>]
define void @foo(i64 %n, i64 %m, i64 %o, double* %A, i64 %p, i64 %q, i64 %r) {
@@ -34,7 +34,7 @@ for.k:
%subscript2 = mul i64 %subscript1, %o
%offset2 = add nsw i64 %k, %r
%subscript = add i64 %subscript2, %offset2
- %idx = getelementptr inbounds double* %A, i64 %subscript
+ %idx = getelementptr inbounds double, double* %A, i64 %subscript
store double 1.0, double* %idx
br label %for.k.inc
diff --git a/test/Analysis/Delinearization/multidim_only_ivs_2d.ll b/test/Analysis/Delinearization/multidim_only_ivs_2d.ll
index 5a88c4c..66e2348 100644
--- a/test/Analysis/Delinearization/multidim_only_ivs_2d.ll
+++ b/test/Analysis/Delinearization/multidim_only_ivs_2d.ll
@@ -8,18 +8,18 @@
; A[i][j] = 1.0;
; }
-; Inst: %val = load double* %arrayidx
+; Inst: %val = load double, double* %arrayidx
; In Loop with Header: for.j
-; AddRec: {{0,+,(%m * sizeof(double))}<%for.i>,+,sizeof(double)}<%for.j>
+; AddRec: {{0,+,(%m * 8)}<%for.i>,+,8}<%for.j>
; Base offset: %A
-; ArrayDecl[UnknownSize][%m] with elements of sizeof(double) bytes.
+; ArrayDecl[UnknownSize][%m] with elements of 8 bytes.
; ArrayRef[{0,+,1}<nuw><nsw><%for.i>][{0,+,1}<nuw><nsw><%for.j>]
; Inst: store double %val, double* %arrayidx
; In Loop with Header: for.j
; AddRec: {{%A,+,(8 * %m)}<%for.i>,+,8}<%for.j>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m] with elements of 8 bytes.
; CHECK: ArrayRef[{0,+,1}<nuw><nsw><%for.i>][{0,+,1}<nuw><nsw><%for.j>]
define void @foo(i64 %n, i64 %m, double* %A) {
@@ -34,8 +34,8 @@ for.i:
for.j:
%j = phi i64 [ 0, %for.i ], [ %j.inc, %for.j ]
%vlaarrayidx.sum = add i64 %j, %tmp
- %arrayidx = getelementptr inbounds double* %A, i64 %vlaarrayidx.sum
- %val = load double* %arrayidx
+ %arrayidx = getelementptr inbounds double, double* %A, i64 %vlaarrayidx.sum
+ %val = load double, double* %arrayidx
store double %val, double* %arrayidx
%j.inc = add nsw i64 %j, 1
%j.exitcond = icmp eq i64 %j.inc, %m
diff --git a/test/Analysis/Delinearization/multidim_only_ivs_2d_nested.ll b/test/Analysis/Delinearization/multidim_only_ivs_2d_nested.ll
index 810188f..1ca18c1 100644
--- a/test/Analysis/Delinearization/multidim_only_ivs_2d_nested.ll
+++ b/test/Analysis/Delinearization/multidim_only_ivs_2d_nested.ll
@@ -53,7 +53,7 @@ for.body9.lr.ph.us.us: ; preds = %for.cond7.preheader
for.body9.us.us: ; preds = %for.body9.us.us, %for.body9.lr.ph.us.us
%j.021.us.us = phi i64 [ 0, %for.body9.lr.ph.us.us ], [ %inc.us.us, %for.body9.us.us ]
%arrayidx.sum.us.us = add i64 %j.021.us.us, %0
- %arrayidx10.us.us = getelementptr inbounds double* %vla.us, i64 %arrayidx.sum.us.us
+ %arrayidx10.us.us = getelementptr inbounds double, double* %vla.us, i64 %arrayidx.sum.us.us
store double 1.000000e+00, double* %arrayidx10.us.us, align 8
%inc.us.us = add nsw i64 %j.021.us.us, 1
%exitcond50 = icmp eq i64 %inc.us.us, %indvars.iv48
diff --git a/test/Analysis/Delinearization/multidim_only_ivs_3d.ll b/test/Analysis/Delinearization/multidim_only_ivs_3d.ll
index aad0f09..8cb7ad5 100644
--- a/test/Analysis/Delinearization/multidim_only_ivs_3d.ll
+++ b/test/Analysis/Delinearization/multidim_only_ivs_3d.ll
@@ -10,7 +10,7 @@
; AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of 8 bytes.
; CHECK: ArrayRef[{0,+,1}<nuw><nsw><%for.i>][{0,+,1}<nuw><nsw><%for.j>][{0,+,1}<nuw><nsw><%for.k>]
define void @foo(i64 %n, i64 %m, i64 %o, double* %A) {
@@ -31,7 +31,7 @@ for.k:
%subscript1 = add i64 %j, %subscript0
%subscript2 = mul i64 %subscript1, %o
%subscript = add i64 %subscript2, %k
- %idx = getelementptr inbounds double* %A, i64 %subscript
+ %idx = getelementptr inbounds double, double* %A, i64 %subscript
store double 1.0, double* %idx
br label %for.k.inc
diff --git a/test/Analysis/Delinearization/multidim_only_ivs_3d_cast.ll b/test/Analysis/Delinearization/multidim_only_ivs_3d_cast.ll
index 9e406d1..e08d1b9 100644
--- a/test/Analysis/Delinearization/multidim_only_ivs_3d_cast.ll
+++ b/test/Analysis/Delinearization/multidim_only_ivs_3d_cast.ll
@@ -38,7 +38,7 @@ for.k:
%tmp.us.us = add i64 %j, %tmp
%tmp17.us.us = mul i64 %tmp.us.us, %n_zext
%subscript = add i64 %tmp17.us.us, %k
- %idx = getelementptr inbounds double* %A, i64 %subscript
+ %idx = getelementptr inbounds double, double* %A, i64 %subscript
store double 1.0, double* %idx
br label %for.k.inc
diff --git a/test/Analysis/Delinearization/multidim_two_accesses_different_delinearization.ll b/test/Analysis/Delinearization/multidim_two_accesses_different_delinearization.ll
index 6a98507..6775bba 100644
--- a/test/Analysis/Delinearization/multidim_two_accesses_different_delinearization.ll
+++ b/test/Analysis/Delinearization/multidim_two_accesses_different_delinearization.ll
@@ -23,11 +23,11 @@ for.j:
%j = phi i64 [ 0, %for.i ], [ %j.inc, %for.j ]
%tmp = mul nsw i64 %i, %m
%vlaarrayidx.sum = add i64 %j, %tmp
- %arrayidx = getelementptr inbounds double* %A, i64 %vlaarrayidx.sum
+ %arrayidx = getelementptr inbounds double, double* %A, i64 %vlaarrayidx.sum
store double 1.0, double* %arrayidx
%tmp1 = mul nsw i64 %j, %n
%vlaarrayidx.sum1 = add i64 %i, %tmp1
- %arrayidx1 = getelementptr inbounds double* %A, i64 %vlaarrayidx.sum1
+ %arrayidx1 = getelementptr inbounds double, double* %A, i64 %vlaarrayidx.sum1
store double 1.0, double* %arrayidx1
%j.inc = add nsw i64 %j, 1
%j.exitcond = icmp eq i64 %j.inc, %m
diff --git a/test/Analysis/Delinearization/undef.ll b/test/Analysis/Delinearization/undef.ll
index 8ee64e3..399ff27 100644
--- a/test/Analysis/Delinearization/undef.ll
+++ b/test/Analysis/Delinearization/undef.ll
@@ -20,8 +20,8 @@ for.body60:
%tmp5 = add i64 %iy.067, %0
%tmp6 = mul i64 %tmp5, undef
%arrayidx69.sum = add i64 undef, %tmp6
- %arrayidx70 = getelementptr inbounds double* %Ey, i64 %arrayidx69.sum
- %1 = load double* %arrayidx70, align 8
+ %arrayidx70 = getelementptr inbounds double, double* %Ey, i64 %arrayidx69.sum
+ %1 = load double, double* %arrayidx70, align 8
%inc = add nsw i64 %ix.062, 1
br i1 false, label %for.body60, label %for.end
diff --git a/test/Analysis/DependenceAnalysis/Banerjee.ll b/test/Analysis/DependenceAnalysis/Banerjee.ll
index 883a06d..84459b2 100644
--- a/test/Analysis/DependenceAnalysis/Banerjee.ll
+++ b/test/Analysis/DependenceAnalysis/Banerjee.ll
@@ -40,21 +40,21 @@ for.body3: ; preds = %for.cond1.preheader
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%sub = add nsw i64 %add5, -1
- %arrayidx6 = getelementptr inbounds i64* %A, i64 %sub
- %0 = load i64* %arrayidx6, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub
+ %0 = load i64, i64* %arrayidx6, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 11
br i1 %exitcond, label %for.body3, label %for.inc7
for.inc7: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc8 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc8, 11
br i1 %exitcond5, label %for.cond1.preheader, label %for.end9
@@ -109,21 +109,21 @@ for.body3: ; preds = %for.body3.preheader
%B.addr.12 = phi i64* [ %incdec.ptr, %for.body3 ], [ %B.addr.06, %for.body3.preheader ]
%mul = mul nsw i64 %i.05, 10
%add = add nsw i64 %mul, %j.03
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.05, 10
%add5 = add nsw i64 %mul4, %j.03
%sub = add nsw i64 %add5, -1
- %arrayidx6 = getelementptr inbounds i64* %A, i64 %sub
- %2 = load i64* %arrayidx6, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.12, i64 1
+ %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub
+ %2 = load i64, i64* %arrayidx6, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.12, i64 1
store i64 %2, i64* %B.addr.12, align 8
%inc = add nsw i64 %j.03, 1
%exitcond = icmp eq i64 %inc, %1
br i1 %exitcond, label %for.inc7.loopexit, label %for.body3
for.inc7.loopexit: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.06, i64 %m
+ %scevgep = getelementptr i64, i64* %B.addr.06, i64 %m
br label %for.inc7
for.inc7: ; preds = %for.inc7.loopexit, %for.cond1.preheader
@@ -175,21 +175,21 @@ for.body3: ; preds = %for.cond1.preheader
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 100
- %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6
- %0 = load i64* %arrayidx7, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
+ %0 = load i64, i64* %arrayidx7, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 10
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
@@ -234,21 +234,21 @@ for.body3: ; preds = %for.cond1.preheader
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 99
- %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6
- %0 = load i64* %arrayidx7, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
+ %0 = load i64, i64* %arrayidx7, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 10
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
@@ -293,21 +293,21 @@ for.body3: ; preds = %for.cond1.preheader
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%sub = add nsw i64 %add5, -100
- %arrayidx6 = getelementptr inbounds i64* %A, i64 %sub
- %0 = load i64* %arrayidx6, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub
+ %0 = load i64, i64* %arrayidx6, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc7
for.inc7: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc8 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc8, 10
br i1 %exitcond5, label %for.cond1.preheader, label %for.end9
@@ -352,21 +352,21 @@ for.body3: ; preds = %for.cond1.preheader
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%sub = add nsw i64 %add5, -99
- %arrayidx6 = getelementptr inbounds i64* %A, i64 %sub
- %0 = load i64* %arrayidx6, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub
+ %0 = load i64, i64* %arrayidx6, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc7
for.inc7: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc8 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc8, 10
br i1 %exitcond5, label %for.cond1.preheader, label %for.end9
@@ -411,21 +411,21 @@ for.body3: ; preds = %for.cond1.preheader
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 9
- %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6
- %0 = load i64* %arrayidx7, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
+ %0 = load i64, i64* %arrayidx7, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 10
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
@@ -470,21 +470,21 @@ for.body3: ; preds = %for.cond1.preheader
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 10
- %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6
- %0 = load i64* %arrayidx7, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
+ %0 = load i64, i64* %arrayidx7, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 10
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
@@ -529,21 +529,21 @@ for.body3: ; preds = %for.cond1.preheader
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 11
- %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6
- %0 = load i64* %arrayidx7, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
+ %0 = load i64, i64* %arrayidx7, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 10
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
@@ -589,21 +589,21 @@ for.body3: ; preds = %for.cond1.preheader
%mul = mul nsw i64 %i.03, 30
%mul4 = mul nsw i64 %j.02, 500
%add = add nsw i64 %mul, %mul4
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%0 = mul i64 %j.02, -500
%sub = add i64 %i.03, %0
%add6 = add nsw i64 %sub, 11
- %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6
- %1 = load i64* %arrayidx7, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
+ %1 = load i64, i64* %arrayidx7, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %1, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 20
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 20
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 20
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 20
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
@@ -648,21 +648,21 @@ for.body3: ; preds = %for.cond1.preheader
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %j.02, 500
%add = add nsw i64 %i.03, %mul
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%0 = mul i64 %j.02, -500
%sub = add i64 %i.03, %0
%add5 = add nsw i64 %sub, 11
- %arrayidx6 = getelementptr inbounds i64* %A, i64 %add5
- %1 = load i64* %arrayidx6, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %add5
+ %1 = load i64, i64* %arrayidx6, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %1, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 20
br i1 %exitcond, label %for.body3, label %for.inc7
for.inc7: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 20
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 20
%inc8 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc8, 20
br i1 %exitcond5, label %for.cond1.preheader, label %for.end9
@@ -707,21 +707,21 @@ for.body3: ; preds = %for.cond1.preheader
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 300
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 250
%sub = sub nsw i64 %mul4, %j.02
%add5 = add nsw i64 %sub, 11
- %arrayidx6 = getelementptr inbounds i64* %A, i64 %add5
- %0 = load i64* %arrayidx6, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %add5
+ %0 = load i64, i64* %arrayidx6, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 20
br i1 %exitcond, label %for.body3, label %for.inc7
for.inc7: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 20
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 20
%inc8 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc8, 20
br i1 %exitcond5, label %for.cond1.preheader, label %for.end9
@@ -766,21 +766,21 @@ for.body3: ; preds = %for.cond1.preheader
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 100
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 100
%sub = sub nsw i64 %mul4, %j.02
%add5 = add nsw i64 %sub, 11
- %arrayidx6 = getelementptr inbounds i64* %A, i64 %add5
- %0 = load i64* %arrayidx6, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %add5
+ %0 = load i64, i64* %arrayidx6, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 20
br i1 %exitcond, label %for.body3, label %for.inc7
for.inc7: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 20
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 20
%inc8 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc8, 20
br i1 %exitcond5, label %for.cond1.preheader, label %for.end9
diff --git a/test/Analysis/DependenceAnalysis/Constraints.ll b/test/Analysis/DependenceAnalysis/Constraints.ll
new file mode 100644
index 0000000..42dfac7
--- /dev/null
+++ b/test/Analysis/DependenceAnalysis/Constraints.ll
@@ -0,0 +1,103 @@
+; RUN: opt < %s -analyze -basicaa -da
+;; Check that this code doesn't abort. The test case is a reduced version of the dynprog test case from the LNT Polybench benchmark suite.
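+;; For reference, the IR below corresponds roughly to the following loop nest
+;; (a sketch read back from the GEPs; the names iter/i/j/k and the exact loop
+;; bounds are approximations, not taken verbatim from the benchmark source):
+;;   for (iter = 0; iter < N; iter++) {
+;;     for (i = 0; i <= M - 2; i++)
+;;       for (j = i + 1; j < M; j++) {
+;;         sum_c[i][j][i] = 0;
+;;         for (k = i + 1; k < j; k++)
+;;           sum_c[i][j][k] = sum_c[i][j][k-1] + c[i][k] + c[k][j];
+;;         c[i][j] = sum_c[i][j][j-1] + W[i][j];
+;;       }
+;;     out_l += c[0][M-1];
+;;   }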
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@sum_c = common global [10 x [10 x [10 x i32]]] zeroinitializer
+@c = common global [10 x [10 x i32]] zeroinitializer
+@W = common global [10 x [10 x i32]] zeroinitializer
+@out_l = common global i32 0
+
+; Function Attrs: nounwind uwtable
+define void @dep_constraint_crash_test(i32 %M, i32 %N) {
+ %1 = icmp sgt i32 %N, 0
+ br i1 %1, label %.preheader.lr.ph, label %35
+
+.preheader.lr.ph: ; preds = %0
+ %2 = add nsw i32 %M, -2
+ %3 = icmp slt i32 %M, 2
+ %4 = add nsw i32 %M, -1
+ %5 = sext i32 %4 to i64
+ %6 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @c, i64 0, i64 0, i64 %5
+ %7 = add nsw i32 %M, -1
+ %out_l.promoted = load i32, i32* @out_l
+ %8 = sext i32 %7 to i64
+ %9 = sext i32 %2 to i64
+ br label %.preheader
+
+.preheader: ; preds = %._crit_edge7, %.preheader.lr.ph
+ %10 = phi i32 [ %out_l.promoted, %.preheader.lr.ph ], [ %33, %._crit_edge7 ]
+ %iter.08 = phi i32 [ 0, %.preheader.lr.ph ], [ %34, %._crit_edge7 ]
+ br i1 %3, label %._crit_edge7, label %.lr.ph6
+
+.loopexit: ; preds = %._crit_edge, %.lr.ph6
+ %11 = icmp slt i64 %indvars.iv23, %9
+ %indvars.iv.next18 = add nuw nsw i64 %indvars.iv17, 1
+ %indvars.iv.next14 = add nuw i32 %indvars.iv13, 1
+ br i1 %11, label %.lr.ph6, label %._crit_edge7
+
+.lr.ph6: ; preds = %.preheader, %.loopexit
+ %indvars.iv23 = phi i64 [ %indvars.iv.next24, %.loopexit ], [ 0, %.preheader ]
+ %indvars.iv17 = phi i64 [ %indvars.iv.next18, %.loopexit ], [ 1, %.preheader ]
+ %indvars.iv13 = phi i32 [ %indvars.iv.next14, %.loopexit ], [ 1, %.preheader ]
+ %indvars.iv.next24 = add nuw nsw i64 %indvars.iv23, 1
+ %12 = icmp slt i64 %indvars.iv23, %8
+ br i1 %12, label %.lr.ph4, label %.loopexit
+
+.lr.ph4: ; preds = %.lr.ph6, %._crit_edge
+ %indvars.iv19 = phi i64 [ %indvars.iv.next20, %._crit_edge ], [ %indvars.iv17, %.lr.ph6 ]
+ %indvars.iv15 = phi i32 [ %indvars.iv.next16, %._crit_edge ], [ %indvars.iv13, %.lr.ph6 ]
+ %13 = getelementptr inbounds [10 x [10 x [10 x i32]]], [10 x [10 x [10 x i32]]]* @sum_c, i64 0, i64 %indvars.iv23, i64 %indvars.iv19, i64 %indvars.iv23
+ store i32 0, i32* %13
+ %14 = add nsw i64 %indvars.iv19, -1
+ %15 = icmp slt i64 %indvars.iv23, %14
+ br i1 %15, label %.lr.ph, label %._crit_edge
+
+.lr.ph: ; preds = %.lr.ph4, %.lr.ph
+ %indvars.iv11 = phi i64 [ %indvars.iv.next12, %.lr.ph ], [ %indvars.iv17, %.lr.ph4 ]
+ %16 = add nsw i64 %indvars.iv11, -1
+ %17 = getelementptr inbounds [10 x [10 x [10 x i32]]], [10 x [10 x [10 x i32]]]* @sum_c, i64 0, i64 %indvars.iv23, i64 %indvars.iv19, i64 %16
+ %18 = load i32, i32* %17
+ %19 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @c, i64 0, i64 %indvars.iv23, i64 %indvars.iv11
+ %20 = load i32, i32* %19
+ %21 = add nsw i32 %20, %18
+ %22 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @c, i64 0, i64 %indvars.iv11, i64 %indvars.iv19
+ %23 = load i32, i32* %22
+ %24 = add nsw i32 %21, %23
+ %25 = getelementptr inbounds [10 x [10 x [10 x i32]]], [10 x [10 x [10 x i32]]]* @sum_c, i64 0, i64 %indvars.iv23, i64 %indvars.iv19, i64 %indvars.iv11
+ store i32 %24, i32* %25
+ %indvars.iv.next12 = add nuw nsw i64 %indvars.iv11, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next12 to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %indvars.iv15
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %.lr.ph4
+ %26 = getelementptr inbounds [10 x [10 x [10 x i32]]], [10 x [10 x [10 x i32]]]* @sum_c, i64 0, i64 %indvars.iv23, i64 %indvars.iv19, i64 %14
+ %27 = load i32, i32* %26
+ %28 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @W, i64 0, i64 %indvars.iv23, i64 %indvars.iv19
+ %29 = load i32, i32* %28
+ %30 = add nsw i32 %29, %27
+ %31 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @c, i64 0, i64 %indvars.iv23, i64 %indvars.iv19
+ store i32 %30, i32* %31
+ %indvars.iv.next16 = add nuw i32 %indvars.iv15, 1
+ %indvars.iv.next20 = add nuw nsw i64 %indvars.iv19, 1
+ %lftr.wideiv21 = trunc i64 %indvars.iv.next20 to i32
+ %exitcond22 = icmp eq i32 %lftr.wideiv21, %M
+ br i1 %exitcond22, label %.loopexit, label %.lr.ph4
+
+._crit_edge7: ; preds = %.loopexit, %.preheader
+ %32 = load i32, i32* %6
+ %33 = add nsw i32 %10, %32
+ %34 = add nuw nsw i32 %iter.08, 1
+ %exitcond25 = icmp eq i32 %34, %N
+ br i1 %exitcond25, label %._crit_edge9, label %.preheader
+
+._crit_edge9: ; preds = %._crit_edge7
+ store i32 %33, i32* @out_l
+ br label %35
+
+; <label>:35 ; preds = %._crit_edge9, %0
+ ret void
+}
+
diff --git a/test/Analysis/DependenceAnalysis/Coupled.ll b/test/Analysis/DependenceAnalysis/Coupled.ll
index 8c77849..096add6 100644
--- a/test/Analysis/DependenceAnalysis/Coupled.ll
+++ b/test/Analysis/DependenceAnalysis/Coupled.ll
@@ -24,13 +24,13 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx1 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
store i32 %conv, i32* %arrayidx1, align 4
%add = add nsw i64 %i.02, 9
%add2 = add nsw i64 %i.02, 10
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %add2, i64 %add
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add2, i64 %add
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
@@ -60,13 +60,13 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx1 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
store i32 %conv, i32* %arrayidx1, align 4
%add = add nsw i64 %i.02, 9
%add2 = add nsw i64 %i.02, 9
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %add2, i64 %add
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add2, i64 %add
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
@@ -100,11 +100,11 @@ for.body: ; preds = %entry, %for.body
%sub = add nsw i64 %mul, -6
%mul1 = mul nsw i64 %i.02, 3
%sub2 = add nsw i64 %mul1, -6
- %arrayidx3 = getelementptr inbounds [100 x i32]* %A, i64 %sub2, i64 %sub
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub2, i64 %sub
store i32 %conv, i32* %arrayidx3, align 4
- %arrayidx5 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %0 = load i32, i32* %arrayidx5, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
@@ -138,11 +138,11 @@ for.body: ; preds = %entry, %for.body
%sub = add nsw i64 %mul, -5
%mul1 = mul nsw i64 %i.02, 3
%sub2 = add nsw i64 %mul1, -6
- %arrayidx3 = getelementptr inbounds [100 x i32]* %A, i64 %sub2, i64 %sub
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub2, i64 %sub
store i32 %conv, i32* %arrayidx3, align 4
- %arrayidx5 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %0 = load i32, i32* %arrayidx5, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
@@ -177,11 +177,11 @@ for.body: ; preds = %entry, %for.body
%sub = sub nsw i64 %mul, %conv1
%mul2 = mul nsw i64 %i.02, 3
%sub3 = add nsw i64 %mul2, -6
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %sub3, i64 %sub
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub3, i64 %sub
store i32 %conv, i32* %arrayidx4, align 4
- %arrayidx6 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx6, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %0 = load i32, i32* %arrayidx6, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
@@ -218,11 +218,11 @@ for.body: ; preds = %entry, %for.body
%conv3 = sext i32 %n to i64
%sub4 = sub nsw i64 %mul2, %conv3
%add = add nsw i64 %sub4, 1
- %arrayidx5 = getelementptr inbounds [100 x i32]* %A, i64 %add, i64 %sub
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add, i64 %sub
store i32 %conv, i32* %arrayidx5, align 4
- %arrayidx7 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %0 = load i32, i32* %arrayidx7, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
@@ -254,11 +254,11 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.02 to i32
%mul = mul nsw i64 %i.02, 3
%sub = add nsw i64 %mul, -6
- %arrayidx1 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %sub
+ %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %sub
store i32 %conv, i32* %arrayidx1, align 4
- %arrayidx3 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %0 = load i32, i32* %arrayidx3, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
@@ -290,11 +290,11 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.02 to i32
%mul = mul nsw i64 %i.02, 3
%sub = add nsw i64 %mul, -5
- %arrayidx1 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %sub
+ %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %sub
store i32 %conv, i32* %arrayidx1, align 4
- %arrayidx3 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %0 = load i32, i32* %arrayidx3, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
@@ -327,11 +327,11 @@ for.body: ; preds = %entry, %for.body
%sub = sub nsw i64 3, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 16
@@ -364,11 +364,11 @@ for.body: ; preds = %entry, %for.body
%sub = sub nsw i64 2, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 16
@@ -402,11 +402,11 @@ for.body: ; preds = %entry, %for.body
%sub = sub nsw i64 6, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 16
@@ -440,11 +440,11 @@ for.body: ; preds = %entry, %for.body
%sub = sub nsw i64 18, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 16
@@ -478,11 +478,11 @@ for.body: ; preds = %entry, %for.body
%sub = sub nsw i64 22, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 13
@@ -515,11 +515,11 @@ for.body: ; preds = %entry, %for.body
%sub = sub nsw i64 22, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 12
@@ -552,11 +552,11 @@ for.body: ; preds = %entry, %for.body
%sub = sub nsw i64 18, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx3 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub1, i64 %sub, i64 %i.02
+ %arrayidx3 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub1, i64 %sub, i64 %i.02
store i32 %conv, i32* %arrayidx3, align 4
- %arrayidx6 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %i.02, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx6, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %i.02, i64 %i.02, i64 %i.02
+ %0 = load i32, i32* %arrayidx6, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 100
@@ -589,11 +589,11 @@ for.body: ; preds = %entry, %for.body
%sub = sub nsw i64 22, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx3 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub1, i64 %sub, i64 %i.02
+ %arrayidx3 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub1, i64 %sub, i64 %i.02
store i32 %conv, i32* %arrayidx3, align 4
- %arrayidx6 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %i.02, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx6, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %i.02, i64 %i.02, i64 %i.02
+ %0 = load i32, i32* %arrayidx6, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 100
diff --git a/test/Analysis/DependenceAnalysis/ExactRDIV.ll b/test/Analysis/DependenceAnalysis/ExactRDIV.ll
index 81f5516..5b2488c 100644
--- a/test/Analysis/DependenceAnalysis/ExactRDIV.ll
+++ b/test/Analysis/DependenceAnalysis/ExactRDIV.ll
@@ -26,7 +26,7 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.03 to i32
%mul = shl nsw i64 %i.03, 2
%add = add nsw i64 %mul, 10
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc, 10
@@ -40,9 +40,9 @@ for.body4: ; preds = %for.body4.preheader
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%mul5 = shl nsw i64 %j.02, 1
%add64 = or i64 %mul5, 1
- %arrayidx7 = getelementptr inbounds i32* %A, i64 %add64
- %0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add64
+ %0 = load i32, i32* %arrayidx7, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc9 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc9, 10
@@ -74,7 +74,7 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = add nsw i64 %mul, -45
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 5
@@ -86,9 +86,9 @@ for.body4.preheader: ; preds = %for.body
for.body4: ; preds = %for.body4.preheader, %for.body4
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %j.02
- %0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02
+ %0 = load i32, i32* %arrayidx5, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 10
@@ -120,7 +120,7 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = add nsw i64 %mul, -45
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 6
@@ -132,9 +132,9 @@ for.body4.preheader: ; preds = %for.body
for.body4: ; preds = %for.body4.preheader, %for.body4
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %j.02
- %0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02
+ %0 = load i32, i32* %arrayidx5, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 10
@@ -166,7 +166,7 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = add nsw i64 %mul, -45
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 5
@@ -178,9 +178,9 @@ for.body4.preheader: ; preds = %for.body
for.body4: ; preds = %for.body4.preheader, %for.body4
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %j.02
- %0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02
+ %0 = load i32, i32* %arrayidx5, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 11
@@ -212,7 +212,7 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = add nsw i64 %mul, -45
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 6
@@ -224,9 +224,9 @@ for.body4.preheader: ; preds = %for.body
for.body4: ; preds = %for.body4.preheader, %for.body4
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %j.02
- %0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02
+ %0 = load i32, i32* %arrayidx5, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 11
@@ -258,7 +258,7 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, -11
%add = add nsw i64 %mul, 45
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 5
@@ -271,9 +271,9 @@ for.body4: ; preds = %for.body4.preheader
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub = sub nsw i64 0, %j.02
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %sub
- %0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub
+ %0 = load i32, i32* %arrayidx5, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 10
@@ -305,7 +305,7 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, -11
%add = add nsw i64 %mul, 45
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 6
@@ -318,9 +318,9 @@ for.body4: ; preds = %for.body4.preheader
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub = sub nsw i64 0, %j.02
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %sub
- %0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub
+ %0 = load i32, i32* %arrayidx5, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 10
@@ -352,7 +352,7 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, -11
%add = add nsw i64 %mul, 45
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 5
@@ -365,9 +365,9 @@ for.body4: ; preds = %for.body4.preheader
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub = sub nsw i64 0, %j.02
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %sub
- %0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub
+ %0 = load i32, i32* %arrayidx5, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 11
@@ -399,7 +399,7 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, -11
%add = add nsw i64 %mul, 45
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 6
@@ -412,9 +412,9 @@ for.body4: ; preds = %for.body4.preheader
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub = sub nsw i64 0, %j.02
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %sub
- %0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub
+ %0 = load i32, i32* %arrayidx5, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 11
@@ -452,18 +452,18 @@ for.body3: ; preds = %for.cond1.preheader
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = sub nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 45
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc5
for.inc5: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 10
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 10
%inc6 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc6, 5
br i1 %exitcond5, label %for.cond1.preheader, label %for.end7
@@ -501,18 +501,18 @@ for.body3: ; preds = %for.cond1.preheader
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = sub nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 45
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc5
for.inc5: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 10
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 10
%inc6 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc6, 6
br i1 %exitcond5, label %for.cond1.preheader, label %for.end7
@@ -549,18 +549,18 @@ for.body3: ; preds = %for.cond1.preheader
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = sub nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 45
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 11
br i1 %exitcond, label %for.body3, label %for.inc5
for.inc5: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 11
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 11
%inc6 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc6, 5
br i1 %exitcond5, label %for.cond1.preheader, label %for.end7
@@ -597,18 +597,18 @@ for.body3: ; preds = %for.cond1.preheader
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = sub nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 45
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 11
br i1 %exitcond, label %for.body3, label %for.inc5
for.inc5: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 11
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 11
%inc6 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc6, 6
br i1 %exitcond5, label %for.cond1.preheader, label %for.end7
diff --git a/test/Analysis/DependenceAnalysis/ExactSIV.ll b/test/Analysis/DependenceAnalysis/ExactSIV.ll
index 586bbe5..d84cd05 100644
--- a/test/Analysis/DependenceAnalysis/ExactSIV.ll
+++ b/test/Analysis/DependenceAnalysis/ExactSIV.ll
@@ -25,13 +25,13 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%add = add i64 %i.02, 10
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
%add13 = or i64 %mul, 1
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %add13
- %0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add13
+ %0 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 10
@@ -63,13 +63,13 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 2
%add = add i64 %mul, 10
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul1 = shl i64 %i.02, 1
%add23 = or i64 %mul1, 1
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %add23
- %0 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add23
+ %0 = load i32, i32* %arrayidx3, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 10
@@ -100,12 +100,12 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, 6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 10
@@ -136,12 +136,12 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, 6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 11
@@ -172,12 +172,12 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, 6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 12
@@ -208,12 +208,12 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, 6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 13
@@ -244,12 +244,12 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, 6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 18
@@ -280,12 +280,12 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, 6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 19
@@ -316,12 +316,12 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, -6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1
- %0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
+ %0 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 10
@@ -352,12 +352,12 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, -6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1
- %0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
+ %0 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 11
@@ -388,12 +388,12 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, -6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1
- %0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
+ %0 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 12
@@ -424,12 +424,12 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, -6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1
- %0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
+ %0 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 13
@@ -460,12 +460,12 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, -6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1
- %0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
+ %0 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 18
@@ -496,12 +496,12 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, -6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1
- %0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
+ %0 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 19
diff --git a/test/Analysis/DependenceAnalysis/GCD.ll b/test/Analysis/DependenceAnalysis/GCD.ll
index 7eca18e..81d05a1 100644
--- a/test/Analysis/DependenceAnalysis/GCD.ll
+++ b/test/Analysis/DependenceAnalysis/GCD.ll
@@ -43,21 +43,21 @@ for.body3: ; preds = %for.cond1.preheader
%mul = shl nsw i64 %i.03, 1
%mul4 = shl nsw i64 %j.02, 2
%sub = sub nsw i64 %mul, %mul4
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%mul5 = mul nsw i64 %i.03, 6
%mul6 = shl nsw i64 %j.02, 3
%add = add nsw i64 %mul5, %mul6
- %arrayidx7 = getelementptr inbounds i32* %A, i64 %add
- %0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add
+ %0 = load i32, i32* %arrayidx7, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
@@ -104,22 +104,22 @@ for.body3: ; preds = %for.cond1.preheader
%mul = shl nsw i64 %i.03, 1
%mul4 = shl nsw i64 %j.02, 2
%sub = sub nsw i64 %mul, %mul4
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%mul5 = mul nsw i64 %i.03, 6
%mul6 = shl nsw i64 %j.02, 3
%add = add nsw i64 %mul5, %mul6
%add7 = or i64 %add, 1
- %arrayidx8 = getelementptr inbounds i32* %A, i64 %add7
- %0 = load i32* %arrayidx8, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %add7
+ %0 = load i32, i32* %arrayidx8, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc9
for.inc9: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc10 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc10, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end11
@@ -167,21 +167,21 @@ for.body3: ; preds = %for.cond1.preheader
%mul4 = shl nsw i64 %j.02, 2
%sub = sub nsw i64 %mul, %mul4
%add5 = or i64 %sub, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add5
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add5
store i32 %conv, i32* %arrayidx, align 4
%mul5 = mul nsw i64 %i.03, 6
%mul6 = shl nsw i64 %j.02, 3
%add7 = add nsw i64 %mul5, %mul6
- %arrayidx8 = getelementptr inbounds i32* %A, i64 %add7
- %0 = load i32* %arrayidx8, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %add7
+ %0 = load i32, i32* %arrayidx8, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc9
for.inc9: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc10 = add nsw i64 %i.03, 1
%exitcond6 = icmp ne i64 %inc10, 100
br i1 %exitcond6, label %for.cond1.preheader, label %for.end11
@@ -227,21 +227,21 @@ for.body3: ; preds = %for.cond1.preheader
%conv = trunc i64 %i.03 to i32
%mul = shl nsw i64 %j.02, 1
%add = add nsw i64 %i.03, %mul
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul4 = shl nsw i64 %j.02, 1
%add5 = add nsw i64 %i.03, %mul4
%sub = add nsw i64 %add5, -1
- %arrayidx6 = getelementptr inbounds i32* %A, i64 %sub
- %0 = load i32* %arrayidx6, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %sub
+ %0 = load i32, i32* %arrayidx6, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc7
for.inc7: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc8 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc8, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end9
@@ -292,7 +292,7 @@ for.body3: ; preds = %for.cond1.preheader
%mul6 = mul nsw i64 %M, 9
%mul7 = mul nsw i64 %mul6, %N
%add8 = add nsw i64 %add, %mul7
- %arrayidx = getelementptr inbounds i32* %A, i64 %add8
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add8
store i32 %conv, i32* %arrayidx, align 4
%mul9 = mul nsw i64 %i.03, 15
%mul10 = mul nsw i64 %j.02, 20
@@ -302,16 +302,16 @@ for.body3: ; preds = %for.cond1.preheader
%mul14 = mul nsw i64 %mul13, %M
%sub = sub nsw i64 %add12, %mul14
%add15 = add nsw i64 %sub, 4
- %arrayidx16 = getelementptr inbounds i32* %A, i64 %add15
- %0 = load i32* %arrayidx16, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 %add15
+ %0 = load i32, i32* %arrayidx16, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc17
for.inc17: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc18 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc18, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end19
@@ -362,7 +362,7 @@ for.body3: ; preds = %for.cond1.preheader
%mul6 = mul nsw i64 %M, 9
%mul7 = mul nsw i64 %mul6, %N
%add8 = add nsw i64 %add, %mul7
- %arrayidx = getelementptr inbounds i32* %A, i64 %add8
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add8
store i32 %conv, i32* %arrayidx, align 4
%mul9 = mul nsw i64 %i.03, 15
%mul10 = mul nsw i64 %j.02, 20
@@ -372,16 +372,16 @@ for.body3: ; preds = %for.cond1.preheader
%mul14 = mul nsw i64 %mul13, %M
%sub = sub nsw i64 %add12, %mul14
%add15 = add nsw i64 %sub, 5
- %arrayidx16 = getelementptr inbounds i32* %A, i64 %add15
- %0 = load i32* %arrayidx16, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 %add15
+ %0 = load i32, i32* %arrayidx16, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc17
for.inc17: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc18 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc18, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end19
@@ -437,23 +437,23 @@ for.body3: ; preds = %for.body3.preheader
%mul4 = shl nsw i64 %i.06, 1
%0 = mul nsw i64 %mul4, %n
%arrayidx.sum = add i64 %0, %mul
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %arrayidx.sum
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %arrayidx.sum
store i32 %conv, i32* %arrayidx5, align 4
%mul6 = mul nsw i64 %j.03, 6
%add7 = or i64 %mul6, 1
%mul7 = shl nsw i64 %i.06, 3
%1 = mul nsw i64 %mul7, %n
%arrayidx8.sum = add i64 %1, %add7
- %arrayidx9 = getelementptr inbounds i32* %A, i64 %arrayidx8.sum
- %2 = load i32* %arrayidx9, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1
+ %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 %arrayidx8.sum
+ %2 = load i32, i32* %arrayidx9, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %2, i32* %B.addr.12, align 4
%inc = add nsw i64 %j.03, 1
%exitcond = icmp ne i64 %inc, %n
br i1 %exitcond, label %for.body3, label %for.inc10.loopexit
for.inc10.loopexit: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.05, i64 %n
+ %scevgep = getelementptr i32, i32* %B.addr.05, i64 %n
br label %for.inc10
for.inc10: ; preds = %for.inc10.loopexit, %for.cond1.preheader
@@ -523,7 +523,7 @@ for.body3: ; preds = %for.body3.preheader
%idxprom5 = sext i32 %mul4 to i64
%6 = mul nsw i64 %idxprom5, %0
%arrayidx.sum = add i64 %6, %idxprom
- %arrayidx6 = getelementptr inbounds i32* %A, i64 %arrayidx.sum
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %arrayidx.sum
%7 = trunc i64 %indvars.iv8 to i32
store i32 %7, i32* %arrayidx6, align 4
%8 = trunc i64 %indvars.iv to i32
@@ -535,9 +535,9 @@ for.body3: ; preds = %for.body3.preheader
%idxprom10 = sext i32 %mul9 to i64
%10 = mul nsw i64 %idxprom10, %0
%arrayidx11.sum = add i64 %10, %idxprom8
- %arrayidx12 = getelementptr inbounds i32* %A, i64 %arrayidx11.sum
- %11 = load i32* %arrayidx12, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1
+ %arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %arrayidx11.sum
+ %11 = load i32, i32* %arrayidx12, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %11, i32* %B.addr.12, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -545,7 +545,7 @@ for.body3: ; preds = %for.body3.preheader
br i1 %exitcond, label %for.body3, label %for.inc13.loopexit
for.inc13.loopexit: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.05, i64 %3
+ %scevgep = getelementptr i32, i32* %B.addr.05, i64 %3
br label %for.inc13
for.inc13: ; preds = %for.inc13.loopexit, %for.cond1.preheader
@@ -613,7 +613,7 @@ for.body3: ; preds = %for.body3.preheader
%mul5 = shl nsw i32 %3, 2
%add = add nsw i32 %mul4, %mul5
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
store i32 %i.06, i32* %arrayidx, align 4
%mul6 = shl nsw i32 %n, 3
%mul7 = mul nsw i32 %mul6, %i.06
@@ -622,9 +622,9 @@ for.body3: ; preds = %for.body3.preheader
%add9 = add nsw i32 %mul7, %mul8
%add10 = or i32 %add9, 1
%idxprom11 = sext i32 %add10 to i64
- %arrayidx12 = getelementptr inbounds i32* %A, i64 %idxprom11
- %5 = load i32* %arrayidx12, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1
+ %arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %idxprom11
+ %5 = load i32, i32* %arrayidx12, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %5, i32* %B.addr.12, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -632,7 +632,7 @@ for.body3: ; preds = %for.body3.preheader
br i1 %exitcond, label %for.body3, label %for.inc13.loopexit
for.inc13.loopexit: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.05, i64 %2
+ %scevgep = getelementptr i32, i32* %B.addr.05, i64 %2
br label %for.inc13
for.inc13: ; preds = %for.inc13.loopexit, %for.cond1.preheader
@@ -702,7 +702,7 @@ for.body3: ; preds = %for.body3.preheader
%idxprom5 = zext i32 %mul4 to i64
%6 = mul nsw i64 %idxprom5, %0
%arrayidx.sum = add i64 %6, %idxprom
- %arrayidx6 = getelementptr inbounds i32* %A, i64 %arrayidx.sum
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %arrayidx.sum
%7 = trunc i64 %indvars.iv8 to i32
store i32 %7, i32* %arrayidx6, align 4
%8 = trunc i64 %indvars.iv to i32
@@ -714,9 +714,9 @@ for.body3: ; preds = %for.body3.preheader
%idxprom10 = zext i32 %mul9 to i64
%10 = mul nsw i64 %idxprom10, %0
%arrayidx11.sum = add i64 %10, %idxprom8
- %arrayidx12 = getelementptr inbounds i32* %A, i64 %arrayidx11.sum
- %11 = load i32* %arrayidx12, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1
+ %arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %arrayidx11.sum
+ %11 = load i32, i32* %arrayidx12, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %11, i32* %B.addr.12, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -724,7 +724,7 @@ for.body3: ; preds = %for.body3.preheader
br i1 %exitcond, label %for.body3, label %for.inc13.loopexit
for.inc13.loopexit: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.05, i64 %3
+ %scevgep = getelementptr i32, i32* %B.addr.05, i64 %3
br label %for.inc13
for.inc13: ; preds = %for.inc13.loopexit, %for.cond1.preheader
diff --git a/test/Analysis/DependenceAnalysis/Invariant.ll b/test/Analysis/DependenceAnalysis/Invariant.ll
index 202d8e2..9fdb4d9 100644
--- a/test/Analysis/DependenceAnalysis/Invariant.ll
+++ b/test/Analysis/DependenceAnalysis/Invariant.ll
@@ -19,10 +19,10 @@ for.cond1.preheader:
for.body3:
%j.02 = phi i32 [ 0, %for.cond1.preheader ], [ %add8, %for.body3 ]
%res.11 = phi float [ %res.03, %for.cond1.preheader ], [ %add.res.1, %for.body3 ]
- %arrayidx4 = getelementptr inbounds [40 x float]* %rr, i32 %j.02, i32 %j.02
- %0 = load float* %arrayidx4, align 4
- %arrayidx6 = getelementptr inbounds [40 x float]* %rr, i32 %i.04, i32 %j.02
- %1 = load float* %arrayidx6, align 4
+ %arrayidx4 = getelementptr inbounds [40 x float], [40 x float]* %rr, i32 %j.02, i32 %j.02
+ %0 = load float, float* %arrayidx4, align 4
+ %arrayidx6 = getelementptr inbounds [40 x float], [40 x float]* %rr, i32 %i.04, i32 %j.02
+ %1 = load float, float* %arrayidx6, align 4
%add = fadd float %0, %1
%cmp7 = fcmp ogt float %add, %g
%add.res.1 = select i1 %cmp7, float %add, float %res.11
diff --git a/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll b/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
index 95e5e52..1b47341 100644
--- a/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
+++ b/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
@@ -26,9 +26,9 @@ for.body:
; DELIN: da analyze - anti [=|<]!
; DELIN: da analyze - none!
%i = phi i64 [ 0, %entry ], [ %i.inc, %for.body ]
- %a.addr = getelementptr [100 x [100 x i32]]* %a, i64 0, i64 %i, i64 %i
- %a.addr.2 = getelementptr [100 x [100 x i32]]* %a, i64 0, i64 %i, i32 5
- %0 = load i32* %a.addr, align 4
+ %a.addr = getelementptr [100 x [100 x i32]], [100 x [100 x i32]]* %a, i64 0, i64 %i, i64 %i
+ %a.addr.2 = getelementptr [100 x [100 x i32]], [100 x [100 x i32]]* %a, i64 0, i64 %i, i32 5
+ %0 = load i32, i32* %a.addr, align 4
%1 = add i32 %0, 1
store i32 %1, i32* %a.addr.2, align 4
%i.inc = add nsw i64 %i, 1
diff --git a/test/Analysis/DependenceAnalysis/Preliminary.ll b/test/Analysis/DependenceAnalysis/Preliminary.ll
index f36b85a..d6500cc 100644
--- a/test/Analysis/DependenceAnalysis/Preliminary.ll
+++ b/test/Analysis/DependenceAnalysis/Preliminary.ll
@@ -17,8 +17,8 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
- %arrayidx1 = getelementptr inbounds i32* %B, i64 1
- %0 = load i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds i32, i32* %B, i64 1
+ %0 = load i32, i32* %arrayidx1, align 4
ret i32 %0
}
@@ -35,8 +35,8 @@ entry:
; CHECK: da analyze - none!
; CHECK: da analyze - none!
- %arrayidx1 = getelementptr inbounds i32* %B, i64 1
- %0 = load i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds i32, i32* %B, i64 1
+ %0 = load i32, i32* %arrayidx1, align 4
ret i32 %0
}
@@ -84,7 +84,7 @@ for.body6.preheader: ; preds = %for.cond4.preheader
for.body6: ; preds = %for.body6.preheader, %for.body6
%k.02 = phi i64 [ %inc, %for.body6 ], [ 0, %for.body6.preheader ]
- %arrayidx8 = getelementptr inbounds [100 x [100 x i64]]* %A, i64 %i.011, i64 %j.07, i64 %k.02
+ %arrayidx8 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* %A, i64 %i.011, i64 %j.07, i64 %k.02
store i64 %i.011, i64* %arrayidx8, align 8
%inc = add nsw i64 %k.02, 1
%exitcond13 = icmp ne i64 %inc, %n
@@ -106,16 +106,16 @@ for.body12: ; preds = %for.body12.preheade
%add = add nsw i64 %k9.05, 1
%add13 = add nsw i64 %j.07, 2
%add14 = add nsw i64 %i.011, 3
- %arrayidx17 = getelementptr inbounds [100 x [100 x i64]]* %A, i64 %add14, i64 %add13, i64 %add
- %0 = load i64* %arrayidx17, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.24, i64 1
+ %arrayidx17 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* %A, i64 %add14, i64 %add13, i64 %add
+ %0 = load i64, i64* %arrayidx17, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.24, i64 1
store i64 %0, i64* %B.addr.24, align 8
%inc19 = add nsw i64 %k9.05, 1
%exitcond = icmp ne i64 %inc19, %n
br i1 %exitcond, label %for.body12, label %for.inc21.loopexit
for.inc21.loopexit: ; preds = %for.body12
- %scevgep = getelementptr i64* %B.addr.18, i64 %n
+ %scevgep = getelementptr i64, i64* %B.addr.18, i64 %n
br label %for.inc21
for.inc21: ; preds = %for.inc21.loopexit, %for.cond10.loopexit
@@ -281,7 +281,7 @@ for.body33: ; preds = %for.body33.preheade
%add3547 = or i64 %mul, 1
%sub = add nsw i64 %k.037, -1
%sub36 = add nsw i64 %i.045, -3
- %arrayidx43 = getelementptr inbounds [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]]* %A, i64 %sub36, i64 %j.041, i64 2, i64 %sub, i64 %add3547, i64 %m.029, i64 %add34, i64 %add
+ %arrayidx43 = getelementptr inbounds [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]], [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]]* %A, i64 %sub36, i64 %j.041, i64 2, i64 %sub, i64 %add3547, i64 %m.029, i64 %add34, i64 %add
store i64 %i.045, i64* %arrayidx43, align 8
%add44 = add nsw i64 %t.03, 2
%add45 = add nsw i64 %n, 1
@@ -289,16 +289,16 @@ for.body33: ; preds = %for.body33.preheade
%sub47 = add nsw i64 %mul46, -1
%sub48 = sub nsw i64 1, %k.037
%add49 = add nsw i64 %i.045, 3
- %arrayidx57 = getelementptr inbounds [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]]* %A, i64 %add49, i64 2, i64 %u.06, i64 %sub48, i64 %sub47, i64 %o.025, i64 %add45, i64 %add44
- %0 = load i64* %arrayidx57, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.112, i64 1
+ %arrayidx57 = getelementptr inbounds [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]], [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]]* %A, i64 %add49, i64 2, i64 %u.06, i64 %sub48, i64 %sub47, i64 %o.025, i64 %add45, i64 %add44
+ %0 = load i64, i64* %arrayidx57, align 8
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.112, i64 1
store i64 %0, i64* %B.addr.112, align 8
%inc = add nsw i64 %t.03, 1
%exitcond = icmp ne i64 %inc, %n
br i1 %exitcond, label %for.body33, label %for.inc58.loopexit
for.inc58.loopexit: ; preds = %for.body33
- %scevgep = getelementptr i64* %B.addr.105, i64 %n
+ %scevgep = getelementptr i64, i64* %B.addr.105, i64 %n
br label %for.inc58
for.inc58: ; preds = %for.inc58.loopexit, %for.cond31.preheader
@@ -441,12 +441,12 @@ for.body: ; preds = %for.body.preheader,
%conv2 = sext i8 %i.03 to i32
%conv3 = sext i8 %i.03 to i64
%add = add i64 %conv3, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv2, i32* %arrayidx, align 4
%idxprom4 = sext i8 %i.03 to i64
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %idxprom4
- %0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %idxprom4
+ %0 = load i32, i32* %arrayidx5, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i8 %i.03, 1
%conv = sext i8 %inc to i64
@@ -487,12 +487,12 @@ for.body: ; preds = %for.body.preheader,
%conv2 = sext i16 %i.03 to i32
%conv3 = sext i16 %i.03 to i64
%add = add i64 %conv3, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv2, i32* %arrayidx, align 4
%idxprom4 = sext i16 %i.03 to i64
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %idxprom4
- %0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %idxprom4
+ %0 = load i32, i32* %arrayidx5, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i16 %i.03, 1
%conv = sext i16 %inc to i64
@@ -531,12 +531,12 @@ for.body: ; preds = %for.body.preheader,
%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%0 = add nsw i64 %indvars.iv, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %0
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %0
%1 = trunc i64 %indvars.iv to i32
store i32 %1, i32* %arrayidx, align 4
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %indvars.iv
- %2 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %2 = load i32, i32* %arrayidx3, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %2, i32* %B.addr.02, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, %n
@@ -557,7 +557,7 @@ for.end: ; preds = %for.end.loopexit, %
define void @p7(i32* %A, i32* %B, i8 signext %n) nounwind uwtable ssp {
entry:
%idxprom = sext i8 %n to i64
- %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
; CHECK: da analyze - none!
; CHECK: da analyze - none!
@@ -569,8 +569,8 @@ entry:
store i32 0, i32* %arrayidx, align 4
%conv = sext i8 %n to i64
%add = add i64 %conv, 1
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %add
- %0 = load i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add
+ %0 = load i32, i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
}
@@ -583,7 +583,7 @@ entry:
define void @p8(i32* %A, i32* %B, i16 signext %n) nounwind uwtable ssp {
entry:
%idxprom = sext i16 %n to i64
- %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
store i32 0, i32* %arrayidx, align 4
; CHECK: da analyze - none!
@@ -595,8 +595,8 @@ entry:
%conv = sext i16 %n to i64
%add = add i64 %conv, 1
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %add
- %0 = load i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add
+ %0 = load i32, i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
}
@@ -609,7 +609,7 @@ entry:
define void @p9(i32* %A, i32* %B, i32 %n) nounwind uwtable ssp {
entry:
%idxprom = sext i32 %n to i64
- %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
store i32 0, i32* %arrayidx, align 4
; CHECK: da analyze - none!
@@ -621,8 +621,8 @@ entry:
%add = add nsw i32 %n, 1
%idxprom1 = sext i32 %add to i64
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %idxprom1
- %0 = load i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1
+ %0 = load i32, i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
}
@@ -635,7 +635,7 @@ entry:
define void @p10(i32* %A, i32* %B, i32 %n) nounwind uwtable ssp {
entry:
%idxprom = zext i32 %n to i64
- %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
store i32 0, i32* %arrayidx, align 4
; CHECK: da analyze - none!
@@ -647,8 +647,8 @@ entry:
%add = add i32 %n, 1
%idxprom1 = zext i32 %add to i64
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %idxprom1
- %0 = load i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1
+ %0 = load i32, i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
}
@@ -668,7 +668,7 @@ define void @f(%struct.S* %s, i32 %size) nounwind uwtable ssp {
entry:
%idx.ext = zext i32 %size to i64
%add.ptr.sum = add i64 %idx.ext, -1
- %add.ptr1 = getelementptr inbounds %struct.S* %s, i64 %add.ptr.sum
+ %add.ptr1 = getelementptr inbounds %struct.S, %struct.S* %s, i64 %add.ptr.sum
%cmp1 = icmp eq i64 %add.ptr.sum, 0
br i1 %cmp1, label %while.end, label %while.body.preheader
@@ -681,11 +681,11 @@ while.body.preheader: ; preds = %entry
while.body: ; preds = %while.body.preheader, %while.body
%i.02 = phi %struct.S* [ %incdec.ptr, %while.body ], [ %s, %while.body.preheader ]
- %0 = getelementptr inbounds %struct.S* %i.02, i64 1, i32 0
- %1 = load i32* %0, align 4
- %2 = getelementptr inbounds %struct.S* %i.02, i64 0, i32 0
+ %0 = getelementptr inbounds %struct.S, %struct.S* %i.02, i64 1, i32 0
+ %1 = load i32, i32* %0, align 4
+ %2 = getelementptr inbounds %struct.S, %struct.S* %i.02, i64 0, i32 0
store i32 %1, i32* %2, align 4
- %incdec.ptr = getelementptr inbounds %struct.S* %i.02, i64 1
+ %incdec.ptr = getelementptr inbounds %struct.S, %struct.S* %i.02, i64 1
%cmp = icmp eq %struct.S* %incdec.ptr, %add.ptr1
br i1 %cmp, label %while.end.loopexit, label %while.body
diff --git a/test/Analysis/DependenceAnalysis/Propagating.ll b/test/Analysis/DependenceAnalysis/Propagating.ll
index f9034ed..5a97b99 100644
--- a/test/Analysis/DependenceAnalysis/Propagating.ll
+++ b/test/Analysis/DependenceAnalysis/Propagating.ll
@@ -32,19 +32,19 @@ for.body3: ; preds = %for.cond1.preheader
%conv = trunc i64 %i.03 to i32
%add = add nsw i64 %i.03, %j.02
%add4 = add nsw i64 %i.03, 1
- %arrayidx5 = getelementptr inbounds [100 x i32]* %A, i64 %add4, i64 %add
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add4, i64 %add
store i32 %conv, i32* %arrayidx5, align 4
%add6 = add nsw i64 %i.03, %j.02
- %arrayidx8 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add6
- %0 = load i32* %arrayidx8, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add6
+ %0 = load i32, i32* %arrayidx8, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc9
for.inc9: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc10 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc10, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end11
@@ -88,26 +88,26 @@ for.body6: ; preds = %for.cond4.preheader
%add = add nsw i64 %j.03, %k.02
%add7 = add nsw i64 %i.05, 1
%sub = sub nsw i64 %j.03, %i.05
- %arrayidx9 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub, i64 %add7, i64 %add
+ %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub, i64 %add7, i64 %add
store i32 %conv, i32* %arrayidx9, align 4
%add10 = add nsw i64 %j.03, %k.02
%sub11 = sub nsw i64 %j.03, %i.05
- %arrayidx14 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub11, i64 %i.05, i64 %add10
- %0 = load i32* %arrayidx14, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.21, i64 1
+ %arrayidx14 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub11, i64 %i.05, i64 %add10
+ %0 = load i32, i32* %arrayidx14, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.21, i64 1
store i32 %0, i32* %B.addr.21, align 4
%inc = add nsw i64 %k.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body6, label %for.inc15
for.inc15: ; preds = %for.body6
- %scevgep = getelementptr i32* %B.addr.14, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.14, i64 100
%inc16 = add nsw i64 %j.03, 1
%exitcond8 = icmp ne i64 %inc16, 100
br i1 %exitcond8, label %for.cond4.preheader, label %for.inc18
for.inc18: ; preds = %for.inc15
- %scevgep7 = getelementptr i32* %B.addr.06, i64 10000
+ %scevgep7 = getelementptr i32, i32* %B.addr.06, i64 10000
%inc19 = add nsw i64 %i.05, 1
%exitcond9 = icmp ne i64 %inc19, 100
br i1 %exitcond9, label %for.cond1.preheader, label %for.end20
@@ -144,20 +144,20 @@ for.body3: ; preds = %for.cond1.preheader
%conv = trunc i64 %i.03 to i32
%mul = shl nsw i64 %i.03, 1
%sub = add nsw i64 %i.03, -1
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %sub, i64 %mul
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub, i64 %mul
store i32 %conv, i32* %arrayidx4, align 4
%add = add nsw i64 %i.03, %j.02
%add5 = add nsw i64 %add, 110
- %arrayidx7 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add5
- %0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add5
+ %0 = load i32, i32* %arrayidx7, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
@@ -194,21 +194,21 @@ for.body3: ; preds = %for.cond1.preheader
%conv = trunc i64 %i.03 to i32
%mul = shl nsw i64 %j.02, 1
%add = add nsw i64 %mul, %i.03
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add
store i32 %conv, i32* %arrayidx4, align 4
%mul5 = shl nsw i64 %j.02, 1
%sub = sub nsw i64 %mul5, %i.03
%add6 = add nsw i64 %sub, 5
- %arrayidx8 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add6
- %0 = load i32* %arrayidx8, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add6
+ %0 = load i32, i32* %arrayidx8, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc9
for.inc9: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc10 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc10, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end11
@@ -247,20 +247,20 @@ for.body3: ; preds = %for.cond1.preheader
%add = add nsw i64 %mul, %j.02
%add4 = add nsw i64 %add, 1
%add5 = add nsw i64 %i.03, 2
- %arrayidx6 = getelementptr inbounds [100 x i32]* %A, i64 %add5, i64 %add4
+ %arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add5, i64 %add4
store i32 %conv, i32* %arrayidx6, align 4
%mul7 = shl nsw i64 %i.03, 1
%add8 = add nsw i64 %mul7, %j.02
- %arrayidx10 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add8
- %0 = load i32* %arrayidx10, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx10 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add8
+ %0 = load i32, i32* %arrayidx10, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc11
for.inc11: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc12 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc12, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end13
@@ -301,20 +301,20 @@ for.body3: ; preds = %for.cond1.preheader
%sub = sub nsw i64 22, %i.03
%mul4 = mul nsw i64 %i.03, 3
%sub5 = add nsw i64 %mul4, -18
- %arrayidx7 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub5, i64 %sub, i64 %add
+ %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub5, i64 %sub, i64 %add
store i32 %conv, i32* %arrayidx7, align 4
%mul8 = mul nsw i64 %i.03, 3
%add9 = add nsw i64 %mul8, %j.02
- %arrayidx12 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %i.03, i64 %i.03, i64 %add9
- %0 = load i32* %arrayidx12, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx12 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %i.03, i64 %i.03, i64 %add9
+ %0 = load i32, i32* %arrayidx12, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc13
for.inc13: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc14 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc14, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end15
@@ -353,21 +353,21 @@ for.body3: ; preds = %for.cond1.preheader
%add = add nsw i64 %mul, %j.02
%add4 = add nsw i64 %add, 2
%add5 = add nsw i64 %i.03, 1
- %arrayidx6 = getelementptr inbounds [100 x i32]* %A, i64 %add5, i64 %add4
+ %arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add5, i64 %add4
store i32 %conv, i32* %arrayidx6, align 4
%mul7 = shl nsw i64 %i.03, 3
%add8 = add nsw i64 %mul7, %j.02
%mul9 = shl nsw i64 %i.03, 1
- %arrayidx11 = getelementptr inbounds [100 x i32]* %A, i64 %mul9, i64 %add8
- %0 = load i32* %arrayidx11, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx11 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %mul9, i64 %add8
+ %0 = load i32, i32* %arrayidx11, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc12
for.inc12: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc13 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc13, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end14
@@ -408,22 +408,22 @@ for.body3: ; preds = %for.cond1.preheader
%add4 = add nsw i64 %add, 2
%mul5 = shl nsw i64 %i.03, 1
%add6 = add nsw i64 %mul5, 4
- %arrayidx7 = getelementptr inbounds [100 x i32]* %A, i64 %add6, i64 %add4
+ %arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add6, i64 %add4
store i32 %conv, i32* %arrayidx7, align 4
%mul8 = mul nsw i64 %i.03, 5
%add9 = add nsw i64 %mul8, %j.02
%mul10 = mul nsw i64 %i.03, -2
%add11 = add nsw i64 %mul10, 20
- %arrayidx13 = getelementptr inbounds [100 x i32]* %A, i64 %add11, i64 %add9
- %0 = load i32* %arrayidx13, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx13 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add11, i64 %add9
+ %0 = load i32, i32* %arrayidx13, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc14
for.inc14: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc15 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc15, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end16
@@ -459,22 +459,22 @@ for.body3: ; preds = %for.cond1.preheader
%B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%conv = trunc i64 %i.03 to i32
%add = add nsw i64 %j.02, 2
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 4, i64 %add
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 4, i64 %add
store i32 %conv, i32* %arrayidx4, align 4
%mul = mul nsw i64 %i.03, 5
%add5 = add nsw i64 %mul, %j.02
%mul6 = mul nsw i64 %i.03, -2
%add7 = add nsw i64 %mul6, 4
- %arrayidx9 = getelementptr inbounds [100 x i32]* %A, i64 %add7, i64 %add5
- %0 = load i32* %arrayidx9, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx9 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add7, i64 %add5
+ %0 = load i32, i32* %arrayidx9, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc10
for.inc10: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc11 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc11, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end12
@@ -514,18 +514,18 @@ for.body3: ; preds = %for.cond1.preheader
%add4 = add nsw i64 %add, 2
%mul5 = shl nsw i64 %i.03, 1
%add6 = add nsw i64 %mul5, 4
- %arrayidx7 = getelementptr inbounds [100 x i32]* %A, i64 %add6, i64 %add4
+ %arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add6, i64 %add4
store i32 %conv, i32* %arrayidx7, align 4
- %arrayidx9 = getelementptr inbounds [100 x i32]* %A, i64 4, i64 %j.02
- %0 = load i32* %arrayidx9, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %arrayidx9 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 4, i64 %j.02
+ %0 = load i32, i32* %arrayidx9, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc10
for.inc10: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc11 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc11, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end12
diff --git a/test/Analysis/DependenceAnalysis/Separability.ll b/test/Analysis/DependenceAnalysis/Separability.ll
index 3dcaaec..e56e741 100644
--- a/test/Analysis/DependenceAnalysis/Separability.ll
+++ b/test/Analysis/DependenceAnalysis/Separability.ll
@@ -44,33 +44,33 @@ for.body9: ; preds = %for.cond7.preheader
%conv = trunc i64 %i.07 to i32
%add = add nsw i64 %j.05, %k.03
%idxprom = sext i32 %n to i64
- %arrayidx11 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %idxprom, i64 %i.07, i64 %add
+ %arrayidx11 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %idxprom, i64 %i.07, i64 %add
store i32 %conv, i32* %arrayidx11, align 4
%mul = shl nsw i64 %j.05, 1
%sub = sub nsw i64 %mul, %l.02
%add12 = add nsw i64 %i.07, 10
- %arrayidx15 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 10, i64 %add12, i64 %sub
- %0 = load i32* %arrayidx15, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.31, i64 1
+ %arrayidx15 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 10, i64 %add12, i64 %sub
+ %0 = load i32, i32* %arrayidx15, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1
store i32 %0, i32* %B.addr.31, align 4
%inc = add nsw i64 %l.02, 1
%exitcond = icmp ne i64 %inc, 50
br i1 %exitcond, label %for.body9, label %for.inc16
for.inc16: ; preds = %for.body9
- %scevgep = getelementptr i32* %B.addr.24, i64 50
+ %scevgep = getelementptr i32, i32* %B.addr.24, i64 50
%inc17 = add nsw i64 %k.03, 1
%exitcond10 = icmp ne i64 %inc17, 50
br i1 %exitcond10, label %for.cond7.preheader, label %for.inc19
for.inc19: ; preds = %for.inc16
- %scevgep9 = getelementptr i32* %B.addr.16, i64 2500
+ %scevgep9 = getelementptr i32, i32* %B.addr.16, i64 2500
%inc20 = add nsw i64 %j.05, 1
%exitcond12 = icmp ne i64 %inc20, 50
br i1 %exitcond12, label %for.cond4.preheader, label %for.inc22
for.inc22: ; preds = %for.inc19
- %scevgep11 = getelementptr i32* %B.addr.08, i64 125000
+ %scevgep11 = getelementptr i32, i32* %B.addr.08, i64 125000
%inc23 = add nsw i64 %i.07, 1
%exitcond13 = icmp ne i64 %inc23, 50
br i1 %exitcond13, label %for.cond1.preheader, label %for.end24
@@ -118,33 +118,33 @@ for.body9: ; preds = %for.cond7.preheader
%B.addr.31 = phi i32* [ %B.addr.24, %for.cond7.preheader ], [ %incdec.ptr, %for.body9 ]
%conv = trunc i64 %i.07 to i32
%add = add nsw i64 %j.05, %k.03
- %arrayidx11 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %i.07, i64 %i.07, i64 %add
+ %arrayidx11 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %i.07, i64 %i.07, i64 %add
store i32 %conv, i32* %arrayidx11, align 4
%mul = shl nsw i64 %j.05, 1
%sub = sub nsw i64 %mul, %l.02
%add12 = add nsw i64 %i.07, 10
- %arrayidx15 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 10, i64 %add12, i64 %sub
- %0 = load i32* %arrayidx15, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.31, i64 1
+ %arrayidx15 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 10, i64 %add12, i64 %sub
+ %0 = load i32, i32* %arrayidx15, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1
store i32 %0, i32* %B.addr.31, align 4
%inc = add nsw i64 %l.02, 1
%exitcond = icmp ne i64 %inc, 50
br i1 %exitcond, label %for.body9, label %for.inc16
for.inc16: ; preds = %for.body9
- %scevgep = getelementptr i32* %B.addr.24, i64 50
+ %scevgep = getelementptr i32, i32* %B.addr.24, i64 50
%inc17 = add nsw i64 %k.03, 1
%exitcond10 = icmp ne i64 %inc17, 50
br i1 %exitcond10, label %for.cond7.preheader, label %for.inc19
for.inc19: ; preds = %for.inc16
- %scevgep9 = getelementptr i32* %B.addr.16, i64 2500
+ %scevgep9 = getelementptr i32, i32* %B.addr.16, i64 2500
%inc20 = add nsw i64 %j.05, 1
%exitcond12 = icmp ne i64 %inc20, 50
br i1 %exitcond12, label %for.cond4.preheader, label %for.inc22
for.inc22: ; preds = %for.inc19
- %scevgep11 = getelementptr i32* %B.addr.08, i64 125000
+ %scevgep11 = getelementptr i32, i32* %B.addr.08, i64 125000
%inc23 = add nsw i64 %i.07, 1
%exitcond13 = icmp ne i64 %inc23, 50
br i1 %exitcond13, label %for.cond1.preheader, label %for.end24
@@ -192,33 +192,33 @@ for.body9: ; preds = %for.cond7.preheader
%B.addr.31 = phi i32* [ %B.addr.24, %for.cond7.preheader ], [ %incdec.ptr, %for.body9 ]
%conv = trunc i64 %i.07 to i32
%add = add nsw i64 %i.07, %k.03
- %arrayidx12 = getelementptr inbounds [100 x [100 x [100 x i32]]]* %A, i64 %i.07, i64 %i.07, i64 %add, i64 %l.02
+ %arrayidx12 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* %A, i64 %i.07, i64 %i.07, i64 %add, i64 %l.02
store i32 %conv, i32* %arrayidx12, align 4
%add13 = add nsw i64 %l.02, 10
%add14 = add nsw i64 %j.05, %k.03
%add15 = add nsw i64 %i.07, 10
- %arrayidx19 = getelementptr inbounds [100 x [100 x [100 x i32]]]* %A, i64 10, i64 %add15, i64 %add14, i64 %add13
- %0 = load i32* %arrayidx19, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.31, i64 1
+ %arrayidx19 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* %A, i64 10, i64 %add15, i64 %add14, i64 %add13
+ %0 = load i32, i32* %arrayidx19, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1
store i32 %0, i32* %B.addr.31, align 4
%inc = add nsw i64 %l.02, 1
%exitcond = icmp ne i64 %inc, 50
br i1 %exitcond, label %for.body9, label %for.inc20
for.inc20: ; preds = %for.body9
- %scevgep = getelementptr i32* %B.addr.24, i64 50
+ %scevgep = getelementptr i32, i32* %B.addr.24, i64 50
%inc21 = add nsw i64 %k.03, 1
%exitcond10 = icmp ne i64 %inc21, 50
br i1 %exitcond10, label %for.cond7.preheader, label %for.inc23
for.inc23: ; preds = %for.inc20
- %scevgep9 = getelementptr i32* %B.addr.16, i64 2500
+ %scevgep9 = getelementptr i32, i32* %B.addr.16, i64 2500
%inc24 = add nsw i64 %j.05, 1
%exitcond12 = icmp ne i64 %inc24, 50
br i1 %exitcond12, label %for.cond4.preheader, label %for.inc26
for.inc26: ; preds = %for.inc23
- %scevgep11 = getelementptr i32* %B.addr.08, i64 125000
+ %scevgep11 = getelementptr i32, i32* %B.addr.08, i64 125000
%inc27 = add nsw i64 %i.07, 1
%exitcond13 = icmp ne i64 %inc27, 50
br i1 %exitcond13, label %for.cond1.preheader, label %for.end28
@@ -267,33 +267,33 @@ for.body9: ; preds = %for.cond7.preheader
%conv = trunc i64 %i.07 to i32
%add = add nsw i64 %l.02, %k.03
%add10 = add nsw i64 %i.07, %k.03
- %arrayidx13 = getelementptr inbounds [100 x [100 x [100 x i32]]]* %A, i64 %i.07, i64 %i.07, i64 %add10, i64 %add
+ %arrayidx13 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* %A, i64 %i.07, i64 %i.07, i64 %add10, i64 %add
store i32 %conv, i32* %arrayidx13, align 4
%add14 = add nsw i64 %l.02, 10
%add15 = add nsw i64 %j.05, %k.03
%add16 = add nsw i64 %i.07, 10
- %arrayidx20 = getelementptr inbounds [100 x [100 x [100 x i32]]]* %A, i64 10, i64 %add16, i64 %add15, i64 %add14
- %0 = load i32* %arrayidx20, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.31, i64 1
+ %arrayidx20 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* %A, i64 10, i64 %add16, i64 %add15, i64 %add14
+ %0 = load i32, i32* %arrayidx20, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1
store i32 %0, i32* %B.addr.31, align 4
%inc = add nsw i64 %l.02, 1
%exitcond = icmp ne i64 %inc, 50
br i1 %exitcond, label %for.body9, label %for.inc21
for.inc21: ; preds = %for.body9
- %scevgep = getelementptr i32* %B.addr.24, i64 50
+ %scevgep = getelementptr i32, i32* %B.addr.24, i64 50
%inc22 = add nsw i64 %k.03, 1
%exitcond10 = icmp ne i64 %inc22, 50
br i1 %exitcond10, label %for.cond7.preheader, label %for.inc24
for.inc24: ; preds = %for.inc21
- %scevgep9 = getelementptr i32* %B.addr.16, i64 2500
+ %scevgep9 = getelementptr i32, i32* %B.addr.16, i64 2500
%inc25 = add nsw i64 %j.05, 1
%exitcond12 = icmp ne i64 %inc25, 50
br i1 %exitcond12, label %for.cond4.preheader, label %for.inc27
for.inc27: ; preds = %for.inc24
- %scevgep11 = getelementptr i32* %B.addr.08, i64 125000
+ %scevgep11 = getelementptr i32, i32* %B.addr.08, i64 125000
%inc28 = add nsw i64 %i.07, 1
%exitcond13 = icmp ne i64 %inc28, 50
br i1 %exitcond13, label %for.cond1.preheader, label %for.end29
diff --git a/test/Analysis/DependenceAnalysis/StrongSIV.ll b/test/Analysis/DependenceAnalysis/StrongSIV.ll
index f499e84..78befa5 100644
--- a/test/Analysis/DependenceAnalysis/StrongSIV.ll
+++ b/test/Analysis/DependenceAnalysis/StrongSIV.ll
@@ -28,12 +28,12 @@ for.body: ; preds = %for.body.preheader,
%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%0 = add nsw i64 %indvars.iv, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %0
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %0
%1 = trunc i64 %indvars.iv to i32
store i32 %1, i32* %arrayidx, align 4
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %indvars.iv
- %2 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %2 = load i32, i32* %arrayidx3, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %2, i32* %B.addr.02, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, %n
@@ -72,11 +72,11 @@ for.body: ; preds = %for.body.preheader,
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv2 = trunc i64 %i.03 to i32
%add = add nsw i64 %i.03, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv2, i32* %arrayidx, align 4
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %i.03
- %1 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %i.03
+ %1 = load i32, i32* %arrayidx3, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %1, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %0
@@ -114,11 +114,11 @@ for.body: ; preds = %for.body.preheader,
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv = trunc i64 %i.03 to i32
%add = add i64 %i.03, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %i.03
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.03
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -155,12 +155,12 @@ for.body: ; preds = %for.body.preheader,
%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%0 = add nsw i64 %indvars.iv, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %0
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %0
%1 = trunc i64 %indvars.iv to i32
store i32 %1, i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %indvars.iv
- %2 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %2 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %2, i32* %B.addr.02, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -195,11 +195,11 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%add = add i64 %i.02, 19
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %i.02
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.02
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 19
@@ -230,11 +230,11 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%add = add i64 %i.02, 19
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %i.02
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.02
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 20
@@ -266,12 +266,12 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 1
%add = add i64 %mul, 6
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul1 = shl i64 %i.02, 1
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %mul1
- %0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %mul1
+ %0 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 20
@@ -303,12 +303,12 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 1
%add = add i64 %mul, 7
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul1 = shl i64 %i.02, 1
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %mul1
- %0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %mul1
+ %0 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 20
@@ -339,11 +339,11 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%add = add i64 %i.02, %n
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %i.02
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.02
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 20
@@ -378,13 +378,13 @@ for.body: ; preds = %for.body.preheader,
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv = trunc i64 %i.03 to i32
%add = add i64 %i.03, %n
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %n, 1
%add1 = add i64 %i.03, %mul
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %add1
- %0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add1
+ %0 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -419,13 +419,13 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, %n
%add = add i64 %mul, 5
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul1 = mul i64 %i.02, %n
%add2 = add i64 %mul1, 5
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %add2
- %0 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add2
+ %0 = load i32, i32* %arrayidx3, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 1000
diff --git a/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll b/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll
index 5443909..6e8b98c 100644
--- a/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll
+++ b/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll
@@ -41,7 +41,7 @@ for.body: ; preds = %for.body.preheader,
%conv = trunc i64 %i.05 to i32
%mul = shl nsw i64 %i.05, 1
%add = add i64 %mul, %n1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.05, 1
%exitcond = icmp ne i64 %inc, %n1
@@ -52,9 +52,9 @@ for.body4: ; preds = %for.body4.preheader
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%mul56 = add i64 %j.03, %n1
%add7 = mul i64 %mul56, 3
- %arrayidx8 = getelementptr inbounds i32* %A, i64 %add7
- %0 = load i32* %arrayidx8, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %add7
+ %0 = load i32, i32* %arrayidx8, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc10 = add nsw i64 %j.03, 1
%exitcond7 = icmp ne i64 %inc10, %n2
@@ -105,7 +105,7 @@ for.body: ; preds = %for.body.preheader,
%mul = shl nsw i64 %i.05, 1
%mul1 = mul i64 %n2, 5
%add = add i64 %mul, %mul1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.05, 1
%exitcond = icmp ne i64 %inc, %n1
@@ -117,9 +117,9 @@ for.body5: ; preds = %for.body5.preheader
%mul6 = mul nsw i64 %j.03, 3
%mul7 = shl i64 %n2, 1
%add8 = add i64 %mul6, %mul7
- %arrayidx9 = getelementptr inbounds i32* %A, i64 %add8
- %0 = load i32* %arrayidx9, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 %add8
+ %0 = load i32, i32* %arrayidx9, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc11 = add nsw i64 %j.03, 1
%exitcond6 = icmp ne i64 %inc11, %n2
@@ -169,7 +169,7 @@ for.body: ; preds = %for.body.preheader,
%conv = trunc i64 %i.05 to i32
%mul = shl nsw i64 %i.05, 1
%sub = sub i64 %mul, %n2
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.05, 1
%exitcond = icmp ne i64 %inc, %n1
@@ -180,9 +180,9 @@ for.body4: ; preds = %for.body4.preheader
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%mul6 = shl i64 %n1, 1
%add = sub i64 %mul6, %j.03
- %arrayidx7 = getelementptr inbounds i32* %A, i64 %add
- %0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add
+ %0 = load i32, i32* %arrayidx7, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc9 = add nsw i64 %j.03, 1
%exitcond6 = icmp ne i64 %inc9, %n2
@@ -231,7 +231,7 @@ for.body: ; preds = %for.body.preheader,
%i.05 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
%conv = trunc i64 %i.05 to i32
%add = sub i64 %n2, %i.05
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.05, 1
%exitcond = icmp ne i64 %inc, %n1
@@ -241,9 +241,9 @@ for.body4: ; preds = %for.body4.preheader
%j.03 = phi i64 [ %inc8, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub5 = sub i64 %j.03, %n1
- %arrayidx6 = getelementptr inbounds i32* %A, i64 %sub5
- %0 = load i32* %arrayidx6, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %sub5
+ %0 = load i32, i32* %arrayidx6, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc8 = add nsw i64 %j.03, 1
%exitcond6 = icmp ne i64 %inc8, %n2
@@ -293,7 +293,7 @@ for.body: ; preds = %for.body.preheader,
%conv = trunc i64 %i.05 to i32
%mul = shl i64 %n1, 1
%add = sub i64 %mul, %i.05
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.05, 1
%exitcond = icmp ne i64 %inc, %n1
@@ -303,9 +303,9 @@ for.body4: ; preds = %for.body4.preheader
%j.03 = phi i64 [ %inc9, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%add6 = sub i64 %n1, %j.03
- %arrayidx7 = getelementptr inbounds i32* %A, i64 %add6
- %0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add6
+ %0 = load i32, i32* %arrayidx7, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc9 = add nsw i64 %j.03, 1
%exitcond6 = icmp ne i64 %inc9, %n2
@@ -354,7 +354,7 @@ for.body: ; preds = %for.body.preheader,
%i.05 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
%conv = trunc i64 %i.05 to i32
%add = sub i64 %n2, %i.05
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.05, 1
%exitcond = icmp ne i64 %inc, %n1
@@ -365,9 +365,9 @@ for.body4: ; preds = %for.body4.preheader
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%mul = shl i64 %n2, 1
%add6 = sub i64 %mul, %j.03
- %arrayidx7 = getelementptr inbounds i32* %A, i64 %add6
- %0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add6
+ %0 = load i32, i32* %arrayidx7, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc9 = add nsw i64 %j.03, 1
%exitcond6 = icmp ne i64 %inc9, %n2
@@ -417,19 +417,19 @@ for.body3: ; preds = %for.body3.preheader
%conv = trunc i64 %i.05 to i32
%sub = sub nsw i64 %j.03, %i.05
%add = add i64 %sub, %n2
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %n2, 1
- %arrayidx4 = getelementptr inbounds i32* %A, i64 %mul
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %mul
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %0, i32* %B.addr.12, align 4
%inc = add nsw i64 %j.03, 1
%exitcond = icmp ne i64 %inc, %n2
br i1 %exitcond, label %for.body3, label %for.inc5.loopexit
for.inc5.loopexit: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.06, i64 %n2
+ %scevgep = getelementptr i32, i32* %B.addr.06, i64 %n2
br label %for.inc5
for.inc5: ; preds = %for.inc5.loopexit, %for.cond1.preheader
diff --git a/test/Analysis/DependenceAnalysis/SymbolicSIV.ll b/test/Analysis/DependenceAnalysis/SymbolicSIV.ll
index 297096c..711d0fa 100644
--- a/test/Analysis/DependenceAnalysis/SymbolicSIV.ll
+++ b/test/Analysis/DependenceAnalysis/SymbolicSIV.ll
@@ -30,13 +30,13 @@ for.body: ; preds = %for.body.preheader,
%conv = trunc i64 %i.03 to i32
%mul = shl nsw i64 %i.03, 1
%add = add i64 %mul, %n
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul14 = add i64 %i.03, %n
%add3 = mul i64 %mul14, 3
- %arrayidx4 = getelementptr inbounds i32* %A, i64 %add3
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %add3
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -76,14 +76,14 @@ for.body: ; preds = %for.body.preheader,
%mul = shl nsw i64 %i.03, 1
%mul1 = mul i64 %n, 5
%add = add i64 %mul, %mul1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul2 = mul nsw i64 %i.03, 3
%mul3 = shl i64 %n, 1
%add4 = add i64 %mul2, %mul3
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %add4
- %0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %add4
+ %0 = load i32, i32* %arrayidx5, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -122,13 +122,13 @@ for.body: ; preds = %for.body.preheader,
%conv = trunc i64 %i.03 to i32
%mul = shl nsw i64 %i.03, 1
%sub = sub i64 %mul, %n
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%mul2 = shl i64 %n, 1
%add = sub i64 %mul2, %i.03
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %add
- %0 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add
+ %0 = load i32, i32* %arrayidx3, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -168,13 +168,13 @@ for.body: ; preds = %for.body.preheader,
%mul = mul nsw i64 %i.03, -2
%add = add i64 %mul, %n
%add1 = add i64 %add, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add1
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add1
store i32 %conv, i32* %arrayidx, align 4
%mul2 = shl i64 %n, 1
%sub = sub i64 %i.03, %mul2
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %sub
- %0 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %sub
+ %0 = load i32, i32* %arrayidx3, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -214,12 +214,12 @@ for.body: ; preds = %for.body.preheader,
%mul = mul nsw i64 %i.03, -2
%mul1 = mul i64 %n, 3
%add = add i64 %mul, %mul1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%add2 = sub i64 %n, %i.03
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %add2
- %0 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add2
+ %0 = load i32, i32* %arrayidx3, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -259,13 +259,13 @@ for.body: ; preds = %for.body.preheader,
%mul = mul nsw i64 %i.03, -2
%mul1 = shl i64 %n, 1
%sub = sub i64 %mul, %mul1
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%sub2 = sub nsw i64 0, %i.03
%sub3 = sub i64 %sub2, %n
- %arrayidx4 = getelementptr inbounds i32* %A, i64 %sub3
- %0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %sub3
+ %0 = load i32, i32* %arrayidx4, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -306,12 +306,12 @@ for.body: ; preds = %for.body.preheader,
%conv = trunc i64 %i.03 to i32
%add = add i64 %i.03, %n
%add1 = add i64 %add, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add1
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add1
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 0, %i.03
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub
- %0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub
+ %0 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -351,16 +351,16 @@ for.body: ; preds = %for.body.preheader,
%mul = shl i64 %N, 2
%mul1 = mul i64 %mul, %i.03
%add = add i64 %mul1, %M
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul2 = shl i64 %N, 2
%mul3 = mul i64 %mul2, %i.03
%mul4 = mul i64 %M, 3
%add5 = add i64 %mul3, %mul4
%add6 = add i64 %add5, 1
- %arrayidx7 = getelementptr inbounds i32* %A, i64 %add6
- %0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add6
+ %0 = load i32, i32* %arrayidx7, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -400,16 +400,16 @@ for.body: ; preds = %for.body.preheader,
%mul = shl i64 %N, 1
%mul1 = mul i64 %mul, %i.03
%add = add i64 %mul1, %M
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul2 = shl i64 %N, 1
%mul3 = mul i64 %mul2, %i.03
%0 = mul i64 %M, -3
%sub = add i64 %mul3, %0
%add5 = add i64 %sub, 2
- %arrayidx6 = getelementptr inbounds i32* %A, i64 %add5
- %1 = load i32* %arrayidx6, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %add5
+ %1 = load i32, i32* %arrayidx6, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %1, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
diff --git a/test/Analysis/DependenceAnalysis/UsefulGEP.ll b/test/Analysis/DependenceAnalysis/UsefulGEP.ll
new file mode 100644
index 0000000..cd46a27
--- /dev/null
+++ b/test/Analysis/DependenceAnalysis/UsefulGEP.ll
@@ -0,0 +1,51 @@
+; RUN: opt < %s -analyze -basicaa -da
+;; Check this doesn't crash.
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+;; struct s {
+;; int A[10][10];
+;; int C[10][10][10];
+;; } S;
+
+;; void dep_constraint_crash_test(int k,int N) {
+;; for( int i=0;i<N;i++)
+;; for( int j=0;j<N;j++)
+;; S.A[0][0] = S.C[0][0][k];
+;; }
+
+
+%struct.s = type { [10 x [10 x i32]], [10 x [10 x [10 x i32]]] }
+
+@S = common global %struct.s zeroinitializer
+
+define void @dep_constraint_crash_test(i32 %k, i32 %N) {
+entry:
+ %cmp12 = icmp sgt i32 %N, 0
+ br i1 %cmp12, label %for.cond1.preheader.lr.ph, label %for.end6
+
+for.cond1.preheader.lr.ph:
+ %idxprom = sext i32 %k to i64
+ %arrayidx = getelementptr inbounds %struct.s, %struct.s* @S, i64 0, i32 1, i64 0, i64 0, i64 %idxprom
+ br label %for.body3.preheader
+
+for.body3.preheader:
+ %i.013 = phi i32 [ 0, %for.cond1.preheader.lr.ph ], [ %inc5, %for.inc4 ]
+ br label %for.body3
+
+for.body3:
+ %j.011 = phi i32 [ %inc, %for.body3 ], [ 0, %for.body3.preheader ]
+ %0 = load i32, i32* %arrayidx
+ store i32 %0, i32* getelementptr inbounds (%struct.s, %struct.s* @S, i64 0, i32 0, i64 0, i64 0)
+ %inc = add nuw nsw i32 %j.011, 1
+ %exitcond = icmp eq i32 %inc, %N
+ br i1 %exitcond, label %for.inc4, label %for.body3
+
+for.inc4:
+ %inc5 = add nuw nsw i32 %i.013, 1
+ %exitcond14 = icmp eq i32 %inc5, %N
+ br i1 %exitcond14, label %for.end6, label %for.body3.preheader
+
+for.end6:
+ ret void
+}
diff --git a/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll b/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll
index 8b2e43f..5b81ec1 100644
--- a/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll
+++ b/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll
@@ -30,13 +30,13 @@ for.body: ; preds = %for.body.preheader,
%conv = trunc i64 %i.03 to i32
%mul = mul i64 %i.03, %n
%add = add i64 %mul, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul1 = mul i64 %i.03, %n
%sub = sub i64 1, %mul1
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub
- %0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub
+ %0 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -75,13 +75,13 @@ for.body: ; preds = %for.body.preheader,
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv = trunc i64 %i.03 to i32
%add = add i64 %i.03, %n
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%add1 = add i64 %n, 1
%sub = sub i64 %add1, %i.03
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub
- %0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub
+ %0 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -114,12 +114,12 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 %i.02
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.02
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 6, %i.02
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %sub
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 3
@@ -149,12 +149,12 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 %i.02
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.02
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 6, %i.02
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %sub
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 4
@@ -184,12 +184,12 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 %i.02
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.02
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 -6, %i.02
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %sub
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 10
@@ -224,13 +224,13 @@ for.body: ; preds = %for.body.preheader,
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv = trunc i64 %i.03 to i32
%mul = mul i64 %i.03, 3
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%0 = mul i64 %i.03, -3
%sub = add i64 %0, 5
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub
- %1 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub
+ %1 = load i32, i32* %arrayidx2, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %1, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -264,12 +264,12 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 %i.02
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.02
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 5, %i.02
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %sub
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 4
diff --git a/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll b/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll
index bc85e6c..8adb7f7 100644
--- a/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll
+++ b/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll
@@ -26,11 +26,11 @@ for.body: ; preds = %entry, %for.body
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 1
%add = add i64 %mul, 10
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 10
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 30
@@ -66,11 +66,11 @@ for.body: ; preds = %for.body.preheader,
%conv = trunc i64 %i.03 to i32
%mul = mul i64 %i.03, %n
%add = add i64 %mul, 10
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 10
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -104,11 +104,11 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 10
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 5
@@ -139,11 +139,11 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 10
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 6
@@ -174,11 +174,11 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 10
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 7
@@ -209,11 +209,11 @@ for.body: ; preds = %entry, %for.body
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 -10
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 -10
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 7
@@ -248,11 +248,11 @@ for.body: ; preds = %for.body.preheader,
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv = trunc i64 %i.03 to i32
%mul = mul i64 %i.03, 3
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 10
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
diff --git a/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll b/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll
index 2b3b2d0..ac261b0 100644
--- a/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll
+++ b/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll
@@ -24,13 +24,13 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 10
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
%add = add i64 %mul, 10
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 30
@@ -64,13 +64,13 @@ for.body: ; preds = %for.body.preheader,
%i.03 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv = trunc i64 %i.03 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 10
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
store i32 %conv, i32* %arrayidx, align 4
%mul = mul i64 %i.03, %n
%add = add i64 %mul, 10
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
@@ -103,12 +103,12 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 10
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 5
@@ -138,12 +138,12 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 10
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 6
@@ -173,12 +173,12 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 10
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 7
@@ -208,12 +208,12 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 -10
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 -10
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 7
@@ -247,12 +247,12 @@ for.body: ; preds = %for.body.preheader,
%i.03 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv = trunc i64 %i.03 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 10
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
store i32 %conv, i32* %arrayidx, align 4
%mul = mul i64 %i.03, 3
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul
- %0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
+ %0 = load i32, i32* %arrayidx1, align 4
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
diff --git a/test/Analysis/DependenceAnalysis/ZIV.ll b/test/Analysis/DependenceAnalysis/ZIV.ll
index 5463c63..b321641 100644
--- a/test/Analysis/DependenceAnalysis/ZIV.ll
+++ b/test/Analysis/DependenceAnalysis/ZIV.ll
@@ -11,7 +11,7 @@ target triple = "x86_64-apple-macosx10.6.0"
define void @z0(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp {
entry:
%add = add i64 %n, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 0, i32* %arrayidx, align 4
; CHECK: da analyze - none!
@@ -22,8 +22,8 @@ entry:
; CHECK: da analyze - none!
%add1 = add i64 %n, 1
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %add1
- %0 = load i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add1
+ %0 = load i32, i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
}
@@ -34,7 +34,7 @@ entry:
define void @z1(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp {
entry:
- %arrayidx = getelementptr inbounds i32* %A, i64 %n
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %n
store i32 0, i32* %arrayidx, align 4
; CHECK: da analyze - none!
@@ -45,8 +45,8 @@ entry:
; CHECK: da analyze - none!
%add = add i64 %n, 1
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
+ %0 = load i32, i32* %arrayidx1, align 4
store i32 %0, i32* %B, align 4
ret void
}
@@ -57,7 +57,7 @@ entry:
define void @z2(i32* %A, i32* %B, i64 %n, i64 %m) nounwind uwtable ssp {
entry:
- %arrayidx = getelementptr inbounds i32* %A, i64 %n
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %n
store i32 0, i32* %arrayidx, align 4
; CHECK: da analyze - none!
@@ -67,8 +67,8 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %m
- %0 = load i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %m
+ %0 = load i32, i32* %arrayidx1, align 4
store i32 %0, i32* %B, align 4
ret void
}
diff --git a/test/Analysis/Dominators/invoke.ll b/test/Analysis/Dominators/invoke.ll
index da0b246..ce5f992 100644
--- a/test/Analysis/Dominators/invoke.ll
+++ b/test/Analysis/Dominators/invoke.ll
@@ -7,7 +7,7 @@ define void @f() {
invoke void @__dynamic_cast()
to label %bb1 unwind label %bb2
bb1:
- %Hidden = getelementptr inbounds i32* %v1, i64 1
+ %Hidden = getelementptr inbounds i32, i32* %v1, i64 1
ret void
bb2:
%lpad.loopexit80 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
diff --git a/test/Analysis/GlobalsModRef/2008-09-03-ReadGlobals.ll b/test/Analysis/GlobalsModRef/2008-09-03-ReadGlobals.ll
index d51c159..513ec86 100644
--- a/test/Analysis/GlobalsModRef/2008-09-03-ReadGlobals.ll
+++ b/test/Analysis/GlobalsModRef/2008-09-03-ReadGlobals.ll
@@ -3,7 +3,7 @@
@g = internal global i32 0 ; <i32*> [#uses=2]
define i32 @r() {
- %tmp = load i32* @g ; <i32> [#uses=1]
+ %tmp = load i32, i32* @g ; <i32> [#uses=1]
ret i32 %tmp
}
diff --git a/test/Analysis/GlobalsModRef/aliastest.ll b/test/Analysis/GlobalsModRef/aliastest.ll
index 4cfed71..3474e13 100644
--- a/test/Analysis/GlobalsModRef/aliastest.ll
+++ b/test/Analysis/GlobalsModRef/aliastest.ll
@@ -9,6 +9,6 @@ define i32 @test(i32* %P) {
; CHECK-NEXT: ret i32 7
store i32 7, i32* %P
store i32 12, i32* @X
- %V = load i32* %P ; <i32> [#uses=1]
+ %V = load i32, i32* %P ; <i32> [#uses=1]
ret i32 %V
}
diff --git a/test/Analysis/GlobalsModRef/chaining-analysis.ll b/test/Analysis/GlobalsModRef/chaining-analysis.ll
index aeb76e4..26671da 100644
--- a/test/Analysis/GlobalsModRef/chaining-analysis.ll
+++ b/test/Analysis/GlobalsModRef/chaining-analysis.ll
@@ -14,7 +14,7 @@ define i32 @test(i32* %P) {
; CHECK-NEXT: ret i32 12
store i32 12, i32* @X
call double @doesnotmodX( double 1.000000e+00 ) ; <double>:1 [#uses=0]
- %V = load i32* @X ; <i32> [#uses=1]
+ %V = load i32, i32* @X ; <i32> [#uses=1]
ret i32 %V
}
diff --git a/test/Analysis/GlobalsModRef/indirect-global.ll b/test/Analysis/GlobalsModRef/indirect-global.ll
index 48ac6dd..0281323 100644
--- a/test/Analysis/GlobalsModRef/indirect-global.ll
+++ b/test/Analysis/GlobalsModRef/indirect-global.ll
@@ -12,11 +12,11 @@ define void @test() {
define i32 @test1(i32* %P) {
; CHECK: ret i32 0
- %g1 = load i32** @G ; <i32*> [#uses=2]
- %h1 = load i32* %g1 ; <i32> [#uses=1]
+ %g1 = load i32*, i32** @G ; <i32*> [#uses=2]
+ %h1 = load i32, i32* %g1 ; <i32> [#uses=1]
store i32 123, i32* %P
- %g2 = load i32** @G ; <i32*> [#uses=0]
- %h2 = load i32* %g1 ; <i32> [#uses=1]
+ %g2 = load i32*, i32** @G ; <i32*> [#uses=0]
+ %h2 = load i32, i32* %g1 ; <i32> [#uses=1]
%X = sub i32 %h1, %h2 ; <i32> [#uses=1]
ret i32 %X
}
diff --git a/test/Analysis/GlobalsModRef/modreftest.ll b/test/Analysis/GlobalsModRef/modreftest.ll
index 3eed916..74101e2 100644
--- a/test/Analysis/GlobalsModRef/modreftest.ll
+++ b/test/Analysis/GlobalsModRef/modreftest.ll
@@ -9,7 +9,7 @@ define i32 @test(i32* %P) {
; CHECK-NEXT: ret i32 12
store i32 12, i32* @X
call void @doesnotmodX( )
- %V = load i32* @X ; <i32> [#uses=1]
+ %V = load i32, i32* @X ; <i32> [#uses=1]
ret i32 %V
}
diff --git a/test/Analysis/GlobalsModRef/pr12351.ll b/test/Analysis/GlobalsModRef/pr12351.ll
index c221f4c..8f92277 100644
--- a/test/Analysis/GlobalsModRef/pr12351.ll
+++ b/test/Analysis/GlobalsModRef/pr12351.ll
@@ -9,7 +9,7 @@ define void @foo(i8* %x, i8* %y) {
define void @bar(i8* %y, i8* %z) {
%x = alloca i8
call void @foo(i8* %x, i8* %y)
- %t = load i8* %x
+ %t = load i8, i8* %x
store i8 %t, i8* %y
; CHECK: store i8 %t, i8* %y
ret void
@@ -19,8 +19,8 @@ define void @bar(i8* %y, i8* %z) {
define i32 @foo2() {
%foo = alloca i32
call void @bar2(i32* %foo)
- %t0 = load i32* %foo, align 4
-; CHECK: %t0 = load i32* %foo, align 4
+ %t0 = load i32, i32* %foo, align 4
+; CHECK: %t0 = load i32, i32* %foo, align 4
ret i32 %t0
}
diff --git a/test/Analysis/GlobalsModRef/volatile-instrs.ll b/test/Analysis/GlobalsModRef/volatile-instrs.ll
index 46d3d76..a331bf3 100644
--- a/test/Analysis/GlobalsModRef/volatile-instrs.ll
+++ b/test/Analysis/GlobalsModRef/volatile-instrs.ll
@@ -22,9 +22,9 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32,
define i32 @main() nounwind uwtable ssp {
main_entry:
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* bitcast (%struct.anon* @b to i8*), i8* bitcast (%struct.anon* @a to i8*), i64 12, i32 4, i1 false)
- %0 = load volatile i32* getelementptr inbounds (%struct.anon* @b, i64 0, i32 0), align 4
+ %0 = load volatile i32, i32* getelementptr inbounds (%struct.anon, %struct.anon* @b, i64 0, i32 0), align 4
store i32 %0, i32* @c, align 4
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* bitcast (%struct.anon* @b to i8*), i8* bitcast (%struct.anon* @a to i8*), i64 12, i32 4, i1 false) nounwind
- %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %0) nounwind
+ %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %0) nounwind
ret i32 0
}
diff --git a/test/Analysis/LazyCallGraph/basic.ll b/test/Analysis/LazyCallGraph/basic.ll
index b8108d9..6e2cb90 100644
--- a/test/Analysis/LazyCallGraph/basic.ll
+++ b/test/Analysis/LazyCallGraph/basic.ll
@@ -118,10 +118,10 @@ define void @test2() {
; CHECK-NEXT: -> f1
; CHECK-NOT: ->
- load i8** bitcast (void ()** @g to i8**)
- load i8** bitcast (void ()** getelementptr ([4 x void ()*]* @g1, i32 0, i32 2) to i8**)
- load i8** bitcast (void ()** getelementptr ({i8, void ()*, i8}* @g2, i32 0, i32 1) to i8**)
- load i8** bitcast (void ()** @h to i8**)
+ load i8*, i8** bitcast (void ()** @g to i8**)
+ load i8*, i8** bitcast (void ()** getelementptr ([4 x void ()*], [4 x void ()*]* @g1, i32 0, i32 2) to i8**)
+ load i8*, i8** bitcast (void ()** getelementptr ({i8, void ()*, i8}, {i8, void ()*, i8}* @g2, i32 0, i32 1) to i8**)
+ load i8*, i8** bitcast (void ()** @h to i8**)
ret void
}
diff --git a/test/Analysis/Lint/cppeh-catch-intrinsics-clean.ll b/test/Analysis/Lint/cppeh-catch-intrinsics-clean.ll
index e398d71..8cd44c8 100644
--- a/test/Analysis/Lint/cppeh-catch-intrinsics-clean.ll
+++ b/test/Analysis/Lint/cppeh-catch-intrinsics-clean.ll
@@ -5,7 +5,7 @@
target triple = "x86_64-pc-windows-msvc"
-declare i8* @llvm.eh.begincatch(i8*)
+declare void @llvm.eh.begincatch(i8*, i8*)
declare void @llvm.eh.endcatch()
@@ -27,7 +27,7 @@ lpad: ; preds = %entry
br i1 %matches, label %catch, label %eh.resume
catch: ; preds = %lpad
- %2 = call i8* @llvm.eh.begincatch(i8* %exn)
+ call void @llvm.eh.begincatch(i8* %exn, i8* null)
call void @_Z10handle_intv()
br label %invoke.cont2
@@ -77,7 +77,7 @@ lpad1: ; preds = %entry
catch: ; preds = %lpad, %lpad1
%exn2 = phi i8* [%exn, %lpad], [%exn1, %lpad1]
%sel2 = phi i32 [%sel, %lpad], [%sel1, %lpad1]
- %3 = call i8* @llvm.eh.begincatch(i8* %exn2)
+ call void @llvm.eh.begincatch(i8* %exn2, i8* null)
call void @_Z10handle_intv()
%matches1 = icmp eq i32 %sel2, 0
br i1 %matches1, label %invoke.cont2, label %invoke.cont3
diff --git a/test/Analysis/Lint/cppeh-catch-intrinsics.ll b/test/Analysis/Lint/cppeh-catch-intrinsics.ll
index 5ab73e35..3a0c487 100644
--- a/test/Analysis/Lint/cppeh-catch-intrinsics.ll
+++ b/test/Analysis/Lint/cppeh-catch-intrinsics.ll
@@ -6,7 +6,7 @@
target triple = "x86_64-pc-windows-msvc"
-declare i8* @llvm.eh.begincatch(i8*)
+declare void @llvm.eh.begincatch(i8*, i8*)
declare void @llvm.eh.endcatch()
@@ -15,7 +15,7 @@ declare void @llvm.eh.endcatch()
; Function Attrs: uwtable
define void @test_missing_endcatch() {
; CHECK: Some paths from llvm.eh.begincatch may not reach llvm.eh.endcatch
-; CHECK-NEXT: %2 = call i8* @llvm.eh.begincatch(i8* %exn)
+; CHECK-NEXT: call void @llvm.eh.begincatch(i8* %exn, i8* null)
entry:
invoke void @_Z9may_throwv()
to label %try.cont unwind label %lpad
@@ -30,7 +30,7 @@ lpad: ; preds = %entry
br i1 %matches, label %catch, label %eh.resume
catch: ; preds = %lpad
- %2 = call i8* @llvm.eh.begincatch(i8* %exn)
+ call void @llvm.eh.begincatch(i8* %exn, i8* null)
call void @_Z10handle_intv()
br label %invoke.cont2
@@ -79,8 +79,8 @@ eh.resume: ; preds = %catch.dispatch
; Function Attrs: uwtable
define void @test_multiple_begin() {
; CHECK: llvm.eh.begincatch may be called a second time before llvm.eh.endcatch
-; CHECK-NEXT: %2 = call i8* @llvm.eh.begincatch(i8* %exn)
-; CHECK-NEXT: %3 = call i8* @llvm.eh.begincatch(i8* %exn)
+; CHECK-NEXT: call void @llvm.eh.begincatch(i8* %exn, i8* null)
+; CHECK-NEXT: call void @llvm.eh.begincatch(i8* %exn, i8* null)
entry:
invoke void @_Z9may_throwv()
to label %try.cont unwind label %lpad
@@ -95,12 +95,12 @@ lpad: ; preds = %entry
br i1 %matches, label %catch, label %eh.resume
catch: ; preds = %lpad
- %2 = call i8* @llvm.eh.begincatch(i8* %exn)
+ call void @llvm.eh.begincatch(i8* %exn, i8* null)
call void @_Z10handle_intv()
br label %invoke.cont2
invoke.cont2: ; preds = %catch
- %3 = call i8* @llvm.eh.begincatch(i8* %exn)
+ call void @llvm.eh.begincatch(i8* %exn, i8* null)
call void @llvm.eh.endcatch()
br label %try.cont
@@ -130,7 +130,7 @@ lpad: ; preds = %entry
br i1 %matches, label %catch, label %eh.resume
catch: ; preds = %lpad
- %2 = call i8* @llvm.eh.begincatch(i8* %exn)
+ call void @llvm.eh.begincatch(i8* %exn, i8* null)
call void @_Z10handle_intv()
call void @llvm.eh.endcatch()
br label %invoke.cont2
@@ -150,10 +150,10 @@ eh.resume: ; preds = %catch.dispatch
; Function Attrs: uwtable
define void @test_begincatch_without_lpad() {
; CHECK: llvm.eh.begincatch may be reachable without passing a landingpad
-; CHECK-NEXT: %0 = call i8* @llvm.eh.begincatch(i8* %exn)
+; CHECK-NEXT: call void @llvm.eh.begincatch(i8* %exn, i8* null)
entry:
%exn = alloca i8
- %0 = call i8* @llvm.eh.begincatch(i8* %exn)
+ call void @llvm.eh.begincatch(i8* %exn, i8* null)
call void @_Z10handle_intv()
br label %invoke.cont2
@@ -168,7 +168,7 @@ try.cont: ; preds = %invoke.cont2, %entr
; Function Attrs: uwtable
define void @test_branch_to_begincatch_with_no_lpad(i32 %fake.sel) {
; CHECK: llvm.eh.begincatch may be reachable without passing a landingpad
-; CHECK-NEXT: %3 = call i8* @llvm.eh.begincatch(i8* %exn2)
+; CHECK-NEXT: call void @llvm.eh.begincatch(i8* %exn2, i8* null)
entry:
%fake.exn = alloca i8
invoke void @_Z9may_throwv()
@@ -189,7 +189,7 @@ lpad: ; preds = %entry
catch: ; preds = %lpad, %entry
%exn2 = phi i8* [%exn, %lpad], [%fake.exn, %entry]
%sel2 = phi i32 [%sel, %lpad], [%fake.sel, %entry]
- %3 = call i8* @llvm.eh.begincatch(i8* %exn2)
+ call void @llvm.eh.begincatch(i8* %exn2, i8* null)
call void @_Z10handle_intv()
%matches1 = icmp eq i32 %sel2, 0
br i1 %matches1, label %invoke.cont2, label %invoke.cont3
@@ -213,7 +213,7 @@ eh.resume: ; preds = %catch.dispatch
; Function Attrs: uwtable
define void @test_branch_missing_endcatch() {
; CHECK: Some paths from llvm.eh.begincatch may not reach llvm.eh.endcatch
-; CHECK-NEXT: %3 = call i8* @llvm.eh.begincatch(i8* %exn2)
+; CHECK-NEXT: call void @llvm.eh.begincatch(i8* %exn2, i8* null)
entry:
invoke void @_Z9may_throwv()
to label %invoke.cont unwind label %lpad
@@ -247,7 +247,7 @@ lpad1: ; preds = %entry
catch: ; preds = %lpad, %lpad1
%exn2 = phi i8* [%exn, %lpad], [%exn1, %lpad1]
%sel2 = phi i32 [%sel, %lpad], [%sel1, %lpad1]
- %3 = call i8* @llvm.eh.begincatch(i8* %exn2)
+ call void @llvm.eh.begincatch(i8* %exn2, i8* null)
call void @_Z10handle_intv()
%matches1 = icmp eq i32 %sel2, 0
br i1 %matches1, label %invoke.cont2, label %invoke.cont3
diff --git a/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll b/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll
index f503a5c..238f3f4 100644
--- a/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll
+++ b/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll
@@ -20,25 +20,25 @@ target triple = "x86_64-apple-macosx10.10.0"
define void @f() {
entry:
- %a = load i32** @A, align 8
- %b = load i32** @B, align 8
+ %a = load i32*, i32** @A, align 8
+ %b = load i32*, i32** @B, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
%storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32* %a, i64 %storemerge3
- %loadA = load i32* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %storemerge3
+ %loadA = load i32, i32* %arrayidxA, align 2
- %arrayidxB = getelementptr inbounds i32* %b, i64 %storemerge3
- %loadB = load i32* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %storemerge3
+ %loadB = load i32, i32* %arrayidxB, align 2
%mul = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %storemerge3, 1
%a_float = bitcast i32* %a to float*
- %arrayidxA_plus_2 = getelementptr inbounds float* %a_float, i64 %add
+ %arrayidxA_plus_2 = getelementptr inbounds float, float* %a_float, i64 %add
%mul_float = sitofp i32 %mul to float
store float %mul_float, float* %arrayidxA_plus_2, align 2
diff --git a/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-no-dbg.ll b/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-no-dbg.ll
deleted file mode 100644
index 62291d5..0000000
--- a/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-no-dbg.ll
+++ /dev/null
@@ -1,60 +0,0 @@
-; RUN: opt -loop-accesses -analyze < %s | FileCheck %s
-
-; FIXME: This is the non-debug version of unsafe-and-rt-checks.ll not
-; requiring "asserts". Once we can check memory dependences without -debug,
-; we should remove this test.
-
-; Analyze this loop:
-; for (i = 0; i < n; i++)
-; A[i + 1] = A[i] * B[i] * C[i];
-
-target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-apple-macosx10.10.0"
-
-; CHECK: Report: unsafe dependent memory operations in loop
-
-; CHECK: Run-time memory checks:
-; CHECK-NEXT: 0:
-; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16* %a, i64 %add
-; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16* %b, i64 %storemerge3
-; CHECK-NEXT: 1:
-; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16* %a, i64 %add
-; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16* %c, i64 %storemerge3
-
-@n = global i32 20, align 4
-@B = common global i16* null, align 8
-@A = common global i16* null, align 8
-@C = common global i16* null, align 8
-
-define void @f() {
-entry:
- %a = load i16** @A, align 8
- %b = load i16** @B, align 8
- %c = load i16** @C, align 8
- br label %for.body
-
-for.body: ; preds = %for.body, %entry
- %storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ]
-
- %arrayidxA = getelementptr inbounds i16* %a, i64 %storemerge3
- %loadA = load i16* %arrayidxA, align 2
-
- %arrayidxB = getelementptr inbounds i16* %b, i64 %storemerge3
- %loadB = load i16* %arrayidxB, align 2
-
- %arrayidxC = getelementptr inbounds i16* %c, i64 %storemerge3
- %loadC = load i16* %arrayidxC, align 2
-
- %mul = mul i16 %loadB, %loadA
- %mul1 = mul i16 %mul, %loadC
-
- %add = add nuw nsw i64 %storemerge3, 1
- %arrayidxA_plus_2 = getelementptr inbounds i16* %a, i64 %add
- store i16 %mul1, i16* %arrayidxA_plus_2, align 2
-
- %exitcond = icmp eq i64 %add, 20
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body
- ret void
-}
diff --git a/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll b/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll
index 4769a3a..a11fd7f 100644
--- a/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll
+++ b/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll
@@ -1,6 +1,4 @@
; RUN: opt -loop-accesses -analyze < %s | FileCheck %s
-; RUN: opt -loop-accesses -analyze -debug-only=loop-accesses < %s 2>&1 | FileCheck %s --check-prefix=DEBUG
-; REQUIRES: asserts
; Analyze this loop:
; for (i = 0; i < n; i++)
@@ -10,17 +8,17 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"
; CHECK: Report: unsafe dependent memory operations in loop
-
-; DEBUG: LAA: Distance for %loadA = load i16* %arrayidxA, align 2 to store i16 %mul1, i16* %arrayidxA_plus_2, align 2: 2
-; DEBUG-NEXT: LAA: Failure because of Positive distance 2
-
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: %loadA = load i16, i16* %arrayidxA, align 2 ->
+; CHECK-NEXT: store i16 %mul1, i16* %arrayidxA_plus_2, align 2
; CHECK: Run-time memory checks:
; CHECK-NEXT: 0:
-; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16* %a, i64 %add
-; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16* %b, i64 %storemerge3
+; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
+; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3
; CHECK-NEXT: 1:
-; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16* %a, i64 %add
-; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16* %c, i64 %storemerge3
+; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
+; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3
@n = global i32 20, align 4
@B = common global i16* null, align 8
@@ -29,28 +27,28 @@ target triple = "x86_64-apple-macosx10.10.0"
define void @f() {
entry:
- %a = load i16** @A, align 8
- %b = load i16** @B, align 8
- %c = load i16** @C, align 8
+ %a = load i16*, i16** @A, align 8
+ %b = load i16*, i16** @B, align 8
+ %c = load i16*, i16** @C, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
%storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i16* %a, i64 %storemerge3
- %loadA = load i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3
+ %loadA = load i16, i16* %arrayidxA, align 2
- %arrayidxB = getelementptr inbounds i16* %b, i64 %storemerge3
- %loadB = load i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3
+ %loadB = load i16, i16* %arrayidxB, align 2
- %arrayidxC = getelementptr inbounds i16* %c, i64 %storemerge3
- %loadC = load i16* %arrayidxC, align 2
+ %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3
+ %loadC = load i16, i16* %arrayidxC, align 2
%mul = mul i16 %loadB, %loadA
%mul1 = mul i16 %mul, %loadC
%add = add nuw nsw i64 %storemerge3, 1
- %arrayidxA_plus_2 = getelementptr inbounds i16* %a, i64 %add
+ %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
store i16 %mul1, i16* %arrayidxA_plus_2, align 2
%exitcond = icmp eq i64 %add, 20
diff --git a/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll b/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll
index 3c95770..d472f7c 100644
--- a/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll
+++ b/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll
@@ -9,9 +9,9 @@ for.exit: ; preds = %for.body
for.body: ; preds = %for.body, %entry
%i.01 = phi i32 [ 0, %entry ], [ %tmp8.7, %for.body ]
- %arrayidx = getelementptr i32* %bufUInt, i32 %i.01
- %arrayidx5 = getelementptr i32* %pattern, i32 %i.01
- %tmp6 = load i32* %arrayidx5, align 4
+ %arrayidx = getelementptr i32, i32* %bufUInt, i32 %i.01
+ %arrayidx5 = getelementptr i32, i32* %pattern, i32 %i.01
+ %tmp6 = load i32, i32* %arrayidx5, align 4
store i32 %tmp6, i32* %arrayidx, align 4
%tmp8.7 = add i32 %i.01, 8
%cmp.7 = icmp ult i32 %tmp8.7, 1024
diff --git a/test/Analysis/ScalarEvolution/2007-07-15-NegativeStride.ll b/test/Analysis/ScalarEvolution/2007-07-15-NegativeStride.ll
index b5eb9fc..7380da3 100644
--- a/test/Analysis/ScalarEvolution/2007-07-15-NegativeStride.ll
+++ b/test/Analysis/ScalarEvolution/2007-07-15-NegativeStride.ll
@@ -11,7 +11,7 @@ entry:
bb: ; preds = %bb, %entry
%i.01.0 = phi i32 [ 100, %entry ], [ %tmp4, %bb ] ; <i32> [#uses=2]
- %tmp1 = getelementptr [101 x i32]* @array, i32 0, i32 %i.01.0 ; <i32*> [#uses=1]
+ %tmp1 = getelementptr [101 x i32], [101 x i32]* @array, i32 0, i32 %i.01.0 ; <i32*> [#uses=1]
store i32 %x, i32* %tmp1
%tmp4 = add i32 %i.01.0, -1 ; <i32> [#uses=2]
%tmp7 = icmp sgt i32 %tmp4, -1 ; <i1> [#uses=1]
diff --git a/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll b/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll
index dcf8fc9..7e42530 100644
--- a/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll
+++ b/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll
@@ -16,11 +16,11 @@ bb.nph: ; preds = %entry
bb: ; preds = %bb1, %bb.nph
%j.01 = phi i32 [ %8, %bb1 ], [ 0, %bb.nph ] ; <i32> [#uses=1]
- load i32* %srcptr, align 4 ; <i32>:1 [#uses=2]
+ load i32, i32* %srcptr, align 4 ; <i32>:1 [#uses=2]
and i32 %1, 255 ; <i32>:2 [#uses=1]
and i32 %1, -256 ; <i32>:3 [#uses=1]
- getelementptr [256 x i8]* @lut, i32 0, i32 %2 ; <i8*>:4 [#uses=1]
- load i8* %4, align 1 ; <i8>:5 [#uses=1]
+ getelementptr [256 x i8], [256 x i8]* @lut, i32 0, i32 %2 ; <i8*>:4 [#uses=1]
+ load i8, i8* %4, align 1 ; <i8>:5 [#uses=1]
zext i8 %5 to i32 ; <i32>:6 [#uses=1]
or i32 %6, %3 ; <i32>:7 [#uses=1]
store i32 %7, i32* %dstptr, align 4
diff --git a/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll b/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll
index 7a7a640..0c24ee4 100644
--- a/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll
+++ b/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll
@@ -9,9 +9,9 @@ bb1.thread:
bb1: ; preds = %bb1, %bb1.thread
%indvar = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ] ; <i32> [#uses=4]
%i.0.reg2mem.0 = sub i32 255, %indvar ; <i32> [#uses=2]
- %0 = getelementptr i32* %alp, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
- %1 = load i32* %0, align 4 ; <i32> [#uses=1]
- %2 = getelementptr i32* %lam, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
+ %0 = getelementptr i32, i32* %alp, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
+ %1 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
+ %2 = getelementptr i32, i32* %lam, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
store i32 %1, i32* %2, align 4
%3 = sub i32 254, %indvar ; <i32> [#uses=1]
%4 = icmp slt i32 %3, 0 ; <i1> [#uses=1]
diff --git a/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll b/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll
index f19d18c..ebcecbf 100644
--- a/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll
+++ b/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll
@@ -9,12 +9,12 @@
define void @func_15() nounwind {
entry:
- %0 = load i16* @g_16, align 2 ; <i16> [#uses=1]
+ %0 = load i16, i16* @g_16, align 2 ; <i16> [#uses=1]
%1 = icmp sgt i16 %0, 0 ; <i1> [#uses=1]
br i1 %1, label %bb2, label %bb.nph
bb.nph: ; preds = %entry
- %g_16.promoted = load i16* @g_16 ; <i16> [#uses=1]
+ %g_16.promoted = load i16, i16* @g_16 ; <i16> [#uses=1]
br label %bb
bb: ; preds = %bb1, %bb.nph
diff --git a/test/Analysis/ScalarEvolution/2009-05-09-PointerEdgeCount.ll b/test/Analysis/ScalarEvolution/2009-05-09-PointerEdgeCount.ll
index 5d1502d..4f6b90b 100644
--- a/test/Analysis/ScalarEvolution/2009-05-09-PointerEdgeCount.ll
+++ b/test/Analysis/ScalarEvolution/2009-05-09-PointerEdgeCount.ll
@@ -11,18 +11,18 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1
define void @_Z3foov() nounwind {
entry:
%x = alloca %struct.NonPod, align 8 ; <%struct.NonPod*> [#uses=2]
- %0 = getelementptr %struct.NonPod* %x, i32 0, i32 0 ; <[2 x %struct.Foo]*> [#uses=1]
- %1 = getelementptr [2 x %struct.Foo]* %0, i32 1, i32 0 ; <%struct.Foo*> [#uses=1]
+ %0 = getelementptr %struct.NonPod, %struct.NonPod* %x, i32 0, i32 0 ; <[2 x %struct.Foo]*> [#uses=1]
+ %1 = getelementptr [2 x %struct.Foo], [2 x %struct.Foo]* %0, i32 1, i32 0 ; <%struct.Foo*> [#uses=1]
br label %bb1.i
bb1.i: ; preds = %bb2.i, %entry
%.0.i = phi %struct.Foo* [ %1, %entry ], [ %4, %bb2.i ] ; <%struct.Foo*> [#uses=2]
- %2 = getelementptr %struct.NonPod* %x, i32 0, i32 0, i32 0 ; <%struct.Foo*> [#uses=1]
+ %2 = getelementptr %struct.NonPod, %struct.NonPod* %x, i32 0, i32 0, i32 0 ; <%struct.Foo*> [#uses=1]
%3 = icmp eq %struct.Foo* %.0.i, %2 ; <i1> [#uses=1]
br i1 %3, label %_ZN6NonPodD1Ev.exit, label %bb2.i
bb2.i: ; preds = %bb1.i
- %4 = getelementptr %struct.Foo* %.0.i, i32 -1 ; <%struct.Foo*> [#uses=1]
+ %4 = getelementptr %struct.Foo, %struct.Foo* %.0.i, i32 -1 ; <%struct.Foo*> [#uses=1]
br label %bb1.i
_ZN6NonPodD1Ev.exit: ; preds = %bb1.i
diff --git a/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll b/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll
index a4358aa..d18bdaf 100644
--- a/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll
+++ b/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll
@@ -3,11 +3,11 @@
define void @test() {
entry:
- %0 = load i16* undef, align 1
+ %0 = load i16, i16* undef, align 1
%1 = lshr i16 %0, 8
%2 = and i16 %1, 3
%3 = zext i16 %2 to i32
- %4 = load i8* undef, align 1
+ %4 = load i8, i8* undef, align 1
%5 = lshr i8 %4, 4
%6 = and i8 %5, 1
%7 = zext i8 %6 to i32
diff --git a/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll b/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll
index 5746d1c..c4a4c30 100644
--- a/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll
+++ b/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll
@@ -19,20 +19,20 @@ lbl_818: ; preds = %for.end, %entry
br label %for.cond
for.cond: ; preds = %for.body, %lbl_818
- %0 = load i32* @g_814, align 4
+ %0 = load i32, i32* @g_814, align 4
%cmp = icmp sle i32 %0, 0
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%idxprom = sext i32 %0 to i64
- %arrayidx = getelementptr inbounds [0 x i32]* getelementptr inbounds ([1 x [0 x i32]]* @g_244, i32 0, i64 0), i32 0, i64 %idxprom
- %1 = load i32* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* getelementptr inbounds ([1 x [0 x i32]], [1 x [0 x i32]]* @g_244, i32 0, i64 0), i32 0, i64 %idxprom
+ %1 = load i32, i32* %arrayidx, align 1
store i32 %1, i32* @func_21_l_773, align 4
store i32 1, i32* @g_814, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %2 = load i32* @func_21_l_773, align 4
+ %2 = load i32, i32* @func_21_l_773, align 4
%tobool = icmp ne i32 %2, 0
br i1 %tobool, label %lbl_818, label %if.end
diff --git a/test/Analysis/ScalarEvolution/SolveQuadraticEquation.ll b/test/Analysis/ScalarEvolution/SolveQuadraticEquation.ll
index 2cb8c5b..f7ef0ea 100644
--- a/test/Analysis/ScalarEvolution/SolveQuadraticEquation.ll
+++ b/test/Analysis/ScalarEvolution/SolveQuadraticEquation.ll
@@ -10,7 +10,7 @@ entry:
br label %bb3
bb: ; preds = %bb3
- %tmp = getelementptr [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
+ %tmp = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
store i32 123, i32* %tmp
%tmp2 = add i32 %i.0, 1 ; <i32> [#uses=1]
br label %bb3
diff --git a/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll b/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll
index 7eeb308..0976ef9 100644
--- a/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll
+++ b/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
define i32 @test() {
entry:
- %0 = load i32** undef, align 8 ; <i32*> [#uses=1]
+ %0 = load i32*, i32** undef, align 8 ; <i32*> [#uses=1]
%1 = ptrtoint i32* %0 to i64 ; <i64> [#uses=1]
%2 = sub i64 undef, %1 ; <i64> [#uses=1]
%3 = lshr i64 %2, 3 ; <i64> [#uses=1]
diff --git a/test/Analysis/ScalarEvolution/avoid-smax-0.ll b/test/Analysis/ScalarEvolution/avoid-smax-0.ll
index 8abb430..a282ee6 100644
--- a/test/Analysis/ScalarEvolution/avoid-smax-0.ll
+++ b/test/Analysis/ScalarEvolution/avoid-smax-0.ll
@@ -11,7 +11,7 @@ entry:
br i1 %0, label %bb, label %return
bb:
- load i32* %q, align 4
+ load i32, i32* %q, align 4
icmp eq i32 %1, 0
br i1 %2, label %return, label %bb3.preheader
@@ -20,10 +20,10 @@ bb3.preheader:
bb3:
%i.0 = phi i32 [ %7, %bb3 ], [ 0, %bb3.preheader ]
- getelementptr i32* %p, i32 %i.0
- load i32* %3, align 4
+ getelementptr i32, i32* %p, i32 %i.0
+ load i32, i32* %3, align 4
add i32 %4, 1
- getelementptr i32* %p, i32 %i.0
+ getelementptr i32, i32* %p, i32 %i.0
store i32 %5, i32* %6, align 4
add i32 %i.0, 1
icmp slt i32 %7, %n
diff --git a/test/Analysis/ScalarEvolution/avoid-smax-1.ll b/test/Analysis/ScalarEvolution/avoid-smax-1.ll
index d9b83a9..e6c62ee 100644
--- a/test/Analysis/ScalarEvolution/avoid-smax-1.ll
+++ b/test/Analysis/ScalarEvolution/avoid-smax-1.ll
@@ -35,9 +35,9 @@ bb6: ; preds = %bb7, %bb.nph7
%7 = add i32 %x.06, %4 ; <i32> [#uses=1]
%8 = shl i32 %x.06, 1 ; <i32> [#uses=1]
%9 = add i32 %6, %8 ; <i32> [#uses=1]
- %10 = getelementptr i8* %r, i32 %9 ; <i8*> [#uses=1]
- %11 = load i8* %10, align 1 ; <i8> [#uses=1]
- %12 = getelementptr i8* %j, i32 %7 ; <i8*> [#uses=1]
+ %10 = getelementptr i8, i8* %r, i32 %9 ; <i8*> [#uses=1]
+ %11 = load i8, i8* %10, align 1 ; <i8> [#uses=1]
+ %12 = getelementptr i8, i8* %j, i32 %7 ; <i8*> [#uses=1]
store i8 %11, i8* %12, align 1
%13 = add i32 %x.06, 1 ; <i32> [#uses=2]
br label %bb7
@@ -102,18 +102,18 @@ bb14: ; preds = %bb15, %bb.nph3
%x.12 = phi i32 [ %40, %bb15 ], [ 0, %bb.nph3 ] ; <i32> [#uses=5]
%29 = shl i32 %x.12, 2 ; <i32> [#uses=1]
%30 = add i32 %29, %25 ; <i32> [#uses=1]
- %31 = getelementptr i8* %r, i32 %30 ; <i8*> [#uses=1]
- %32 = load i8* %31, align 1 ; <i8> [#uses=1]
+ %31 = getelementptr i8, i8* %r, i32 %30 ; <i8*> [#uses=1]
+ %32 = load i8, i8* %31, align 1 ; <i8> [#uses=1]
%.sum = add i32 %26, %x.12 ; <i32> [#uses=1]
- %33 = getelementptr i8* %j, i32 %.sum ; <i8*> [#uses=1]
+ %33 = getelementptr i8, i8* %j, i32 %.sum ; <i8*> [#uses=1]
store i8 %32, i8* %33, align 1
%34 = shl i32 %x.12, 2 ; <i32> [#uses=1]
%35 = or i32 %34, 2 ; <i32> [#uses=1]
%36 = add i32 %35, %25 ; <i32> [#uses=1]
- %37 = getelementptr i8* %r, i32 %36 ; <i8*> [#uses=1]
- %38 = load i8* %37, align 1 ; <i8> [#uses=1]
+ %37 = getelementptr i8, i8* %r, i32 %36 ; <i8*> [#uses=1]
+ %38 = load i8, i8* %37, align 1 ; <i8> [#uses=1]
%.sum6 = add i32 %27, %x.12 ; <i32> [#uses=1]
- %39 = getelementptr i8* %j, i32 %.sum6 ; <i8*> [#uses=1]
+ %39 = getelementptr i8, i8* %j, i32 %.sum6 ; <i8*> [#uses=1]
store i8 %38, i8* %39, align 1
%40 = add i32 %x.12, 1 ; <i32> [#uses=2]
br label %bb15
@@ -168,10 +168,10 @@ bb23: ; preds = %bb24, %bb.nph
%y.21 = phi i32 [ %57, %bb24 ], [ 0, %bb.nph ] ; <i32> [#uses=3]
%53 = mul i32 %y.21, %50 ; <i32> [#uses=1]
%.sum1 = add i32 %53, %51 ; <i32> [#uses=1]
- %54 = getelementptr i8* %r, i32 %.sum1 ; <i8*> [#uses=1]
+ %54 = getelementptr i8, i8* %r, i32 %.sum1 ; <i8*> [#uses=1]
%55 = mul i32 %y.21, %w ; <i32> [#uses=1]
%.sum5 = add i32 %55, %.sum3 ; <i32> [#uses=1]
- %56 = getelementptr i8* %j, i32 %.sum5 ; <i8*> [#uses=1]
+ %56 = getelementptr i8, i8* %j, i32 %.sum5 ; <i8*> [#uses=1]
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %56, i8* %54, i32 %w, i32 1, i1 false)
%57 = add i32 %y.21, 1 ; <i32> [#uses=2]
br label %bb24
@@ -186,7 +186,7 @@ bb24.bb26_crit_edge: ; preds = %bb24
bb26: ; preds = %bb24.bb26_crit_edge, %bb22
%59 = mul i32 %x, %w ; <i32> [#uses=1]
%.sum4 = add i32 %.sum3, %59 ; <i32> [#uses=1]
- %60 = getelementptr i8* %j, i32 %.sum4 ; <i8*> [#uses=1]
+ %60 = getelementptr i8, i8* %j, i32 %.sum4 ; <i8*> [#uses=1]
%61 = mul i32 %x, %w ; <i32> [#uses=1]
%62 = sdiv i32 %61, 2 ; <i32> [#uses=1]
tail call void @llvm.memset.p0i8.i32(i8* %60, i8 -128, i32 %62, i32 1, i1 false)
@@ -204,9 +204,9 @@ bb.nph11: ; preds = %bb29
bb30: ; preds = %bb31, %bb.nph11
%y.310 = phi i32 [ %70, %bb31 ], [ 0, %bb.nph11 ] ; <i32> [#uses=3]
%66 = mul i32 %y.310, %64 ; <i32> [#uses=1]
- %67 = getelementptr i8* %r, i32 %66 ; <i8*> [#uses=1]
+ %67 = getelementptr i8, i8* %r, i32 %66 ; <i8*> [#uses=1]
%68 = mul i32 %y.310, %w ; <i32> [#uses=1]
- %69 = getelementptr i8* %j, i32 %68 ; <i8*> [#uses=1]
+ %69 = getelementptr i8, i8* %j, i32 %68 ; <i8*> [#uses=1]
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %69, i8* %67, i32 %w, i32 1, i1 false)
%70 = add i32 %y.310, 1 ; <i32> [#uses=2]
br label %bb31
@@ -220,7 +220,7 @@ bb31.bb33_crit_edge: ; preds = %bb31
bb33: ; preds = %bb31.bb33_crit_edge, %bb29
%72 = mul i32 %x, %w ; <i32> [#uses=1]
- %73 = getelementptr i8* %j, i32 %72 ; <i8*> [#uses=1]
+ %73 = getelementptr i8, i8* %j, i32 %72 ; <i8*> [#uses=1]
%74 = mul i32 %x, %w ; <i32> [#uses=1]
%75 = sdiv i32 %74, 2 ; <i32> [#uses=1]
tail call void @llvm.memset.p0i8.i32(i8* %73, i8 -128, i32 %75, i32 1, i1 false)
diff --git a/test/Analysis/ScalarEvolution/ext-antecedent.ll b/test/Analysis/ScalarEvolution/ext-antecedent.ll
new file mode 100644
index 0000000..e8d3813
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/ext-antecedent.ll
@@ -0,0 +1,45 @@
+; RUN: opt -S -indvars < %s | FileCheck %s
+
+declare void @use(i1)
+
+define void @sext_condition(i8 %t) {
+; CHECK-LABEL: sext_condition
+ entry:
+ %st = sext i8 %t to i16
+ %ecmp = icmp slt i16 %st, 42
+ br i1 %ecmp, label %loop, label %exit
+
+ loop:
+; CHECK-LABEL: loop
+ %idx = phi i8 [ %t, %entry ], [ %idx.inc, %loop ]
+ %idx.inc = add i8 %idx, 1
+ %c = icmp slt i8 %idx, 42
+; CHECK: call void @use(i1 true)
+ call void @use(i1 %c)
+ %be = icmp slt i8 %idx.inc, 42
+ br i1 %be, label %loop, label %exit
+
+ exit:
+ ret void
+}
+
+define void @zext_condition(i8 %t) {
+; CHECK-LABEL: zext_condition
+ entry:
+ %st = zext i8 %t to i16
+ %ecmp = icmp ult i16 %st, 42
+ br i1 %ecmp, label %loop, label %exit
+
+ loop:
+; CHECK-LABEL: loop
+ %idx = phi i8 [ %t, %entry ], [ %idx.inc, %loop ]
+ %idx.inc = add i8 %idx, 1
+ %c = icmp ult i8 %idx, 42
+; CHECK: call void @use(i1 true)
+ call void @use(i1 %c)
+ %be = icmp ult i8 %idx.inc, 42
+ br i1 %be, label %loop, label %exit
+
+ exit:
+ ret void
+}
diff --git a/test/Analysis/ScalarEvolution/fold.ll b/test/Analysis/ScalarEvolution/fold.ll
index ab57425..226a24f 100644
--- a/test/Analysis/ScalarEvolution/fold.ll
+++ b/test/Analysis/ScalarEvolution/fold.ll
@@ -34,7 +34,7 @@ loop:
%rand2 = icmp ugt i32 %A, %Z1
%Z2 = select i1 %rand2, i32 %A, i32 %Z1
; CHECK: %Z2 =
-; CHECK-NEXT: --> ([[EXPR:.*]]){{ +}}Exits: 20
+; CHECK-NEXT: --> ([[EXPR:.*]]){{ U: [^ ]+ S: [^ ]+}}{{ +}}Exits: 20
%B = trunc i32 %Z2 to i16
%C = sext i16 %B to i30
; CHECK: %C =
@@ -86,3 +86,15 @@ define void @test6(i8 %x) {
; CHECK: --> (2048 * ((zext i8 %x to i16) /u 8))
ret void
}
+
+; PR22960
+define void @test7(i32 %A) {
+; CHECK-LABEL: @test7
+ %B = sext i32 %A to i64
+ %C = zext i32 %A to i64
+ %D = sub i64 %B, %C
+ %E = trunc i64 %D to i16
+; CHECK: %E
+; CHECK-NEXT: --> 0
+ ret void
+}
diff --git a/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll b/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll
index c9689f7..078ca03 100644
--- a/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll
+++ b/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll
@@ -53,7 +53,7 @@ define void @infer.sext.1(i32 %start, i1* %c) {
; CHECK: %idx.sext = sext i32 %idx to i64
; CHECK-NEXT: --> {(2 + (sext i32 (4 * %start) to i64)),+,2}<nsw><%loop>
%idx.inc = add nsw i32 %idx, 2
- %condition = load i1* %c
+ %condition = load i1, i1* %c
br i1 %condition, label %exit, label %loop
exit:
@@ -73,7 +73,7 @@ define void @infer.sext.2(i1* %c, i8 %start) {
; CHECK: %idx.sext = sext i8 %idx to i16
; CHECK-NEXT: --> {(1 + (sext i8 %start to i16)),+,1}<nsw><%loop>
%idx.inc = add nsw i8 %idx, 1
- %condition = load volatile i1* %c
+ %condition = load volatile i1, i1* %c
br i1 %condition, label %exit, label %loop
exit:
@@ -93,7 +93,7 @@ define void @infer.zext.1(i1* %c, i8 %start) {
; CHECK: %idx.zext = zext i8 %idx to i16
; CHECK-NEXT: --> {(1 + (zext i8 %start to i16)),+,1}<nuw><%loop>
%idx.inc = add nuw i8 %idx, 1
- %condition = load volatile i1* %c
+ %condition = load volatile i1, i1* %c
br i1 %condition, label %exit, label %loop
exit:
diff --git a/test/Analysis/ScalarEvolution/infer-via-ranges.ll b/test/Analysis/ScalarEvolution/infer-via-ranges.ll
new file mode 100644
index 0000000..3627c3a
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/infer-via-ranges.ll
@@ -0,0 +1,30 @@
+; RUN: opt -indvars -S < %s | FileCheck %s
+
+define void @infer_via_ranges(i32 *%arr, i32 %n) {
+; CHECK-LABEL: @infer_via_ranges
+ entry:
+ %first.itr.check = icmp sgt i32 %n, 0
+ %start = sub i32 %n, 1
+ br i1 %first.itr.check, label %loop, label %exit
+
+ loop:
+; CHECK-LABEL: loop:
+ %idx = phi i32 [ %start, %entry ] , [ %idx.dec, %in.bounds ]
+ %idx.dec = sub i32 %idx, 1
+ %abc = icmp sge i32 %idx, 0
+; CHECK: br i1 true, label %in.bounds, label %out.of.bounds
+ br i1 %abc, label %in.bounds, label %out.of.bounds
+
+ in.bounds:
+; CHECK-LABEL: in.bounds:
+ %addr = getelementptr i32, i32* %arr, i32 %idx
+ store i32 0, i32* %addr
+ %next = icmp sgt i32 %idx.dec, -1
+ br i1 %next, label %loop, label %exit
+
+ out.of.bounds:
+ ret void
+
+ exit:
+ ret void
+}
diff --git a/test/Analysis/ScalarEvolution/load-with-range-metadata.ll b/test/Analysis/ScalarEvolution/load-with-range-metadata.ll
index 32c1074..f26c8d5 100644
--- a/test/Analysis/ScalarEvolution/load-with-range-metadata.ll
+++ b/test/Analysis/ScalarEvolution/load-with-range-metadata.ll
@@ -3,7 +3,7 @@
define i32 @slt_trip_count_with_range(i32 *%ptr0, i32 *%ptr1) {
; CHECK-LABEL: slt_trip_count_with_range
entry:
- %limit = load i32* %ptr0, !range !0
+ %limit = load i32, i32* %ptr0, !range !0
br label %loop
loop:
@@ -20,7 +20,7 @@ define i32 @slt_trip_count_with_range(i32 *%ptr0, i32 *%ptr1) {
define i32 @ult_trip_count_with_range(i32 *%ptr0, i32 *%ptr1) {
; CHECK-LABEL: ult_trip_count_with_range
entry:
- %limit = load i32* %ptr0, !range !0
+ %limit = load i32, i32* %ptr0, !range !0
br label %loop
loop:
diff --git a/test/Analysis/ScalarEvolution/load.ll b/test/Analysis/ScalarEvolution/load.ll
index 2c753f5..ea79476 100644
--- a/test/Analysis/ScalarEvolution/load.ll
+++ b/test/Analysis/ScalarEvolution/load.ll
@@ -14,14 +14,14 @@ entry:
for.body: ; preds = %entry, %for.body
%sum.04 = phi i32 [ 0, %entry ], [ %add2, %for.body ]
-; CHECK: --> %sum.04{{ *}}Exits: 2450
+; CHECK: --> %sum.04{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: 2450
%i.03 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds [50 x i32]* @arr1, i32 0, i32 %i.03
- %0 = load i32* %arrayidx, align 4
-; CHECK: --> %0{{ *}}Exits: 50
- %arrayidx1 = getelementptr inbounds [50 x i32]* @arr2, i32 0, i32 %i.03
- %1 = load i32* %arrayidx1, align 4
-; CHECK: --> %1{{ *}}Exits: 0
+ %arrayidx = getelementptr inbounds [50 x i32], [50 x i32]* @arr1, i32 0, i32 %i.03
+ %0 = load i32, i32* %arrayidx, align 4
+; CHECK: --> %0{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: 50
+ %arrayidx1 = getelementptr inbounds [50 x i32], [50 x i32]* @arr2, i32 0, i32 %i.03
+ %1 = load i32, i32* %arrayidx1, align 4
+; CHECK: --> %1{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: 0
%add = add i32 %0, %sum.04
%add2 = add i32 %add, %1
%inc = add nsw i32 %i.03, 1
@@ -48,15 +48,15 @@ entry:
for.body: ; preds = %entry, %for.body
%sum.02 = phi i32 [ 0, %entry ], [ %add, %for.body ]
-; CHECK: --> %sum.02{{ *}}Exits: 10
+; CHECK: --> %sum.02{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: 10
%n.01 = phi %struct.ListNode* [ bitcast ({ %struct.ListNode*, i32, [4 x i8] }* @node5 to %struct.ListNode*), %entry ], [ %1, %for.body ]
-; CHECK: --> %n.01{{ *}}Exits: @node1
- %i = getelementptr inbounds %struct.ListNode* %n.01, i64 0, i32 1
- %0 = load i32* %i, align 4
+; CHECK: --> %n.01{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: @node1
+ %i = getelementptr inbounds %struct.ListNode, %struct.ListNode* %n.01, i64 0, i32 1
+ %0 = load i32, i32* %i, align 4
%add = add nsw i32 %0, %sum.02
- %next = getelementptr inbounds %struct.ListNode* %n.01, i64 0, i32 0
- %1 = load %struct.ListNode** %next, align 8
-; CHECK: --> %1{{ *}}Exits: 0
+ %next = getelementptr inbounds %struct.ListNode, %struct.ListNode* %n.01, i64 0, i32 0
+ %1 = load %struct.ListNode*, %struct.ListNode** %next, align 8
+; CHECK: --> %1{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: 0
%cmp = icmp eq %struct.ListNode* %1, null
br i1 %cmp, label %for.end, label %for.body
diff --git a/test/Analysis/ScalarEvolution/max-trip-count-address-space.ll b/test/Analysis/ScalarEvolution/max-trip-count-address-space.ll
index aa5254c..8d6cb2f 100644
--- a/test/Analysis/ScalarEvolution/max-trip-count-address-space.ll
+++ b/test/Analysis/ScalarEvolution/max-trip-count-address-space.ll
@@ -4,7 +4,7 @@
target datalayout = "e-p:32:32:32-p1:16:16:16-p2:8:8:8-p4:64:64:64-n16:32:64"
-; CHECK: {%d,+,4}<%bb> Exits: ((4 * (trunc i32 (-1 + %n) to i16)) + %d)
+; CHECK: {%d,+,4}<%bb>{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: ((4 * (trunc i32 (-1 + %n) to i16)) + %d)
define void @foo(i32 addrspace(1)* nocapture %d, i32 %n) nounwind {
@@ -21,7 +21,7 @@ bb: ; preds = %bb1, %bb.nph
%p.01 = phi i8 [ %4, %bb1 ], [ -1, %bb.nph ] ; <i8> [#uses=2]
%1 = sext i8 %p.01 to i32 ; <i32> [#uses=1]
%2 = sext i32 %i.02 to i64 ; <i64> [#uses=1]
- %3 = getelementptr i32 addrspace(1)* %d, i64 %2 ; <i32*> [#uses=1]
+ %3 = getelementptr i32, i32 addrspace(1)* %d, i64 %2 ; <i32*> [#uses=1]
store i32 %1, i32 addrspace(1)* %3, align 4
%4 = add i8 %p.01, 1 ; <i8> [#uses=1]
%5 = add i32 %i.02, 1 ; <i32> [#uses=2]
@@ -50,7 +50,7 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body, %for.body.lr.ph
%indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %for.body.lr.ph ]
- %arrayidx = getelementptr i8 addrspace(1)* %a, i64 %indvar
+ %arrayidx = getelementptr i8, i8 addrspace(1)* %a, i64 %indvar
store i8 0, i8 addrspace(1)* %arrayidx, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp ne i64 %indvar.next, %tmp
diff --git a/test/Analysis/ScalarEvolution/max-trip-count.ll b/test/Analysis/ScalarEvolution/max-trip-count.ll
index 31f06a4..72560c7 100644
--- a/test/Analysis/ScalarEvolution/max-trip-count.ll
+++ b/test/Analysis/ScalarEvolution/max-trip-count.ll
@@ -2,7 +2,7 @@
; ScalarEvolution should be able to understand the loop and eliminate the casts.
-; CHECK: {%d,+,sizeof(i32)}
+; CHECK: {%d,+,4}
define void @foo(i32* nocapture %d, i32 %n) nounwind {
entry:
@@ -17,7 +17,7 @@ bb: ; preds = %bb1, %bb.nph
%p.01 = phi i8 [ %4, %bb1 ], [ -1, %bb.nph ] ; <i8> [#uses=2]
%1 = sext i8 %p.01 to i32 ; <i32> [#uses=1]
%2 = sext i32 %i.02 to i64 ; <i64> [#uses=1]
- %3 = getelementptr i32* %d, i64 %2 ; <i32*> [#uses=1]
+ %3 = getelementptr i32, i32* %d, i64 %2 ; <i32*> [#uses=1]
store i32 %1, i32* %3, align 4
%4 = add i8 %p.01, 1 ; <i8> [#uses=1]
%5 = add i32 %i.02, 1 ; <i32> [#uses=2]
@@ -39,7 +39,7 @@ return: ; preds = %bb1.return_crit_edge, %entry
; count, it should say so.
; PR7845
-; CHECK: Loop %for.cond: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.cond: <multiple exits> Unpredictable backedge-taken count.
; CHECK: Loop %for.cond: max backedge-taken count is 5
@.str = private constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=2]
@@ -65,7 +65,7 @@ for.inc: ; preds = %for.body
br label %for.cond
for.end: ; preds = %for.body, %for.cond
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %g_4.0) nounwind ; <i32> [#uses=0]
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %g_4.0) nounwind ; <i32> [#uses=0]
ret i32 0
}
@@ -82,7 +82,7 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body, %for.body.lr.ph
%indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %for.body.lr.ph ]
- %arrayidx = getelementptr i8* %a, i64 %indvar
+ %arrayidx = getelementptr i8, i8* %a, i64 %indvar
store i8 0, i8* %arrayidx, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp ne i64 %indvar.next, %tmp
@@ -101,7 +101,7 @@ for.end: ; preds = %for.cond.for.end_cr
; PR19799: Indvars miscompile due to an incorrect max backedge taken count from SCEV.
; CHECK-LABEL: @pr19799
-; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
; CHECK: Loop %for.body.i: max backedge-taken count is 1
@a = common global i32 0, align 4
@@ -127,7 +127,7 @@ bar.exit: ; preds = %for.cond.i, %for.bo
; PR18886: Indvars miscompile due to an incorrect max backedge taken count from SCEV.
; CHECK-LABEL: @pr18886
-; CHECK: Loop %for.body: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body: <multiple exits> Unpredictable backedge-taken count.
; CHECK: Loop %for.body: max backedge-taken count is 3
@aa = global i64 0, align 8
@@ -157,8 +157,8 @@ return:
; before the check is forever skipped.
;
; CHECK-LABEL: @cannot_compute_mustexit
-; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
-; CHECK: Loop %for.body.i: Unpredictable max backedge-taken count.
+; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body.i: Unpredictable max backedge-taken count.
@b = common global i32 0, align 4
define i32 @cannot_compute_mustexit() {
@@ -174,7 +174,7 @@ for.body.i: ; preds = %for.cond.i, %entry
for.cond.i: ; preds = %for.body.i
store i32 %add.i.i, i32* @a, align 4
- %ld = load volatile i32* @b
+ %ld = load volatile i32, i32* @b
%cmp.i = icmp ne i32 %ld, 0
br i1 %cmp.i, label %for.body.i, label %bar.exit
@@ -186,7 +186,7 @@ bar.exit: ; preds = %for.cond.i, %for.bo
; MaxBECount should be the minimum of them.
;
; CHECK-LABEL: @two_mustexit
-; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
; CHECK: Loop %for.body.i: max backedge-taken count is 1
define i32 @two_mustexit() {
entry:
diff --git a/test/Analysis/ScalarEvolution/min-max-exprs.ll b/test/Analysis/ScalarEvolution/min-max-exprs.ll
index 3e0a35d..892fc23 100644
--- a/test/Analysis/ScalarEvolution/min-max-exprs.ll
+++ b/test/Analysis/ScalarEvolution/min-max-exprs.ll
@@ -34,8 +34,8 @@ bb2: ; preds = %bb1
; min(N, i+3)
; CHECK: select i1 %tmp4, i64 %tmp5, i64 %tmp6
; CHECK-NEXT: --> (-1 + (-1 * ((-1 + (-1 * (sext i32 {3,+,1}<nw><%bb1> to i64))) smax (-1 + (-1 * (sext i32 %N to i64))))))
- %tmp11 = getelementptr inbounds i32* %A, i64 %tmp9
- %tmp12 = load i32* %tmp11, align 4
+ %tmp11 = getelementptr inbounds i32, i32* %A, i64 %tmp9
+ %tmp12 = load i32, i32* %tmp11, align 4
%tmp13 = shl nsw i32 %tmp12, 1
%tmp14 = icmp sge i32 3, %i.0
%tmp17 = add nsw i64 %i.0.1, -3
@@ -43,7 +43,7 @@ bb2: ; preds = %bb1
; max(0, i - 3)
; CHECK: select i1 %tmp14, i64 0, i64 %tmp17
; CHECK-NEXT: --> (-3 + (3 smax {0,+,1}<nuw><nsw><%bb1>))
- %tmp21 = getelementptr inbounds i32* %A, i64 %tmp19
+ %tmp21 = getelementptr inbounds i32, i32* %A, i64 %tmp19
store i32 %tmp13, i32* %tmp21, align 4
%tmp23 = add nuw nsw i32 %i.0, 1
br label %bb1
diff --git a/test/Analysis/ScalarEvolution/nowrap-preinc-limits.ll b/test/Analysis/ScalarEvolution/nowrap-preinc-limits.ll
new file mode 100644
index 0000000..1a5409d
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/nowrap-preinc-limits.ll
@@ -0,0 +1,44 @@
+; RUN: opt -analyze -scalar-evolution < %s | FileCheck %s
+
+define void @f(i1* %condition) {
+; CHECK-LABEL: Classifying expressions for: @f
+ entry:
+ br label %loop
+
+ loop:
+ %idx = phi i32 [ 0, %entry ], [ %idx.inc, %loop ]
+ %idx.inc = add nsw i32 %idx, 1
+
+ %idx.inc2 = add i32 %idx.inc, 1
+ %idx.inc2.zext = zext i32 %idx.inc2 to i64
+
+; CHECK: %idx.inc2.zext = zext i32 %idx.inc2 to i64
+; CHECK-NEXT: --> {2,+,1}<nuw><%loop>
+
+ %c = load volatile i1, i1* %condition
+ br i1 %c, label %loop, label %exit
+
+ exit:
+ ret void
+}
+
+define void @g(i1* %condition) {
+; CHECK-LABEL: Classifying expressions for: @g
+ entry:
+ br label %loop
+
+ loop:
+ %idx = phi i32 [ 0, %entry ], [ %idx.inc, %loop ]
+ %idx.inc = add nsw i32 %idx, 3
+
+ %idx.inc2 = add i32 %idx.inc, -1
+ %idx.inc2.sext = sext i32 %idx.inc2 to i64
+; CHECK: %idx.inc2.sext = sext i32 %idx.inc2 to i64
+; CHECK-NEXT: --> {2,+,3}<nuw><nsw><%loop>
+
+ %c = load volatile i1, i1* %condition
+ br i1 %c, label %loop, label %exit
+
+ exit:
+ ret void
+}
diff --git a/test/Analysis/ScalarEvolution/nsw-offset-assume.ll b/test/Analysis/ScalarEvolution/nsw-offset-assume.ll
index 29cf658..bef1070 100644
--- a/test/Analysis/ScalarEvolution/nsw-offset-assume.ll
+++ b/test/Analysis/ScalarEvolution/nsw-offset-assume.ll
@@ -24,23 +24,23 @@ bb: ; preds = %bb.nph, %bb1
; CHECK: --> {0,+,2}<nuw><nsw><%bb>
%1 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
-; CHECK: %2 = getelementptr inbounds double* %d, i64 %1
+; CHECK: %2 = getelementptr inbounds double, double* %d, i64 %1
; CHECK: --> {%d,+,16}<nsw><%bb>
- %2 = getelementptr inbounds double* %d, i64 %1 ; <double*> [#uses=1]
+ %2 = getelementptr inbounds double, double* %d, i64 %1 ; <double*> [#uses=1]
- %3 = load double* %2, align 8 ; <double> [#uses=1]
+ %3 = load double, double* %2, align 8 ; <double> [#uses=1]
%4 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %5 = getelementptr inbounds double* %q, i64 %4 ; <double*> [#uses=1]
- %6 = load double* %5, align 8 ; <double> [#uses=1]
+ %5 = getelementptr inbounds double, double* %q, i64 %4 ; <double*> [#uses=1]
+ %6 = load double, double* %5, align 8 ; <double> [#uses=1]
%7 = or i32 %i.01, 1 ; <i32> [#uses=1]
; CHECK: %8 = sext i32 %7 to i64
; CHECK: --> {1,+,2}<nuw><nsw><%bb>
%8 = sext i32 %7 to i64 ; <i64> [#uses=1]
-; CHECK: %9 = getelementptr inbounds double* %q, i64 %8
+; CHECK: %9 = getelementptr inbounds double, double* %q, i64 %8
; CHECK: {(8 + %q),+,16}<nsw><%bb>
- %9 = getelementptr inbounds double* %q, i64 %8 ; <double*> [#uses=1]
+ %9 = getelementptr inbounds double, double* %q, i64 %8 ; <double*> [#uses=1]
; Artificially repeat the above three instructions, this time using
; add nsw instead of or.
@@ -50,16 +50,16 @@ bb: ; preds = %bb.nph, %bb1
; CHECK: --> {1,+,2}<nuw><nsw><%bb>
%t8 = sext i32 %t7 to i64 ; <i64> [#uses=1]
-; CHECK: %t9 = getelementptr inbounds double* %q, i64 %t8
+; CHECK: %t9 = getelementptr inbounds double, double* %q, i64 %t8
; CHECK: {(8 + %q),+,16}<nsw><%bb>
- %t9 = getelementptr inbounds double* %q, i64 %t8 ; <double*> [#uses=1]
+ %t9 = getelementptr inbounds double, double* %q, i64 %t8 ; <double*> [#uses=1]
- %10 = load double* %9, align 8 ; <double> [#uses=1]
+ %10 = load double, double* %9, align 8 ; <double> [#uses=1]
%11 = fadd double %6, %10 ; <double> [#uses=1]
%12 = fadd double %11, 3.200000e+00 ; <double> [#uses=1]
%13 = fmul double %3, %12 ; <double> [#uses=1]
%14 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %15 = getelementptr inbounds double* %d, i64 %14 ; <double*> [#uses=1]
+ %15 = getelementptr inbounds double, double* %d, i64 %14 ; <double*> [#uses=1]
store double %13, double* %15, align 8
%16 = add nsw i32 %i.01, 2 ; <i32> [#uses=2]
br label %bb1
diff --git a/test/Analysis/ScalarEvolution/nsw-offset.ll b/test/Analysis/ScalarEvolution/nsw-offset.ll
index 88cdcf2..127bb19 100644
--- a/test/Analysis/ScalarEvolution/nsw-offset.ll
+++ b/test/Analysis/ScalarEvolution/nsw-offset.ll
@@ -22,23 +22,23 @@ bb: ; preds = %bb.nph, %bb1
; CHECK: --> {0,+,2}<nuw><nsw><%bb>
%1 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
-; CHECK: %2 = getelementptr inbounds double* %d, i64 %1
+; CHECK: %2 = getelementptr inbounds double, double* %d, i64 %1
; CHECK: --> {%d,+,16}<nsw><%bb>
- %2 = getelementptr inbounds double* %d, i64 %1 ; <double*> [#uses=1]
+ %2 = getelementptr inbounds double, double* %d, i64 %1 ; <double*> [#uses=1]
- %3 = load double* %2, align 8 ; <double> [#uses=1]
+ %3 = load double, double* %2, align 8 ; <double> [#uses=1]
%4 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %5 = getelementptr inbounds double* %q, i64 %4 ; <double*> [#uses=1]
- %6 = load double* %5, align 8 ; <double> [#uses=1]
+ %5 = getelementptr inbounds double, double* %q, i64 %4 ; <double*> [#uses=1]
+ %6 = load double, double* %5, align 8 ; <double> [#uses=1]
%7 = or i32 %i.01, 1 ; <i32> [#uses=1]
; CHECK: %8 = sext i32 %7 to i64
; CHECK: --> {1,+,2}<nuw><nsw><%bb>
%8 = sext i32 %7 to i64 ; <i64> [#uses=1]
-; CHECK: %9 = getelementptr inbounds double* %q, i64 %8
+; CHECK: %9 = getelementptr inbounds double, double* %q, i64 %8
; CHECK: {(8 + %q),+,16}<nsw><%bb>
- %9 = getelementptr inbounds double* %q, i64 %8 ; <double*> [#uses=1]
+ %9 = getelementptr inbounds double, double* %q, i64 %8 ; <double*> [#uses=1]
; Artificially repeat the above three instructions, this time using
; add nsw instead of or.
@@ -48,16 +48,16 @@ bb: ; preds = %bb.nph, %bb1
; CHECK: --> {1,+,2}<nuw><nsw><%bb>
%t8 = sext i32 %t7 to i64 ; <i64> [#uses=1]
-; CHECK: %t9 = getelementptr inbounds double* %q, i64 %t8
+; CHECK: %t9 = getelementptr inbounds double, double* %q, i64 %t8
; CHECK: {(8 + %q),+,16}<nsw><%bb>
- %t9 = getelementptr inbounds double* %q, i64 %t8 ; <double*> [#uses=1]
+ %t9 = getelementptr inbounds double, double* %q, i64 %t8 ; <double*> [#uses=1]
- %10 = load double* %9, align 8 ; <double> [#uses=1]
+ %10 = load double, double* %9, align 8 ; <double> [#uses=1]
%11 = fadd double %6, %10 ; <double> [#uses=1]
%12 = fadd double %11, 3.200000e+00 ; <double> [#uses=1]
%13 = fmul double %3, %12 ; <double> [#uses=1]
%14 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %15 = getelementptr inbounds double* %d, i64 %14 ; <double*> [#uses=1]
+ %15 = getelementptr inbounds double, double* %d, i64 %14 ; <double*> [#uses=1]
store double %13, double* %15, align 8
%16 = add nsw i32 %i.01, 2 ; <i32> [#uses=2]
br label %bb1
diff --git a/test/Analysis/ScalarEvolution/nsw.ll b/test/Analysis/ScalarEvolution/nsw.ll
index d776a5a..15444e3 100644
--- a/test/Analysis/ScalarEvolution/nsw.ll
+++ b/test/Analysis/ScalarEvolution/nsw.ll
@@ -7,7 +7,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64"
; CHECK: Classifying expressions for: @test1
define void @test1(double* %p) nounwind {
entry:
- %tmp = load double* %p, align 8 ; <double> [#uses=1]
+ %tmp = load double, double* %p, align 8 ; <double> [#uses=1]
%tmp1 = fcmp ogt double %tmp, 2.000000e+00 ; <i1> [#uses=1]
br i1 %tmp1, label %bb.nph, label %return
@@ -19,11 +19,11 @@ bb: ; preds = %bb1, %bb.nph
; CHECK: %i.01
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%bb>
%tmp2 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %tmp3 = getelementptr double* %p, i64 %tmp2 ; <double*> [#uses=1]
- %tmp4 = load double* %tmp3, align 8 ; <double> [#uses=1]
+ %tmp3 = getelementptr double, double* %p, i64 %tmp2 ; <double*> [#uses=1]
+ %tmp4 = load double, double* %tmp3, align 8 ; <double> [#uses=1]
%tmp5 = fmul double %tmp4, 9.200000e+00 ; <double> [#uses=1]
%tmp6 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %tmp7 = getelementptr double* %p, i64 %tmp6 ; <double*> [#uses=1]
+ %tmp7 = getelementptr double, double* %p, i64 %tmp6 ; <double*> [#uses=1]
; CHECK: %tmp7
; CHECK-NEXT: --> {%p,+,8}<%bb>
store double %tmp5, double* %tmp7, align 8
@@ -36,10 +36,10 @@ bb1: ; preds = %bb
%phitmp = sext i32 %tmp8 to i64 ; <i64> [#uses=1]
; CHECK: %phitmp
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%bb>
- %tmp9 = getelementptr double* %p, i64 %phitmp ; <double*> [#uses=1]
+ %tmp9 = getelementptr double, double* %p, i64 %phitmp ; <double*> [#uses=1]
; CHECK: %tmp9
; CHECK-NEXT: --> {(8 + %p),+,8}<%bb>
- %tmp10 = load double* %tmp9, align 8 ; <double> [#uses=1]
+ %tmp10 = load double, double* %tmp9, align 8 ; <double> [#uses=1]
%tmp11 = fcmp ogt double %tmp10, 2.000000e+00 ; <i1> [#uses=1]
br i1 %tmp11, label %bb, label %bb1.return_crit_edge
@@ -64,7 +64,7 @@ for.body.i.i: ; preds = %for.body.i.i, %for.
; CHECK: %__first.addr.02.i.i
; CHECK-NEXT: --> {%begin,+,4}<nuw><%for.body.i.i>
store i32 0, i32* %__first.addr.02.i.i, align 4
- %ptrincdec.i.i = getelementptr inbounds i32* %__first.addr.02.i.i, i64 1
+ %ptrincdec.i.i = getelementptr inbounds i32, i32* %__first.addr.02.i.i, i64 1
; CHECK: %ptrincdec.i.i
; CHECK-NEXT: --> {(4 + %begin),+,4}<nuw><%for.body.i.i>
%cmp.i.i = icmp eq i32* %ptrincdec.i.i, %end
@@ -90,10 +90,10 @@ for.body.i.i: ; preds = %entry, %for.body.i.
%tmp = add nsw i64 %indvar.i.i, 1
; CHECK: %tmp =
; CHECK: {1,+,1}<nuw><nsw><%for.body.i.i>
- %ptrincdec.i.i = getelementptr inbounds i32* %begin, i64 %tmp
+ %ptrincdec.i.i = getelementptr inbounds i32, i32* %begin, i64 %tmp
; CHECK: %ptrincdec.i.i =
; CHECK: {(4 + %begin),+,4}<nsw><%for.body.i.i>
- %__first.addr.08.i.i = getelementptr inbounds i32* %begin, i64 %indvar.i.i
+ %__first.addr.08.i.i = getelementptr inbounds i32, i32* %begin, i64 %indvar.i.i
; CHECK: %__first.addr.08.i.i
; CHECK: {%begin,+,4}<nsw><%for.body.i.i>
store i32 0, i32* %__first.addr.08.i.i, align 4
@@ -124,17 +124,17 @@ exit:
}
; CHECK-LABEL: PR12375
-; CHECK: --> {(4 + %arg),+,4}<nuw><%bb1> Exits: (8 + %arg)<nsw>
+; CHECK: --> {(4 + %arg),+,4}<nuw><%bb1>{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: (8 + %arg)<nsw>
define i32 @PR12375(i32* readnone %arg) {
bb:
- %tmp = getelementptr inbounds i32* %arg, i64 2
+ %tmp = getelementptr inbounds i32, i32* %arg, i64 2
br label %bb1
bb1: ; preds = %bb1, %bb
%tmp2 = phi i32* [ %arg, %bb ], [ %tmp5, %bb1 ]
%tmp3 = phi i32 [ 0, %bb ], [ %tmp4, %bb1 ]
%tmp4 = add nsw i32 %tmp3, 1
- %tmp5 = getelementptr inbounds i32* %tmp2, i64 1
+ %tmp5 = getelementptr inbounds i32, i32* %tmp2, i64 1
%tmp6 = icmp ult i32* %tmp5, %tmp
br i1 %tmp6, label %bb1, label %bb7
@@ -143,7 +143,7 @@ bb7: ; preds = %bb1
}
; CHECK-LABEL: PR12376
-; CHECK: --> {(4 + %arg),+,4}<nuw><%bb2> Exits: (4 + (4 * ((3 + (-1 * %arg) + (%arg umax %arg1)) /u 4)) + %arg)
+; CHECK: --> {(4 + %arg),+,4}<nuw><%bb2>{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: (4 + (4 * ((3 + (-1 * %arg) + (%arg umax %arg1)) /u 4)) + %arg)
define void @PR12376(i32* nocapture %arg, i32* nocapture %arg1) {
bb:
br label %bb2
@@ -151,7 +151,7 @@ bb:
bb2: ; preds = %bb2, %bb
%tmp = phi i32* [ %arg, %bb ], [ %tmp4, %bb2 ]
%tmp3 = icmp ult i32* %tmp, %arg1
- %tmp4 = getelementptr inbounds i32* %tmp, i64 1
+ %tmp4 = getelementptr inbounds i32, i32* %tmp, i64 1
br i1 %tmp3, label %bb2, label %bb5
bb5: ; preds = %bb2
@@ -161,7 +161,7 @@ bb5: ; preds = %bb2
declare void @f(i32)
; CHECK-LABEL: nswnowrap
-; CHECK: --> {(1 + %v),+,1}<nsw><%for.body> Exits: (2 + %v)
+; CHECK: --> {(1 + %v),+,1}<nsw><%for.body>{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: (2 + %v)
define void @nswnowrap(i32 %v) {
entry:
%add = add nsw i32 %v, 1
diff --git a/test/Analysis/ScalarEvolution/pr22179.ll b/test/Analysis/ScalarEvolution/pr22179.ll
index d9fb510..21ed055 100644
--- a/test/Analysis/ScalarEvolution/pr22179.ll
+++ b/test/Analysis/ScalarEvolution/pr22179.ll
@@ -9,12 +9,12 @@
; Function Attrs: nounwind ssp uwtable
define i32 @main() {
; CHECK-LABEL: Classifying expressions for: @main
- store i8 0, i8* getelementptr inbounds (%struct.anon* @a, i64 0, i32 0), align 1
+ store i8 0, i8* getelementptr inbounds (%struct.anon, %struct.anon* @a, i64 0, i32 0), align 1
br label %loop
loop:
%storemerge1 = phi i8 [ 0, %0 ], [ %inc, %loop ]
- %m = load volatile i32* getelementptr inbounds (%struct.S* @b, i64 0, i32 0), align 4
+ %m = load volatile i32, i32* getelementptr inbounds (%struct.S, %struct.S* @b, i64 0, i32 0), align 4
%inc = add nuw i8 %storemerge1, 1
; CHECK: %inc = add nuw i8 %storemerge1, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop>
@@ -23,6 +23,6 @@ loop:
br i1 %exitcond, label %exit, label %loop
exit:
- store i8 -128, i8* getelementptr inbounds (%struct.anon* @a, i64 0, i32 0), align 1
+ store i8 -128, i8* getelementptr inbounds (%struct.anon, %struct.anon* @a, i64 0, i32 0), align 1
ret i32 0
}
diff --git a/test/Analysis/ScalarEvolution/pr22674.ll b/test/Analysis/ScalarEvolution/pr22674.ll
index 7defcb9..1bc7fd3 100644
--- a/test/Analysis/ScalarEvolution/pr22674.ll
+++ b/test/Analysis/ScalarEvolution/pr22674.ll
@@ -44,11 +44,11 @@ cond.false: ; preds = %for.end, %for.inc,
unreachable
_ZNK4llvm12AttributeSet3endEj.exit: ; preds = %for.end
- %second.i.i.i = getelementptr inbounds %"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507"* undef, i32 %I.099.lcssa129, i32 1
- %0 = load %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"** %second.i.i.i, align 4, !tbaa !2
- %NumAttrs.i.i.i = getelementptr inbounds %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"* %0, i32 0, i32 1
- %1 = load i32* %NumAttrs.i.i.i, align 4, !tbaa !8
- %add.ptr.i.i.i55 = getelementptr inbounds %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* undef, i32 %1
+ %second.i.i.i = getelementptr inbounds %"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507", %"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507"* undef, i32 %I.099.lcssa129, i32 1
+ %0 = load %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"*, %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"** %second.i.i.i, align 4, !tbaa !2
+ %NumAttrs.i.i.i = getelementptr inbounds %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506", %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"* %0, i32 0, i32 1
+ %1 = load i32, i32* %NumAttrs.i.i.i, align 4, !tbaa !8
+ %add.ptr.i.i.i55 = getelementptr inbounds %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509", %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* undef, i32 %1
br i1 undef, label %return, label %for.body11
for.cond9: ; preds = %_ZNK4llvm9Attribute13getKindAsEnumEv.exit
@@ -58,7 +58,7 @@ for.cond9: ; preds = %_ZNK4llvm9Attribute
for.body11: ; preds = %for.cond9, %_ZNK4llvm12AttributeSet3endEj.exit
%I5.096 = phi %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* [ %incdec.ptr, %for.cond9 ], [ undef, %_ZNK4llvm12AttributeSet3endEj.exit ]
%2 = bitcast %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* %I5.096 to i32*
- %3 = load i32* %2, align 4, !tbaa !10
+ %3 = load i32, i32* %2, align 4, !tbaa !10
%tobool.i59 = icmp eq i32 %3, 0
br i1 %tobool.i59, label %cond.false21, label %_ZNK4llvm9Attribute15isEnumAttributeEv.exit
@@ -70,7 +70,7 @@ _ZNK4llvm9Attribute15isEnumAttributeEv.exit: ; preds = %for.body11
]
_ZNK4llvm9Attribute13getKindAsEnumEv.exit: ; preds = %_ZNK4llvm9Attribute15isEnumAttributeEv.exit, %_ZNK4llvm9Attribute15isEnumAttributeEv.exit
- %incdec.ptr = getelementptr inbounds %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* %I5.096, i32 1
+ %incdec.ptr = getelementptr inbounds %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509", %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* %I5.096, i32 1
br i1 undef, label %for.cond9, label %return
cond.false21: ; preds = %_ZNK4llvm9Attribute15isEnumAttributeEv.exit, %for.body11
diff --git a/test/Analysis/ScalarEvolution/pr22856.ll b/test/Analysis/ScalarEvolution/pr22856.ll
new file mode 100644
index 0000000..89e8351
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/pr22856.ll
@@ -0,0 +1,33 @@
+; RUN: opt -loop-reduce -verify < %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64--linux-gnu"
+
+define void @unbounded() {
+
+block_A:
+ %0 = sext i32 undef to i64
+ br i1 undef, label %block_F, label %block_G
+
+block_C: ; preds = %block_F
+ br i1 undef, label %block_D, label %block_E
+
+block_D: ; preds = %block_D, %block_C
+ br i1 undef, label %block_E, label %block_D
+
+block_E: ; preds = %block_D, %block_C
+ %iv2 = phi i64 [ %4, %block_D ], [ %4, %block_C ]
+ %1 = add nsw i32 %iv1, 1
+ %2 = icmp eq i32 %1, undef
+ br i1 %2, label %block_G, label %block_F
+
+block_F: ; preds = %block_E, %block_A
+ %iv3 = phi i64 [ %iv2, %block_E ], [ %0, %block_A ]
+ %iv1 = phi i32 [ %1, %block_E ], [ undef, %block_A ]
+ %3 = add nsw i64 %iv3, 2
+ %4 = add nsw i64 %iv3, 1
+ br label %block_C
+
+block_G: ; preds = %block_E, %block_A
+ ret void
+}
diff --git a/test/Analysis/ScalarEvolution/range-signedness.ll b/test/Analysis/ScalarEvolution/range-signedness.ll
new file mode 100644
index 0000000..d04fc9e
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/range-signedness.ll
@@ -0,0 +1,39 @@
+; RUN: opt -analyze -scalar-evolution < %s | FileCheck %s
+
+define void @x(i1* %cond) {
+; CHECK-LABEL: Classifying expressions for: @x
+ entry:
+ br label %loop
+
+ loop:
+ %idx = phi i8 [ 0, %entry ], [ %idx.inc, %loop ]
+; CHECK: %idx = phi i8 [ 0, %entry ], [ %idx.inc, %loop ]
+; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,-128) S: [0,-128)
+
+ %idx.inc = add nsw i8 %idx, 1
+
+ %c = load volatile i1, i1* %cond
+ br i1 %c, label %loop, label %exit
+
+ exit:
+ ret void
+}
+
+define void @y(i8* %addr) {
+; CHECK-LABEL: Classifying expressions for: @y
+ entry:
+ br label %loop
+
+ loop:
+ %idx = phi i8 [-5, %entry ], [ %idx.inc, %loop ]
+; CHECK: %idx = phi i8 [ -5, %entry ], [ %idx.inc, %loop ]
+; CHECK-NEXT: --> {-5,+,1}<%loop> U: [-5,6) S: [-5,6)
+
+ %idx.inc = add i8 %idx, 1
+
+ %continue = icmp slt i8 %idx.inc, 6
+ br i1 %continue, label %loop, label %exit
+
+ exit:
+ ret void
+}
diff --git a/test/Analysis/ScalarEvolution/scev-aa.ll b/test/Analysis/ScalarEvolution/scev-aa.ll
index a0abbb7..e2123f4 100644
--- a/test/Analysis/ScalarEvolution/scev-aa.ll
+++ b/test/Analysis/ScalarEvolution/scev-aa.ll
@@ -19,11 +19,11 @@ entry:
bb:
%i = phi i64 [ 0, %entry ], [ %i.next, %bb ]
- %pi = getelementptr double* %p, i64 %i
+ %pi = getelementptr double, double* %p, i64 %i
%i.next = add i64 %i, 1
- %pi.next = getelementptr double* %p, i64 %i.next
- %x = load double* %pi
- %y = load double* %pi.next
+ %pi.next = getelementptr double, double* %p, i64 %i.next
+ %x = load double, double* %pi
+ %y = load double, double* %pi.next
%z = fmul double %x, %y
store double %z, double* %pi
%exitcond = icmp eq i64 %i.next, %n
@@ -58,18 +58,18 @@ bb:
%i.next = add i64 %i, 1
%e = add i64 %i, %j
- %pi.j = getelementptr double* %p, i64 %e
+ %pi.j = getelementptr double, double* %p, i64 %e
%f = add i64 %i.next, %j
- %pi.next.j = getelementptr double* %p, i64 %f
- %x = load double* %pi.j
- %y = load double* %pi.next.j
+ %pi.next.j = getelementptr double, double* %p, i64 %f
+ %x = load double, double* %pi.j
+ %y = load double, double* %pi.next.j
%z = fmul double %x, %y
store double %z, double* %pi.j
%o = add i64 %j, 91
%g = add i64 %i, %o
- %pi.j.next = getelementptr double* %p, i64 %g
- %a = load double* %pi.j.next
+ %pi.j.next = getelementptr double, double* %p, i64 %g
+ %a = load double, double* %pi.j.next
%b = fmul double %x, %a
store double %b, double* %pi.j.next
@@ -115,18 +115,18 @@ bb:
%i.next = add i64 %i, 1
%e = add i64 %i, %j
- %pi.j = getelementptr double* %p, i64 %e
+ %pi.j = getelementptr double, double* %p, i64 %e
%f = add i64 %i.next, %j
- %pi.next.j = getelementptr double* %p, i64 %f
- %x = load double* %pi.j
- %y = load double* %pi.next.j
+ %pi.next.j = getelementptr double, double* %p, i64 %f
+ %x = load double, double* %pi.j
+ %y = load double, double* %pi.next.j
%z = fmul double %x, %y
store double %z, double* %pi.j
%o = add i64 %j, %n
%g = add i64 %i, %o
- %pi.j.next = getelementptr double* %p, i64 %g
- %a = load double* %pi.j.next
+ %pi.j.next = getelementptr double, double* %p, i64 %g
+ %a = load double, double* %pi.j.next
%b = fmul double %x, %a
store double %b, double* %pi.j.next
@@ -161,12 +161,12 @@ return:
define void @foo() {
entry:
%A = alloca %struct.A
- %B = getelementptr %struct.A* %A, i32 0, i32 0
+ %B = getelementptr %struct.A, %struct.A* %A, i32 0, i32 0
%Q = bitcast %struct.B* %B to %struct.A*
- %Z = getelementptr %struct.A* %Q, i32 0, i32 1
- %C = getelementptr %struct.B* %B, i32 1
+ %Z = getelementptr %struct.A, %struct.A* %Q, i32 0, i32 1
+ %C = getelementptr %struct.B, %struct.B* %B, i32 1
%X = bitcast %struct.B* %C to i32*
- %Y = getelementptr %struct.A* %A, i32 0, i32 1
+ %Y = getelementptr %struct.A, %struct.A* %A, i32 0, i32 1
ret void
}
@@ -181,12 +181,12 @@ entry:
define void @bar() {
%M = alloca %struct.A
- %N = getelementptr %struct.A* %M, i32 0, i32 0
+ %N = getelementptr %struct.A, %struct.A* %M, i32 0, i32 0
%O = bitcast %struct.B* %N to %struct.A*
- %P = getelementptr %struct.A* %O, i32 0, i32 1
- %R = getelementptr %struct.B* %N, i32 1
+ %P = getelementptr %struct.A, %struct.A* %O, i32 0, i32 1
+ %R = getelementptr %struct.B, %struct.B* %N, i32 1
%W = bitcast %struct.B* %R to i32*
- %V = getelementptr %struct.A* %M, i32 0, i32 1
+ %V = getelementptr %struct.A, %struct.A* %M, i32 0, i32 1
ret void
}
@@ -200,9 +200,9 @@ entry:
for.body: ; preds = %entry, %for.body
%i = phi i64 [ %inc, %for.body ], [ 0, %entry ] ; <i64> [#uses=2]
%inc = add nsw i64 %i, 1 ; <i64> [#uses=2]
- %arrayidx = getelementptr inbounds i64* %p, i64 %inc
+ %arrayidx = getelementptr inbounds i64, i64* %p, i64 %inc
store i64 0, i64* %arrayidx
- %tmp6 = load i64* %p ; <i64> [#uses=1]
+ %tmp6 = load i64, i64* %p ; <i64> [#uses=1]
%cmp = icmp slt i64 %inc, %tmp6 ; <i1> [#uses=1]
br i1 %cmp, label %for.body, label %for.end
diff --git a/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll b/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll
index 3ca32bd..77f3482 100644
--- a/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll
+++ b/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll
@@ -66,7 +66,7 @@ define i64 @bad.1(i32 %start, i32 %low.limit, i32 %high.limit, i1* %unknown) {
br i1 %break.early, label %continue.1, label %early.exit
continue.1:
- %cond = load volatile i1* %unknown
+ %cond = load volatile i1, i1* %unknown
%idx.inc = add nsw i32 %idx, 1
br i1 %cond, label %loop, label %continue
diff --git a/test/Analysis/ScalarEvolution/sext-inreg.ll b/test/Analysis/ScalarEvolution/sext-inreg.ll
index 8b3d641..8f1d5bd 100644
--- a/test/Analysis/ScalarEvolution/sext-inreg.ll
+++ b/test/Analysis/ScalarEvolution/sext-inreg.ll
@@ -16,7 +16,7 @@ bb: ; preds = %bb, %entry
%t2 = ashr i64 %t1, 7 ; <i32> [#uses=1]
%s1 = shl i64 %i.01, 5 ; <i32> [#uses=1]
%s2 = ashr i64 %s1, 5 ; <i32> [#uses=1]
- %t3 = getelementptr i64* %x, i64 %i.01 ; <i64*> [#uses=1]
+ %t3 = getelementptr i64, i64* %x, i64 %i.01 ; <i64*> [#uses=1]
store i64 0, i64* %t3, align 1
%indvar.next = add i64 %i.01, 199 ; <i32> [#uses=2]
%exitcond = icmp eq i64 %indvar.next, %n ; <i1> [#uses=1]
diff --git a/test/Analysis/ScalarEvolution/sext-iv-0.ll b/test/Analysis/ScalarEvolution/sext-iv-0.ll
index d5d3268..5634078 100644
--- a/test/Analysis/ScalarEvolution/sext-iv-0.ll
+++ b/test/Analysis/ScalarEvolution/sext-iv-0.ll
@@ -13,21 +13,21 @@ bb1.thread:
bb1: ; preds = %bb1, %bb1.thread
%i.0.reg2mem.0 = phi i64 [ -128, %bb1.thread ], [ %8, %bb1 ] ; <i64> [#uses=3]
; CHECK: %i.0.reg2mem.0
-; CHECK-NEXT: --> {-128,+,1}<%bb1> Exits: 127
+; CHECK-NEXT: --> {-128,+,1}<%bb1>{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: 127
%0 = trunc i64 %i.0.reg2mem.0 to i8 ; <i8> [#uses=1]
; CHECK: %0
-; CHECK-NEXT: --> {-128,+,1}<%bb1> Exits: 127
+; CHECK-NEXT: --> {-128,+,1}<%bb1>{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: 127
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
; CHECK: %1
-; CHECK-NEXT: --> {-128,+,1}<%bb1> Exits: 127
+; CHECK-NEXT: --> {-128,+,1}<%bb1>{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: 127
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
; CHECK: %2
-; CHECK-NEXT: --> {-128,+,1}<nsw><%bb1> Exits: 127
- %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double* %3, align 8 ; <double> [#uses=1]
+; CHECK-NEXT: --> {-128,+,1}<nsw><%bb1>{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: 127
+ %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
+ %4 = load double, double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
+ %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
store double %5, double* %7, align 8
%8 = add i64 %i.0.reg2mem.0, 1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 127 ; <i1> [#uses=1]
diff --git a/test/Analysis/ScalarEvolution/sext-iv-1.ll b/test/Analysis/ScalarEvolution/sext-iv-1.ll
index a6f70db..575b744 100644
--- a/test/Analysis/ScalarEvolution/sext-iv-1.ll
+++ b/test/Analysis/ScalarEvolution/sext-iv-1.ll
@@ -23,11 +23,11 @@ bb1: ; preds = %bb1, %bb1.thread
%0 = trunc i64 %i.0.reg2mem.0 to i7 ; <i8> [#uses=1]
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
- %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double* %3, align 8 ; <double> [#uses=1]
+ %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
+ %4 = load double, double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i7 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
+ %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
store double %5, double* %7, align 8
%8 = add i64 %i.0.reg2mem.0, 1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 127 ; <i1> [#uses=1]
@@ -46,11 +46,11 @@ bb1: ; preds = %bb1, %bb1.thread
%0 = trunc i64 %i.0.reg2mem.0 to i8 ; <i8> [#uses=1]
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
- %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double* %3, align 8 ; <double> [#uses=1]
+ %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
+ %4 = load double, double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
+ %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
store double %5, double* %7, align 8
%8 = add i64 %i.0.reg2mem.0, 1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 128 ; <i1> [#uses=1]
@@ -69,11 +69,11 @@ bb1: ; preds = %bb1, %bb1.thread
%0 = trunc i64 %i.0.reg2mem.0 to i8 ; <i8> [#uses=1]
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
- %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double* %3, align 8 ; <double> [#uses=1]
+ %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
+ %4 = load double, double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
+ %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
store double %5, double* %7, align 8
%8 = add i64 %i.0.reg2mem.0, 1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 127 ; <i1> [#uses=1]
@@ -92,11 +92,11 @@ bb1: ; preds = %bb1, %bb1.thread
%0 = trunc i64 %i.0.reg2mem.0 to i8 ; <i8> [#uses=1]
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
- %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double* %3, align 8 ; <double> [#uses=1]
+ %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
+ %4 = load double, double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
+ %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
store double %5, double* %7, align 8
%8 = add i64 %i.0.reg2mem.0, -1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 127 ; <i1> [#uses=1]
diff --git a/test/Analysis/ScalarEvolution/sext-iv-2.ll b/test/Analysis/ScalarEvolution/sext-iv-2.ll
index 97e252c..8749ff3 100644
--- a/test/Analysis/ScalarEvolution/sext-iv-2.ll
+++ b/test/Analysis/ScalarEvolution/sext-iv-2.ll
@@ -1,9 +1,9 @@
; RUN: opt < %s -analyze -scalar-evolution | FileCheck %s
; CHECK: %tmp3 = sext i8 %tmp2 to i32
-; CHECK: --> (sext i8 {0,+,1}<%bb1> to i32) Exits: -1
+; CHECK: --> (sext i8 {0,+,1}<%bb1> to i32){{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: -1
; CHECK: %tmp4 = mul i32 %tmp3, %i.02
-; CHECK: --> ((sext i8 {0,+,1}<%bb1> to i32) * {0,+,1}<%bb>) Exits: {0,+,-1}<%bb>
+; CHECK: --> ((sext i8 {0,+,1}<%bb1> to i32) * {0,+,1}<%bb>){{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: {0,+,-1}<%bb>
; These sexts are not foldable.
@@ -32,7 +32,7 @@ bb1: ; preds = %bb2, %bb.nph
%tmp4 = mul i32 %tmp3, %i.02 ; <i32> [#uses=1]
%tmp5 = sext i32 %i.02 to i64 ; <i64> [#uses=1]
%tmp6 = sext i32 %j.01 to i64 ; <i64> [#uses=1]
- %tmp7 = getelementptr [32 x [256 x i32]]* @table, i64 0, i64 %tmp5, i64 %tmp6 ; <i32*> [#uses=1]
+ %tmp7 = getelementptr [32 x [256 x i32]], [32 x [256 x i32]]* @table, i64 0, i64 %tmp5, i64 %tmp6 ; <i32*> [#uses=1]
store i32 %tmp4, i32* %tmp7, align 4
%tmp8 = add i32 %j.01, 1 ; <i32> [#uses=2]
br label %bb2
@@ -56,7 +56,7 @@ bb4.bb5_crit_edge: ; preds = %bb4
br label %bb5
bb5: ; preds = %bb4.bb5_crit_edge, %entry
- %tmp12 = load i32* getelementptr ([32 x [256 x i32]]* @table, i64 0, i64 9, i64 132), align 16 ; <i32> [#uses=1]
+ %tmp12 = load i32, i32* getelementptr ([32 x [256 x i32]], [32 x [256 x i32]]* @table, i64 0, i64 9, i64 132), align 16 ; <i32> [#uses=1]
%tmp13 = icmp eq i32 %tmp12, -1116 ; <i1> [#uses=1]
br i1 %tmp13, label %bb7, label %bb6
diff --git a/test/Analysis/ScalarEvolution/sle.ll b/test/Analysis/ScalarEvolution/sle.ll
index f38f6b6..f24c480 100644
--- a/test/Analysis/ScalarEvolution/sle.ll
+++ b/test/Analysis/ScalarEvolution/sle.ll
@@ -14,8 +14,8 @@ entry:
for.body: ; preds = %for.body, %entry
%i = phi i64 [ %i.next, %for.body ], [ 0, %entry ] ; <i64> [#uses=2]
- %arrayidx = getelementptr double* %p, i64 %i ; <double*> [#uses=2]
- %t4 = load double* %arrayidx ; <double> [#uses=1]
+ %arrayidx = getelementptr double, double* %p, i64 %i ; <double*> [#uses=2]
+ %t4 = load double, double* %arrayidx ; <double> [#uses=1]
%mul = fmul double %t4, 2.200000e+00 ; <double> [#uses=1]
store double %mul, double* %arrayidx
%i.next = add nsw i64 %i, 1 ; <i64> [#uses=2]
diff --git a/test/Analysis/ScalarEvolution/trip-count.ll b/test/Analysis/ScalarEvolution/trip-count.ll
index f89125a..1b75c88 100644
--- a/test/Analysis/ScalarEvolution/trip-count.ll
+++ b/test/Analysis/ScalarEvolution/trip-count.ll
@@ -1,16 +1,20 @@
; RUN: opt < %s -analyze -scalar-evolution -scalar-evolution-max-iterations=0 | FileCheck %s
; PR1101
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
@A = weak global [1000 x i32] zeroinitializer, align 32
+; CHECK: Printing analysis 'Scalar Evolution Analysis' for function 'test1':
; CHECK: backedge-taken count is 10000
-define void @test(i32 %N) {
+define void @test1(i32 %N) {
entry:
br label %bb3
bb: ; preds = %bb3
- %tmp = getelementptr [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
+ %tmp = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
store i32 123, i32* %tmp
%tmp2 = add i32 %i.0, 1 ; <i32> [#uses=1]
br label %bb3
@@ -26,3 +30,61 @@ bb5: ; preds = %bb3
return: ; preds = %bb5
ret void
}
+
+; PR22795
+; CHECK: Printing analysis 'Scalar Evolution Analysis' for function 'test2':
+; CHECK: %iv = phi i32 [ -1, %entry ], [ %next.1, %for.inc.1 ]
+; CHECK-NEXT: --> {-1,+,2}<%preheader> U: full-set S: full-set Exits: 13
+
+define i32 @test2() {
+entry:
+ %bins = alloca [16 x i64], align 16
+ %0 = bitcast [16 x i64]* %bins to i8*
+ call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 128, i32 16, i1 false)
+ br label %preheader
+
+preheader: ; preds = %for.inc.1, %entry
+ %v11 = phi i64 [ 0, %entry ], [ %next12.1, %for.inc.1 ]
+ %iv = phi i32 [ -1, %entry ], [ %next.1, %for.inc.1 ]
+ %cmp = icmp sgt i64 %v11, 0
+ br i1 %cmp, label %for.body, label %for.inc
+
+for.body: ; preds = %preheader
+ %zext = zext i32 %iv to i64
+ %arrayidx = getelementptr [16 x i64], [16 x i64]* %bins, i64 0, i64 %v11
+ %loaded = load i64, i64* %arrayidx, align 8
+ %add = add i64 %loaded, 1
+ %add2 = add i64 %add, %zext
+ store i64 %add2, i64* %arrayidx, align 8
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %preheader
+ %next12 = add nuw nsw i64 %v11, 1
+ %next = add nsw i32 %iv, 1
+ br i1 true, label %for.body.1, label %for.inc.1
+
+end: ; preds = %for.inc.1
+ %arrayidx8 = getelementptr [16 x i64], [16 x i64]* %bins, i64 0, i64 2
+ %load = load i64, i64* %arrayidx8, align 16
+ %shr4 = lshr i64 %load, 32
+ %conv = trunc i64 %shr4 to i32
+ ret i32 %conv
+
+for.body.1: ; preds = %for.inc
+ %zext.1 = zext i32 %next to i64
+ %arrayidx.1 = getelementptr [16 x i64], [16 x i64]* %bins, i64 0, i64 %next12
+ %loaded.1 = load i64, i64* %arrayidx.1, align 8
+ %add.1 = add i64 %loaded.1, 1
+ %add2.1 = add i64 %add.1, %zext.1
+ store i64 %add2.1, i64* %arrayidx.1, align 8
+ br label %for.inc.1
+
+for.inc.1: ; preds = %for.body.1, %for.inc
+ %next12.1 = add nuw nsw i64 %next12, 1
+ %next.1 = add nuw nsw i32 %next, 1
+ %exitcond.1 = icmp eq i64 %next12.1, 16
+ br i1 %exitcond.1, label %end, label %preheader
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #0
diff --git a/test/Analysis/ScalarEvolution/trip-count11.ll b/test/Analysis/ScalarEvolution/trip-count11.ll
index e14af08..819a89e 100644
--- a/test/Analysis/ScalarEvolution/trip-count11.ll
+++ b/test/Analysis/ScalarEvolution/trip-count11.ll
@@ -13,15 +13,15 @@ entry:
for.cond: ; preds = %for.inc, %entry
%sum.0 = phi i32 [ 0, %entry ], [ %add, %for.inc ]
-; CHECK: --> %sum.0 Exits: 28
+; CHECK: --> %sum.0{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: 28
%i.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
%cmp = icmp ult i32 %i.0, 8
br i1 %cmp, label %for.inc, label %for.end
for.inc: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
- %arrayidx = getelementptr inbounds [8 x i32]* @foo.a, i64 0, i64 %idxprom
- %0 = load i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* @foo.a, i64 0, i64 %idxprom
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %sum.0, %0
%inc = add nsw i32 %i.0, 1
br label %for.cond
@@ -36,15 +36,15 @@ entry:
for.cond: ; preds = %for.inc, %entry
%sum.0 = phi i32 [ 0, %entry ], [ %add, %for.inc ]
-; CHECK: --> %sum.0 Exits: 28
+; CHECK: --> %sum.0{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: 28
%i.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
%cmp = icmp ult i32 %i.0, 8
br i1 %cmp, label %for.inc, label %for.end
for.inc: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
- %arrayidx = getelementptr inbounds [8 x i32] addrspace(1)* @foo.a_as1, i64 0, i64 %idxprom
- %0 = load i32 addrspace(1)* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [8 x i32], [8 x i32] addrspace(1)* @foo.a_as1, i64 0, i64 %idxprom
+ %0 = load i32, i32 addrspace(1)* %arrayidx, align 4
%add = add nsw i32 %sum.0, %0
%inc = add nsw i32 %i.0, 1
br label %for.cond
diff --git a/test/Analysis/ScalarEvolution/trip-count12.ll b/test/Analysis/ScalarEvolution/trip-count12.ll
index 8f960e1..d0086ee 100644
--- a/test/Analysis/ScalarEvolution/trip-count12.ll
+++ b/test/Analysis/ScalarEvolution/trip-count12.ll
@@ -16,8 +16,8 @@ for.body: ; preds = %for.body, %for.body
%p.addr.05 = phi i16* [ %incdec.ptr, %for.body ], [ %p, %for.body.preheader ]
%len.addr.04 = phi i32 [ %sub, %for.body ], [ %len, %for.body.preheader ]
%res.03 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
- %incdec.ptr = getelementptr inbounds i16* %p.addr.05, i32 1
- %0 = load i16* %p.addr.05, align 2
+ %incdec.ptr = getelementptr inbounds i16, i16* %p.addr.05, i32 1
+ %0 = load i16, i16* %p.addr.05, align 2
%conv = zext i16 %0 to i32
%add = add i32 %conv, %res.03
%sub = add nsw i32 %len.addr.04, -2
diff --git a/test/Analysis/ScalarEvolution/trip-count2.ll b/test/Analysis/ScalarEvolution/trip-count2.ll
index e76488a..d988eff 100644
--- a/test/Analysis/ScalarEvolution/trip-count2.ll
+++ b/test/Analysis/ScalarEvolution/trip-count2.ll
@@ -10,7 +10,7 @@ entry:
br label %bb3
bb: ; preds = %bb3
- %tmp = getelementptr [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
+ %tmp = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
store i32 123, i32* %tmp
%tmp4 = mul i32 %i.0, 4 ; <i32> [#uses=1]
%tmp5 = or i32 %tmp4, 1
diff --git a/test/Analysis/ScalarEvolution/trip-count3.ll b/test/Analysis/ScalarEvolution/trip-count3.ll
index 850e035..cce0182 100644
--- a/test/Analysis/ScalarEvolution/trip-count3.ll
+++ b/test/Analysis/ScalarEvolution/trip-count3.ll
@@ -48,10 +48,10 @@ sha_update.exit.exitStub: ; preds = %bb3.i
ret void
bb2.i: ; preds = %bb3.i
- %1 = getelementptr %struct.SHA_INFO* %sha_info, i64 0, i32 3
+ %1 = getelementptr %struct.SHA_INFO, %struct.SHA_INFO* %sha_info, i64 0, i32 3
%2 = bitcast [16 x i32]* %1 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %buffer_addr.0.i, i64 64, i32 1, i1 false)
- %3 = getelementptr %struct.SHA_INFO* %sha_info, i64 0, i32 3, i64 0
+ %3 = getelementptr %struct.SHA_INFO, %struct.SHA_INFO* %sha_info, i64 0, i32 3, i64 0
%4 = bitcast i32* %3 to i8*
br label %codeRepl
@@ -61,7 +61,7 @@ codeRepl: ; preds = %bb2.i
byte_reverse.exit.i: ; preds = %codeRepl
call fastcc void @sha_transform(%struct.SHA_INFO* %sha_info) nounwind
- %5 = getelementptr i8* %buffer_addr.0.i, i64 64
+ %5 = getelementptr i8, i8* %buffer_addr.0.i, i64 64
%6 = add i32 %count_addr.0.i, -64
br label %bb3.i
diff --git a/test/Analysis/ScalarEvolution/trip-count4.ll b/test/Analysis/ScalarEvolution/trip-count4.ll
index b7184a4..966ffd2 100644
--- a/test/Analysis/ScalarEvolution/trip-count4.ll
+++ b/test/Analysis/ScalarEvolution/trip-count4.ll
@@ -12,8 +12,8 @@ loop: ; preds = %loop, %entry
%indvar = phi i64 [ %n, %entry ], [ %indvar.next, %loop ] ; <i64> [#uses=4]
%s0 = shl i64 %indvar, 8 ; <i64> [#uses=1]
%indvar.i8 = ashr i64 %s0, 8 ; <i64> [#uses=1]
- %t0 = getelementptr double* %d, i64 %indvar.i8 ; <double*> [#uses=2]
- %t1 = load double* %t0 ; <double> [#uses=1]
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8 ; <double*> [#uses=2]
+ %t1 = load double, double* %t0 ; <double> [#uses=1]
%t2 = fmul double %t1, 1.000000e-01 ; <double> [#uses=1]
store double %t2, double* %t0
%indvar.next = sub i64 %indvar, 1 ; <i64> [#uses=2]
diff --git a/test/Analysis/ScalarEvolution/trip-count5.ll b/test/Analysis/ScalarEvolution/trip-count5.ll
index 68a1ae1..dc02fed 100644
--- a/test/Analysis/ScalarEvolution/trip-count5.ll
+++ b/test/Analysis/ScalarEvolution/trip-count5.ll
@@ -9,7 +9,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define float @t(float* %pTmp1, float* %peakWeight, float* %nrgReducePeakrate, i32 %bim) nounwind {
entry:
- %tmp3 = load float* %peakWeight, align 4 ; <float> [#uses=2]
+ %tmp3 = load float, float* %peakWeight, align 4 ; <float> [#uses=2]
%tmp2538 = icmp sgt i32 %bim, 0 ; <i1> [#uses=1]
br i1 %tmp2538, label %bb.nph, label %bb4
@@ -21,13 +21,13 @@ bb: ; preds = %bb1, %bb.nph
%hiPart.035 = phi i32 [ %tmp12, %bb1 ], [ 0, %bb.nph ] ; <i32> [#uses=2]
%peakCount.034 = phi float [ %tmp19, %bb1 ], [ %tmp3, %bb.nph ] ; <float> [#uses=1]
%tmp6 = sext i32 %hiPart.035 to i64 ; <i64> [#uses=1]
- %tmp7 = getelementptr float* %pTmp1, i64 %tmp6 ; <float*> [#uses=1]
- %tmp8 = load float* %tmp7, align 4 ; <float> [#uses=1]
+ %tmp7 = getelementptr float, float* %pTmp1, i64 %tmp6 ; <float*> [#uses=1]
+ %tmp8 = load float, float* %tmp7, align 4 ; <float> [#uses=1]
%tmp10 = fadd float %tmp8, %distERBhi.036 ; <float> [#uses=3]
%tmp12 = add i32 %hiPart.035, 1 ; <i32> [#uses=3]
%tmp15 = sext i32 %tmp12 to i64 ; <i64> [#uses=1]
- %tmp16 = getelementptr float* %peakWeight, i64 %tmp15 ; <float*> [#uses=1]
- %tmp17 = load float* %tmp16, align 4 ; <float> [#uses=1]
+ %tmp16 = getelementptr float, float* %peakWeight, i64 %tmp15 ; <float*> [#uses=1]
+ %tmp17 = load float, float* %tmp16, align 4 ; <float> [#uses=1]
%tmp19 = fadd float %tmp17, %peakCount.034 ; <float> [#uses=2]
br label %bb1
diff --git a/test/Analysis/ScalarEvolution/trip-count6.ll b/test/Analysis/ScalarEvolution/trip-count6.ll
index 0f394a0..7980bbd 100644
--- a/test/Analysis/ScalarEvolution/trip-count6.ll
+++ b/test/Analysis/ScalarEvolution/trip-count6.ll
@@ -12,8 +12,8 @@ entry:
bb: ; preds = %bb4, %entry
%mode.0 = phi i8 [ 0, %entry ], [ %indvar.next, %bb4 ] ; <i8> [#uses=4]
zext i8 %mode.0 to i32 ; <i32>:1 [#uses=1]
- getelementptr [4 x i32]* @mode_table, i32 0, i32 %1 ; <i32*>:2 [#uses=1]
- load i32* %2, align 4 ; <i32>:3 [#uses=1]
+ getelementptr [4 x i32], [4 x i32]* @mode_table, i32 0, i32 %1 ; <i32*>:2 [#uses=1]
+ load i32, i32* %2, align 4 ; <i32>:3 [#uses=1]
icmp eq i32 %3, %0 ; <i1>:4 [#uses=1]
br i1 %4, label %bb1, label %bb2
diff --git a/test/Analysis/ScalarEvolution/trip-count7.ll b/test/Analysis/ScalarEvolution/trip-count7.ll
index d01a18a..bbe76c4 100644
--- a/test/Analysis/ScalarEvolution/trip-count7.ll
+++ b/test/Analysis/ScalarEvolution/trip-count7.ll
@@ -72,57 +72,57 @@ bb.i: ; preds = %bb7.i
%tmp = add i32 %j.0.i, 1 ; <i32> [#uses=5]
store i32 0, i32* %q, align 4
%tmp1 = sext i32 %tmp to i64 ; <i64> [#uses=1]
- %tmp2 = getelementptr [9 x i32]* %a, i64 0, i64 %tmp1 ; <i32*> [#uses=1]
- %tmp3 = load i32* %tmp2, align 4 ; <i32> [#uses=1]
+ %tmp2 = getelementptr [9 x i32], [9 x i32]* %a, i64 0, i64 %tmp1 ; <i32*> [#uses=1]
+ %tmp3 = load i32, i32* %tmp2, align 4 ; <i32> [#uses=1]
%tmp4 = icmp eq i32 %tmp3, 0 ; <i1> [#uses=1]
br i1 %tmp4, label %bb.i.bb7.i.backedge_crit_edge, label %bb1.i
bb1.i: ; preds = %bb.i
%tmp5 = add i32 %j.0.i, 2 ; <i32> [#uses=1]
%tmp6 = sext i32 %tmp5 to i64 ; <i64> [#uses=1]
- %tmp7 = getelementptr [17 x i32]* %b, i64 0, i64 %tmp6 ; <i32*> [#uses=1]
- %tmp8 = load i32* %tmp7, align 4 ; <i32> [#uses=1]
+ %tmp7 = getelementptr [17 x i32], [17 x i32]* %b, i64 0, i64 %tmp6 ; <i32*> [#uses=1]
+ %tmp8 = load i32, i32* %tmp7, align 4 ; <i32> [#uses=1]
%tmp9 = icmp eq i32 %tmp8, 0 ; <i1> [#uses=1]
br i1 %tmp9, label %bb1.i.bb7.i.backedge_crit_edge, label %bb2.i
bb2.i: ; preds = %bb1.i
%tmp10 = sub i32 7, %j.0.i ; <i32> [#uses=1]
%tmp11 = sext i32 %tmp10 to i64 ; <i64> [#uses=1]
- %tmp12 = getelementptr [15 x i32]* %c, i64 0, i64 %tmp11 ; <i32*> [#uses=1]
- %tmp13 = load i32* %tmp12, align 4 ; <i32> [#uses=1]
+ %tmp12 = getelementptr [15 x i32], [15 x i32]* %c, i64 0, i64 %tmp11 ; <i32*> [#uses=1]
+ %tmp13 = load i32, i32* %tmp12, align 4 ; <i32> [#uses=1]
%tmp14 = icmp eq i32 %tmp13, 0 ; <i1> [#uses=1]
br i1 %tmp14, label %bb2.i.bb7.i.backedge_crit_edge, label %bb3.i
bb3.i: ; preds = %bb2.i
- %tmp15 = getelementptr [9 x i32]* %x1, i64 0, i64 1 ; <i32*> [#uses=1]
+ %tmp15 = getelementptr [9 x i32], [9 x i32]* %x1, i64 0, i64 1 ; <i32*> [#uses=1]
store i32 %tmp, i32* %tmp15, align 4
%tmp16 = sext i32 %tmp to i64 ; <i64> [#uses=1]
- %tmp17 = getelementptr [9 x i32]* %a, i64 0, i64 %tmp16 ; <i32*> [#uses=1]
+ %tmp17 = getelementptr [9 x i32], [9 x i32]* %a, i64 0, i64 %tmp16 ; <i32*> [#uses=1]
store i32 0, i32* %tmp17, align 4
%tmp18 = add i32 %j.0.i, 2 ; <i32> [#uses=1]
%tmp19 = sext i32 %tmp18 to i64 ; <i64> [#uses=1]
- %tmp20 = getelementptr [17 x i32]* %b, i64 0, i64 %tmp19 ; <i32*> [#uses=1]
+ %tmp20 = getelementptr [17 x i32], [17 x i32]* %b, i64 0, i64 %tmp19 ; <i32*> [#uses=1]
store i32 0, i32* %tmp20, align 4
%tmp21 = sub i32 7, %j.0.i ; <i32> [#uses=1]
%tmp22 = sext i32 %tmp21 to i64 ; <i64> [#uses=1]
- %tmp23 = getelementptr [15 x i32]* %c, i64 0, i64 %tmp22 ; <i32*> [#uses=1]
+ %tmp23 = getelementptr [15 x i32], [15 x i32]* %c, i64 0, i64 %tmp22 ; <i32*> [#uses=1]
store i32 0, i32* %tmp23, align 4
call void @Try(i32 2, i32* %q, i32* %b9, i32* %a10, i32* %c11, i32* %x1.sub) nounwind
- %tmp24 = load i32* %q, align 4 ; <i32> [#uses=1]
+ %tmp24 = load i32, i32* %q, align 4 ; <i32> [#uses=1]
%tmp25 = icmp eq i32 %tmp24, 0 ; <i1> [#uses=1]
br i1 %tmp25, label %bb5.i, label %bb3.i.bb7.i.backedge_crit_edge
bb5.i: ; preds = %bb3.i
%tmp26 = sext i32 %tmp to i64 ; <i64> [#uses=1]
- %tmp27 = getelementptr [9 x i32]* %a, i64 0, i64 %tmp26 ; <i32*> [#uses=1]
+ %tmp27 = getelementptr [9 x i32], [9 x i32]* %a, i64 0, i64 %tmp26 ; <i32*> [#uses=1]
store i32 1, i32* %tmp27, align 4
%tmp28 = add i32 %j.0.i, 2 ; <i32> [#uses=1]
%tmp29 = sext i32 %tmp28 to i64 ; <i64> [#uses=1]
- %tmp30 = getelementptr [17 x i32]* %b, i64 0, i64 %tmp29 ; <i32*> [#uses=1]
+ %tmp30 = getelementptr [17 x i32], [17 x i32]* %b, i64 0, i64 %tmp29 ; <i32*> [#uses=1]
store i32 1, i32* %tmp30, align 4
%tmp31 = sub i32 7, %j.0.i ; <i32> [#uses=1]
%tmp32 = sext i32 %tmp31 to i64 ; <i64> [#uses=1]
- %tmp33 = getelementptr [15 x i32]* %c, i64 0, i64 %tmp32 ; <i32*> [#uses=1]
+ %tmp33 = getelementptr [15 x i32], [15 x i32]* %c, i64 0, i64 %tmp32 ; <i32*> [#uses=1]
store i32 1, i32* %tmp33, align 4
br label %bb7.i.backedge
@@ -131,7 +131,7 @@ bb7.i.backedge: ; preds = %bb3.i.bb7.i.backedge_crit_edge, %bb2.i.bb7.i.backedg
bb7.i: ; preds = %bb7.i.backedge, %newFuncRoot
%j.0.i = phi i32 [ 0, %newFuncRoot ], [ %tmp, %bb7.i.backedge ] ; <i32> [#uses=8]
- %tmp34 = load i32* %q, align 4 ; <i32> [#uses=1]
+ %tmp34 = load i32, i32* %q, align 4 ; <i32> [#uses=1]
%tmp35 = icmp eq i32 %tmp34, 0 ; <i1> [#uses=1]
%tmp36 = icmp ne i32 %j.0.i, 8 ; <i1> [#uses=1]
%tmp37 = and i1 %tmp35, %tmp36 ; <i1> [#uses=1]
diff --git a/test/Analysis/ScalarEvolution/zext-signed-addrec.ll b/test/Analysis/ScalarEvolution/zext-signed-addrec.ll
index 4369820..31ebb3e 100644
--- a/test/Analysis/ScalarEvolution/zext-signed-addrec.ll
+++ b/test/Analysis/ScalarEvolution/zext-signed-addrec.ll
@@ -15,16 +15,16 @@ target triple = "x86_64-unknown-linux-gnu"
; CHECK-LABEL: foo
define i32 @foo() {
entry:
- %.pr = load i32* @b, align 4
+ %.pr = load i32, i32* @b, align 4
%cmp10 = icmp slt i32 %.pr, 1
br i1 %cmp10, label %for.cond1.preheader.lr.ph, label %entry.for.end9_crit_edge
entry.for.end9_crit_edge: ; preds = %entry
- %.pre = load i32* @c, align 4
+ %.pre = load i32, i32* @c, align 4
br label %for.end9
for.cond1.preheader.lr.ph: ; preds = %entry
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.cond1.preheader.for.cond1.preheader.split_crit_edge, label %return.loopexit.split
@@ -63,7 +63,7 @@ for.cond.for.end9_crit_edge: ; preds = %for.inc8
for.end9: ; preds = %entry.for.end9_crit_edge, %for.cond.for.end9_crit_edge
%3 = phi i32 [ %.pre, %entry.for.end9_crit_edge ], [ %shl, %for.cond.for.end9_crit_edge ]
- %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %3) #2
+ %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %3) #2
br label %return
return.loopexit.split: ; preds = %for.cond1.preheader.lr.ph
diff --git a/test/Analysis/ScalarEvolution/zext-wrap.ll b/test/Analysis/ScalarEvolution/zext-wrap.ll
index 104ed41..f56e455 100644
--- a/test/Analysis/ScalarEvolution/zext-wrap.ll
+++ b/test/Analysis/ScalarEvolution/zext-wrap.ll
@@ -10,7 +10,7 @@ bb.i: ; preds = %bb1.i, %bb.nph
; This cast shouldn't be folded into the addrec.
; CHECK: %tmp = zext i8 %l_95.0.i1 to i16
-; CHECK: --> (zext i8 {0,+,-1}<%bb.i> to i16) Exits: 2
+; CHECK: --> (zext i8 {0,+,-1}<%bb.i> to i16){{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: 2
%tmp = zext i8 %l_95.0.i1 to i16
diff --git a/test/Analysis/ScopedNoAliasAA/basic-domains.ll b/test/Analysis/ScopedNoAliasAA/basic-domains.ll
index 7633a6d..c2b5bbd 100644
--- a/test/Analysis/ScopedNoAliasAA/basic-domains.ll
+++ b/test/Analysis/ScopedNoAliasAA/basic-domains.ll
@@ -5,16 +5,16 @@ target triple = "x86_64-unknown-linux-gnu"
define void @foo1(float* nocapture %a, float* nocapture readonly %c) #0 {
entry:
; CHECK-LABEL: Function: foo1
- %0 = load float* %c, align 4, !alias.scope !9
- %arrayidx.i = getelementptr inbounds float* %a, i64 5
+ %0 = load float, float* %c, align 4, !alias.scope !9
+ %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx.i, align 4, !noalias !6
- %1 = load float* %c, align 4, !alias.scope !5
- %arrayidx.i2 = getelementptr inbounds float* %a, i64 15
+ %1 = load float, float* %c, align 4, !alias.scope !5
+ %arrayidx.i2 = getelementptr inbounds float, float* %a, i64 15
store float %1, float* %arrayidx.i2, align 4, !noalias !6
- %2 = load float* %c, align 4, !alias.scope !6
- %arrayidx.i3 = getelementptr inbounds float* %a, i64 16
+ %2 = load float, float* %c, align 4, !alias.scope !6
+ %arrayidx.i3 = getelementptr inbounds float, float* %a, i64 16
store float %2, float* %arrayidx.i3, align 4, !noalias !5
ret void
@@ -42,15 +42,15 @@ attributes #0 = { nounwind uwtable }
; A list of scopes from both domains.
!9 = !{!2, !4, !7}
-; CHECK: NoAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
-; CHECK: NoAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6
-; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7
-; CHECK: NoAlias: %1 = load float* %c, align 4, !alias.scope !7 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
-; CHECK: NoAlias: %1 = load float* %c, align 4, !alias.scope !7 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6
-; CHECK: NoAlias: %1 = load float* %c, align 4, !alias.scope !7 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7
-; CHECK: NoAlias: %2 = load float* %c, align 4, !alias.scope !6 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
-; CHECK: NoAlias: %2 = load float* %c, align 4, !alias.scope !6 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6
-; CHECK: MayAlias: %2 = load float* %c, align 4, !alias.scope !6 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7
+; CHECK: NoAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
+; CHECK: NoAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6
+; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7
+; CHECK: NoAlias: %1 = load float, float* %c, align 4, !alias.scope !7 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
+; CHECK: NoAlias: %1 = load float, float* %c, align 4, !alias.scope !7 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6
+; CHECK: NoAlias: %1 = load float, float* %c, align 4, !alias.scope !7 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7
+; CHECK: NoAlias: %2 = load float, float* %c, align 4, !alias.scope !6 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
+; CHECK: NoAlias: %2 = load float, float* %c, align 4, !alias.scope !6 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6
+; CHECK: MayAlias: %2 = load float, float* %c, align 4, !alias.scope !6 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7
; CHECK: NoAlias: store float %1, float* %arrayidx.i2, align 4, !noalias !6 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
; CHECK: NoAlias: store float %2, float* %arrayidx.i3, align 4, !noalias !7 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
; CHECK: NoAlias: store float %2, float* %arrayidx.i3, align 4, !noalias !7 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6
diff --git a/test/Analysis/ScopedNoAliasAA/basic.ll b/test/Analysis/ScopedNoAliasAA/basic.ll
index bb232b5..2625834 100644
--- a/test/Analysis/ScopedNoAliasAA/basic.ll
+++ b/test/Analysis/ScopedNoAliasAA/basic.ll
@@ -5,18 +5,18 @@ target triple = "x86_64-unknown-linux-gnu"
define void @foo1(float* nocapture %a, float* nocapture readonly %c) #0 {
entry:
; CHECK-LABEL: Function: foo1
- %0 = load float* %c, align 4, !alias.scope !1
- %arrayidx.i = getelementptr inbounds float* %a, i64 5
+ %0 = load float, float* %c, align 4, !alias.scope !1
+ %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx.i, align 4, !noalias !1
- %1 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 7
+ %1 = load float, float* %c, align 4
+ %arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %1, float* %arrayidx, align 4
ret void
-; CHECK: NoAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0
-; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx, align 4
-; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0
-; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %1, float* %arrayidx, align 4
+; CHECK: NoAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0
+; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx, align 4
+; CHECK: MayAlias: %1 = load float, float* %c, align 4 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0
+; CHECK: MayAlias: %1 = load float, float* %c, align 4 <-> store float %1, float* %arrayidx, align 4
; CHECK: NoAlias: store float %1, float* %arrayidx, align 4 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0
}
diff --git a/test/Analysis/ScopedNoAliasAA/basic2.ll b/test/Analysis/ScopedNoAliasAA/basic2.ll
index a154b13..a8a17e5 100644
--- a/test/Analysis/ScopedNoAliasAA/basic2.ll
+++ b/test/Analysis/ScopedNoAliasAA/basic2.ll
@@ -5,24 +5,24 @@ target triple = "x86_64-unknown-linux-gnu"
define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
entry:
; CHECK-LABEL: Function: foo2
- %0 = load float* %c, align 4, !alias.scope !0
- %arrayidx.i = getelementptr inbounds float* %a, i64 5
+ %0 = load float, float* %c, align 4, !alias.scope !0
+ %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx.i, align 4, !alias.scope !5, !noalias !4
- %arrayidx1.i = getelementptr inbounds float* %b, i64 8
+ %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noalias !5
- %1 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 7
+ %1 = load float, float* %c, align 4
+ %arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %1, float* %arrayidx, align 4
ret void
-; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalia
+; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalia
; CHECK: s !5
-; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noali
+; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noali
; CHECK: as !4
-; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx, align 4
-; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalias !5
-; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noalias !4
-; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %1, float* %arrayidx, align 4
+; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx, align 4
+; CHECK: MayAlias: %1 = load float, float* %c, align 4 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalias !5
+; CHECK: MayAlias: %1 = load float, float* %c, align 4 <-> store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noalias !4
+; CHECK: MayAlias: %1 = load float, float* %c, align 4 <-> store float %1, float* %arrayidx, align 4
; CHECK: NoAlias: store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noalias !4 <-> store float %0, float* %arrayidx.i, align
; CHECK: 4, !alias.scope !4, !noalias !5
; CHECK: NoAlias: store float %1, float* %arrayidx, align 4 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalias !5
diff --git a/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll b/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll
index 920d6f5..e78529c 100644
--- a/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll
+++ b/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll
@@ -16,11 +16,11 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define %structA** @test(%classA* %this, i32** %p1) #0 align 2 {
entry:
; CHECK-LABEL: @test
-; CHECK: load i32** %p1, align 8, !tbaa
-; CHECK: load i32** getelementptr (%classC* null, i32 0, i32 1, i32 0, i32 0), align 8, !tbaa
+; CHECK: load i32*, i32** %p1, align 8, !tbaa
+; CHECK: load i32*, i32** getelementptr (%classC, %classC* null, i32 0, i32 1, i32 0, i32 0), align 8, !tbaa
; CHECK: call void @callee
- %0 = load i32** %p1, align 8, !tbaa !1
- %1 = load i32** getelementptr (%classC* null, i32 0, i32 1, i32 0, i32 0), align 8, !tbaa !5
+ %0 = load i32*, i32** %p1, align 8, !tbaa !1
+ %1 = load i32*, i32** getelementptr (%classC, %classC* null, i32 0, i32 1, i32 0, i32 0), align 8, !tbaa !5
call void @callee(i32* %0, i32* %1)
unreachable
}
diff --git a/test/Analysis/TypeBasedAliasAnalysis/aliastest.ll b/test/Analysis/TypeBasedAliasAnalysis/aliastest.ll
index 10da13a..93c34f9 100644
--- a/test/Analysis/TypeBasedAliasAnalysis/aliastest.ll
+++ b/test/Analysis/TypeBasedAliasAnalysis/aliastest.ll
@@ -5,9 +5,9 @@
; CHECK: @test0_yes
; CHECK: add i8 %x, %x
define i8 @test0_yes(i8* %a, i8* %b) nounwind {
- %x = load i8* %a, !tbaa !1
+ %x = load i8, i8* %a, !tbaa !1
store i8 0, i8* %b, !tbaa !2
- %y = load i8* %a, !tbaa !1
+ %y = load i8, i8* %a, !tbaa !1
%z = add i8 %x, %y
ret i8 %z
}
@@ -15,9 +15,9 @@ define i8 @test0_yes(i8* %a, i8* %b) nounwind {
; CHECK: @test0_no
; CHECK: add i8 %x, %y
define i8 @test0_no(i8* %a, i8* %b) nounwind {
- %x = load i8* %a, !tbaa !3
+ %x = load i8, i8* %a, !tbaa !3
store i8 0, i8* %b, !tbaa !4
- %y = load i8* %a, !tbaa !3
+ %y = load i8, i8* %a, !tbaa !3
%z = add i8 %x, %y
ret i8 %z
}
@@ -27,9 +27,9 @@ define i8 @test0_no(i8* %a, i8* %b) nounwind {
; CHECK: @test1_yes
; CHECK: add i8 %x, %x
define i8 @test1_yes(i8* %a, i8* %b) nounwind {
- %x = load i8* %a, !tbaa !5
+ %x = load i8, i8* %a, !tbaa !5
store i8 0, i8* %b
- %y = load i8* %a, !tbaa !5
+ %y = load i8, i8* %a, !tbaa !5
%z = add i8 %x, %y
ret i8 %z
}
@@ -37,9 +37,9 @@ define i8 @test1_yes(i8* %a, i8* %b) nounwind {
; CHECK: @test1_no
; CHECK: add i8 %x, %y
define i8 @test1_no(i8* %a, i8* %b) nounwind {
- %x = load i8* %a, !tbaa !6
+ %x = load i8, i8* %a, !tbaa !6
store i8 0, i8* %b
- %y = load i8* %a, !tbaa !6
+ %y = load i8, i8* %a, !tbaa !6
%z = add i8 %x, %y
ret i8 %z
}
diff --git a/test/Analysis/TypeBasedAliasAnalysis/argument-promotion.ll b/test/Analysis/TypeBasedAliasAnalysis/argument-promotion.ll
index 31f775e..a7987f7 100644
--- a/test/Analysis/TypeBasedAliasAnalysis/argument-promotion.ll
+++ b/test/Analysis/TypeBasedAliasAnalysis/argument-promotion.ll
@@ -6,8 +6,8 @@ target datalayout = "E-p:64:64:64"
; CHECK-NOT: alloca
define internal i32 @test(i32* %X, i32* %Y, i32* %Q) {
store i32 77, i32* %Q, !tbaa !2
- %A = load i32* %X, !tbaa !1
- %B = load i32* %Y, !tbaa !1
+ %A = load i32, i32* %X, !tbaa !1
+ %B = load i32, i32* %Y, !tbaa !1
%C = add i32 %A, %B
ret i32 %C
}
diff --git a/test/Analysis/TypeBasedAliasAnalysis/cyclic.ll b/test/Analysis/TypeBasedAliasAnalysis/cyclic.ll
new file mode 100644
index 0000000..a88e26c
--- /dev/null
+++ b/test/Analysis/TypeBasedAliasAnalysis/cyclic.ll
@@ -0,0 +1,26 @@
+; RUN: not opt -instcombine < %s 2>&1 | FileCheck %s
+; CHECK: Cycle found in TBAA metadata.
+
+define void @test6(i32* %gi) #0 {
+entry:
+ store i32 42, i32* %gi, align 4, !tbaa !0
+ br label %for.cond
+
+for.cond: ; preds = %for.body, %entry
+ br i1 undef, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ store i32 undef, i32* %gi, align 4, !tbaa !2
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ ret void
+}
+
+attributes #0 = { nounwind ssp uwtable }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"Simple C/C++ TBAA"}
+!2 = distinct !{!3, !2, i64 0}
+!3 = !{!"int", !4}
+!4 = !{!"omnipotent ", !1}
diff --git a/test/Analysis/TypeBasedAliasAnalysis/dse.ll b/test/Analysis/TypeBasedAliasAnalysis/dse.ll
index 09f8feb..b6dc9b2 100644
--- a/test/Analysis/TypeBasedAliasAnalysis/dse.ll
+++ b/test/Analysis/TypeBasedAliasAnalysis/dse.ll
@@ -4,47 +4,47 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; DSE should make use of TBAA.
; CHECK: @test0_yes
-; CHECK-NEXT: load i8* %b
+; CHECK-NEXT: load i8, i8* %b
; CHECK-NEXT: store i8 1, i8* %a
; CHECK-NEXT: ret i8 %y
define i8 @test0_yes(i8* %a, i8* %b) nounwind {
store i8 0, i8* %a, !tbaa !1
- %y = load i8* %b, !tbaa !2
+ %y = load i8, i8* %b, !tbaa !2
store i8 1, i8* %a, !tbaa !1
ret i8 %y
}
; CHECK: @test0_no
; CHECK-NEXT: store i8 0, i8* %a
-; CHECK-NEXT: load i8* %b
+; CHECK-NEXT: load i8, i8* %b
; CHECK-NEXT: store i8 1, i8* %a
; CHECK-NEXT: ret i8 %y
define i8 @test0_no(i8* %a, i8* %b) nounwind {
store i8 0, i8* %a, !tbaa !3
- %y = load i8* %b, !tbaa !4
+ %y = load i8, i8* %b, !tbaa !4
store i8 1, i8* %a, !tbaa !3
ret i8 %y
}
; CHECK: @test1_yes
-; CHECK-NEXT: load i8* %b
+; CHECK-NEXT: load i8, i8* %b
; CHECK-NEXT: store i8 1, i8* %a
; CHECK-NEXT: ret i8 %y
define i8 @test1_yes(i8* %a, i8* %b) nounwind {
store i8 0, i8* %a
- %y = load i8* %b, !tbaa !5
+ %y = load i8, i8* %b, !tbaa !5
store i8 1, i8* %a
ret i8 %y
}
; CHECK: @test1_no
; CHECK-NEXT: store i8 0, i8* %a
-; CHECK-NEXT: load i8* %b
+; CHECK-NEXT: load i8, i8* %b
; CHECK-NEXT: store i8 1, i8* %a
; CHECK-NEXT: ret i8 %y
define i8 @test1_no(i8* %a, i8* %b) nounwind {
store i8 0, i8* %a
- %y = load i8* %b, !tbaa !6
+ %y = load i8, i8* %b, !tbaa !6
store i8 1, i8* %a
ret i8 %y
}
diff --git a/test/Analysis/TypeBasedAliasAnalysis/dynamic-indices.ll b/test/Analysis/TypeBasedAliasAnalysis/dynamic-indices.ll
index 732f5d7..afc83c9 100644
--- a/test/Analysis/TypeBasedAliasAnalysis/dynamic-indices.ll
+++ b/test/Analysis/TypeBasedAliasAnalysis/dynamic-indices.ll
@@ -12,8 +12,8 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; CHECK: define void @vrlh(
; CHECK: for.end:
-; CHECK: %arrayidx31 = getelementptr inbounds %union.vector_t* %t, i64 0, i32 0, i64 1
-; CHECK: %tmp32 = load i64* %arrayidx31, align 8, !tbaa [[TAG:!.*]]
+; CHECK: %arrayidx31 = getelementptr inbounds %union.vector_t, %union.vector_t* %t, i64 0, i32 0, i64 1
+; CHECK: %tmp32 = load i64, i64* %arrayidx31, align 8, !tbaa [[TAG:!.*]]
define void @vrlh(%union.vector_t* %va, %union.vector_t* %vb, %union.vector_t* %vd) nounwind {
entry:
@@ -25,22 +25,22 @@ for.body: ; preds = %entry, %for.body
%sub = sub nsw i32 7, %i.01
%idxprom = sext i32 %sub to i64
%half = bitcast %union.vector_t* %vb to [8 x i16]*
- %arrayidx = getelementptr inbounds [8 x i16]* %half, i64 0, i64 %idxprom
- %tmp4 = load i16* %arrayidx, align 2, !tbaa !0
+ %arrayidx = getelementptr inbounds [8 x i16], [8 x i16]* %half, i64 0, i64 %idxprom
+ %tmp4 = load i16, i16* %arrayidx, align 2, !tbaa !0
%conv = zext i16 %tmp4 to i32
%and = and i32 %conv, 15
%sub6 = sub nsw i32 7, %i.01
%idxprom7 = sext i32 %sub6 to i64
%half9 = bitcast %union.vector_t* %va to [8 x i16]*
- %arrayidx10 = getelementptr inbounds [8 x i16]* %half9, i64 0, i64 %idxprom7
- %tmp11 = load i16* %arrayidx10, align 2, !tbaa !0
+ %arrayidx10 = getelementptr inbounds [8 x i16], [8 x i16]* %half9, i64 0, i64 %idxprom7
+ %tmp11 = load i16, i16* %arrayidx10, align 2, !tbaa !0
%conv12 = zext i16 %tmp11 to i32
%shl = shl i32 %conv12, %and
%sub15 = sub nsw i32 7, %i.01
%idxprom16 = sext i32 %sub15 to i64
%half18 = bitcast %union.vector_t* %va to [8 x i16]*
- %arrayidx19 = getelementptr inbounds [8 x i16]* %half18, i64 0, i64 %idxprom16
- %tmp20 = load i16* %arrayidx19, align 2, !tbaa !0
+ %arrayidx19 = getelementptr inbounds [8 x i16], [8 x i16]* %half18, i64 0, i64 %idxprom16
+ %tmp20 = load i16, i16* %arrayidx19, align 2, !tbaa !0
%conv21 = zext i16 %tmp20 to i32
%sub23 = sub nsw i32 16, %and
%shr = lshr i32 %conv21, %sub23
@@ -49,20 +49,20 @@ for.body: ; preds = %entry, %for.body
%sub26 = sub nsw i32 7, %i.01
%idxprom27 = sext i32 %sub26 to i64
%half28 = bitcast %union.vector_t* %t to [8 x i16]*
- %arrayidx29 = getelementptr inbounds [8 x i16]* %half28, i64 0, i64 %idxprom27
+ %arrayidx29 = getelementptr inbounds [8 x i16], [8 x i16]* %half28, i64 0, i64 %idxprom27
store i16 %conv24, i16* %arrayidx29, align 2, !tbaa !0
%inc = add nsw i32 %i.01, 1
%cmp = icmp slt i32 %inc, 8
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
- %arrayidx31 = getelementptr inbounds %union.vector_t* %t, i64 0, i32 0, i64 1
- %tmp32 = load i64* %arrayidx31, align 8, !tbaa !3
- %arrayidx35 = getelementptr inbounds %union.vector_t* %vd, i64 0, i32 0, i64 1
+ %arrayidx31 = getelementptr inbounds %union.vector_t, %union.vector_t* %t, i64 0, i32 0, i64 1
+ %tmp32 = load i64, i64* %arrayidx31, align 8, !tbaa !3
+ %arrayidx35 = getelementptr inbounds %union.vector_t, %union.vector_t* %vd, i64 0, i32 0, i64 1
store i64 %tmp32, i64* %arrayidx35, align 8, !tbaa !3
- %arrayidx37 = getelementptr inbounds %union.vector_t* %t, i64 0, i32 0, i64 0
- %tmp38 = load i64* %arrayidx37, align 8, !tbaa !3
- %arrayidx41 = getelementptr inbounds %union.vector_t* %vd, i64 0, i32 0, i64 0
+ %arrayidx37 = getelementptr inbounds %union.vector_t, %union.vector_t* %t, i64 0, i32 0, i64 0
+ %tmp38 = load i64, i64* %arrayidx37, align 8, !tbaa !3
+ %arrayidx41 = getelementptr inbounds %union.vector_t, %union.vector_t* %vd, i64 0, i32 0, i64 0
store i64 %tmp38, i64* %arrayidx41, align 8, !tbaa !3
ret void
}
@@ -75,14 +75,14 @@ for.end: ; preds = %for.body
define i32 @test0(%struct.X* %a) nounwind {
entry:
- %i = getelementptr inbounds %struct.X* %a, i64 0, i32 0
+ %i = getelementptr inbounds %struct.X, %struct.X* %a, i64 0, i32 0
store i32 0, i32* %i, align 4, !tbaa !4
br label %for.body
for.body: ; preds = %entry, %for.body
%i2.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %f = getelementptr inbounds %struct.X* %a, i64 %i2.01, i32 1
- %tmp6 = load float* %f, align 4, !tbaa !5
+ %f = getelementptr inbounds %struct.X, %struct.X* %a, i64 %i2.01, i32 1
+ %tmp6 = load float, float* %f, align 4, !tbaa !5
%mul = fmul float %tmp6, 0x40019999A0000000
store float %mul, float* %f, align 4, !tbaa !5
%inc = add nsw i64 %i2.01, 1
@@ -90,8 +90,8 @@ for.body: ; preds = %entry, %for.body
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
- %i9 = getelementptr inbounds %struct.X* %a, i64 0, i32 0
- %tmp10 = load i32* %i9, align 4, !tbaa !4
+ %i9 = getelementptr inbounds %struct.X, %struct.X* %a, i64 0, i32 0
+ %tmp10 = load i32, i32* %i9, align 4, !tbaa !4
ret i32 %tmp10
}
@@ -103,14 +103,14 @@ for.end: ; preds = %for.body
define float @test1(%struct.X* %a) nounwind {
entry:
- %f = getelementptr inbounds %struct.X* %a, i64 0, i32 1
+ %f = getelementptr inbounds %struct.X, %struct.X* %a, i64 0, i32 1
store float 0x3FD3333340000000, float* %f, align 4, !tbaa !5
br label %for.body
for.body: ; preds = %entry, %for.body
%i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %i5 = getelementptr inbounds %struct.X* %a, i64 %i.01, i32 0
- %tmp6 = load i32* %i5, align 4, !tbaa !4
+ %i5 = getelementptr inbounds %struct.X, %struct.X* %a, i64 %i.01, i32 0
+ %tmp6 = load i32, i32* %i5, align 4, !tbaa !4
%mul = mul nsw i32 %tmp6, 3
store i32 %mul, i32* %i5, align 4, !tbaa !4
%inc = add nsw i64 %i.01, 1
@@ -118,8 +118,8 @@ for.body: ; preds = %entry, %for.body
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
- %f9 = getelementptr inbounds %struct.X* %a, i64 0, i32 1
- %tmp10 = load float* %f9, align 4, !tbaa !5
+ %f9 = getelementptr inbounds %struct.X, %struct.X* %a, i64 0, i32 1
+ %tmp10 = load float, float* %f9, align 4, !tbaa !5
ret float %tmp10
}
diff --git a/test/Analysis/TypeBasedAliasAnalysis/gvn-nonlocal-type-mismatch.ll b/test/Analysis/TypeBasedAliasAnalysis/gvn-nonlocal-type-mismatch.ll
index edea6d0..aaa43a4 100644
--- a/test/Analysis/TypeBasedAliasAnalysis/gvn-nonlocal-type-mismatch.ll
+++ b/test/Analysis/TypeBasedAliasAnalysis/gvn-nonlocal-type-mismatch.ll
@@ -17,7 +17,7 @@ entry:
br i1 %c, label %if.else, label %if.then
if.then:
- %t = load i32* %p, !tbaa !1
+ %t = load i32, i32* %p, !tbaa !1
store i32 %t, i32* %q
ret void
@@ -32,11 +32,11 @@ if.else:
; CHECK: @watch_out_for_type_change
; CHECK: if.then:
-; CHECK: %t = load i32* %p
+; CHECK: %t = load i32, i32* %p
; CHECK: store i32 %t, i32* %q
; CHECK: ret void
; CHECK: if.else:
-; CHECK: %u = load i32* %p
+; CHECK: %u = load i32, i32* %p
; CHECK: store i32 %u, i32* %q
define void @watch_out_for_type_change(i1 %c, i32* %p, i32* %p1, i32* %q) nounwind {
@@ -46,12 +46,12 @@ entry:
br i1 %c, label %if.else, label %if.then
if.then:
- %t = load i32* %p, !tbaa !3
+ %t = load i32, i32* %p, !tbaa !3
store i32 %t, i32* %q
ret void
if.else:
- %u = load i32* %p, !tbaa !4
+ %u = load i32, i32* %p, !tbaa !4
store i32 %u, i32* %q
ret void
}
@@ -64,7 +64,7 @@ if.else:
; CHECK: store i32 0, i32* %q
; CHECK: ret void
; CHECK: if.else:
-; CHECK: %u = load i32* %p
+; CHECK: %u = load i32, i32* %p
; CHECK: store i32 %u, i32* %q
define void @watch_out_for_another_type_change(i1 %c, i32* %p, i32* %p1, i32* %q) nounwind {
@@ -74,12 +74,12 @@ entry:
br i1 %c, label %if.else, label %if.then
if.then:
- %t = load i32* %p, !tbaa !4
+ %t = load i32, i32* %p, !tbaa !4
store i32 %t, i32* %q
ret void
if.else:
- %u = load i32* %p, !tbaa !3
+ %u = load i32, i32* %p, !tbaa !3
store i32 %u, i32* %q
ret void
}
diff --git a/test/Analysis/TypeBasedAliasAnalysis/licm.ll b/test/Analysis/TypeBasedAliasAnalysis/licm.ll
index 0722a2c..fe07730 100644
--- a/test/Analysis/TypeBasedAliasAnalysis/licm.ll
+++ b/test/Analysis/TypeBasedAliasAnalysis/licm.ll
@@ -5,7 +5,7 @@
; CHECK: @foo
; CHECK: entry:
-; CHECK-NEXT: %tmp3 = load double** @P, !tbaa !0
+; CHECK-NEXT: %tmp3 = load double*, double** @P, !tbaa !0
; CHECK-NEXT: br label %for.body
@P = common global double* null
@@ -16,9 +16,9 @@ entry:
for.body: ; preds = %entry, %for.body
%i.07 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
- %tmp3 = load double** @P, !tbaa !1
- %scevgep = getelementptr double* %tmp3, i64 %i.07
- %tmp4 = load double* %scevgep, !tbaa !2
+ %tmp3 = load double*, double** @P, !tbaa !1
+ %scevgep = getelementptr double, double* %tmp3, i64 %i.07
+ %tmp4 = load double, double* %scevgep, !tbaa !2
%mul = fmul double %tmp4, 2.300000e+00
store double %mul, double* %scevgep, !tbaa !2
%inc = add i64 %i.07, 1
@@ -49,9 +49,9 @@ entry:
br label %loop
loop:
- %tmp51 = load i8** %p, !tbaa !4
+ %tmp51 = load i8*, i8** %p, !tbaa !4
store i8* %tmp51, i8** %p
- %tmp40 = load i8* %q, !tbaa !5
+ %tmp40 = load i8, i8* %q, !tbaa !5
store i8 %tmp40, i8* %q
br label %loop
}
diff --git a/test/Analysis/TypeBasedAliasAnalysis/placement-tbaa.ll b/test/Analysis/TypeBasedAliasAnalysis/placement-tbaa.ll
index fd05dbe..aa91020 100644
--- a/test/Analysis/TypeBasedAliasAnalysis/placement-tbaa.ll
+++ b/test/Analysis/TypeBasedAliasAnalysis/placement-tbaa.ll
@@ -33,20 +33,20 @@ entry:
%call = call noalias i8* @_Znwm(i64 8)
%0 = bitcast i8* %call to %struct.Foo*
store %struct.Foo* %0, %struct.Foo** %f, align 8, !tbaa !4
- %1 = load %struct.Foo** %f, align 8, !tbaa !4
- %i = getelementptr inbounds %struct.Foo* %1, i32 0, i32 0
+ %1 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4
+ %i = getelementptr inbounds %struct.Foo, %struct.Foo* %1, i32 0, i32 0
store i64 1, i64* %i, align 8, !tbaa !6
store i32 0, i32* %i1, align 4, !tbaa !0
br label %for.cond
for.cond:
- %2 = load i32* %i1, align 4, !tbaa !0
- %3 = load i32* %n.addr, align 4, !tbaa !0
+ %2 = load i32, i32* %i1, align 4, !tbaa !0
+ %3 = load i32, i32* %n.addr, align 4, !tbaa !0
%cmp = icmp slt i32 %2, %3
br i1 %cmp, label %for.body, label %for.end
for.body:
- %4 = load %struct.Foo** %f, align 8, !tbaa !4
+ %4 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4
%5 = bitcast %struct.Foo* %4 to i8*
%new.isnull = icmp eq i8* %5, null
br i1 %new.isnull, label %new.cont, label %new.notnull
@@ -58,10 +58,10 @@ new.notnull:
new.cont:
%7 = phi %struct.Bar* [ %6, %new.notnull ], [ null, %for.body ]
store %struct.Bar* %7, %struct.Bar** %b, align 8, !tbaa !4
- %8 = load %struct.Bar** %b, align 8, !tbaa !4
- %p = getelementptr inbounds %struct.Bar* %8, i32 0, i32 0
+ %8 = load %struct.Bar*, %struct.Bar** %b, align 8, !tbaa !4
+ %p = getelementptr inbounds %struct.Bar, %struct.Bar* %8, i32 0, i32 0
store i8* null, i8** %p, align 8, !tbaa !9
- %9 = load %struct.Foo** %f, align 8, !tbaa !4
+ %9 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4
%10 = bitcast %struct.Foo* %9 to i8*
%new.isnull2 = icmp eq i8* %10, null
br i1 %new.isnull2, label %new.cont4, label %new.notnull3
@@ -73,23 +73,23 @@ new.notnull3:
new.cont4:
%12 = phi %struct.Foo* [ %11, %new.notnull3 ], [ null, %new.cont ]
store %struct.Foo* %12, %struct.Foo** %f, align 8, !tbaa !4
- %13 = load i32* %i1, align 4, !tbaa !0
+ %13 = load i32, i32* %i1, align 4, !tbaa !0
%conv = sext i32 %13 to i64
- %14 = load %struct.Foo** %f, align 8, !tbaa !4
- %i5 = getelementptr inbounds %struct.Foo* %14, i32 0, i32 0
+ %14 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4
+ %i5 = getelementptr inbounds %struct.Foo, %struct.Foo* %14, i32 0, i32 0
store i64 %conv, i64* %i5, align 8, !tbaa !6
br label %for.inc
for.inc:
- %15 = load i32* %i1, align 4, !tbaa !0
+ %15 = load i32, i32* %i1, align 4, !tbaa !0
%inc = add nsw i32 %15, 1
store i32 %inc, i32* %i1, align 4, !tbaa !0
br label %for.cond
for.end:
- %16 = load %struct.Foo** %f, align 8, !tbaa !4
- %i6 = getelementptr inbounds %struct.Foo* %16, i32 0, i32 0
- %17 = load i64* %i6, align 8, !tbaa !6
+ %16 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4
+ %i6 = getelementptr inbounds %struct.Foo, %struct.Foo* %16, i32 0, i32 0
+ %17 = load i64, i64* %i6, align 8, !tbaa !6
ret i64 %17
}
diff --git a/test/Analysis/TypeBasedAliasAnalysis/precedence.ll b/test/Analysis/TypeBasedAliasAnalysis/precedence.ll
index 0b697b2..b2931ca 100644
--- a/test/Analysis/TypeBasedAliasAnalysis/precedence.ll
+++ b/test/Analysis/TypeBasedAliasAnalysis/precedence.ll
@@ -18,7 +18,7 @@ entry:
store i32 0, i32* %x, !tbaa !0
%0 = bitcast i32* %x to float*
store float 0x4002666660000000, float* %0, !tbaa !3
- %tmp3 = load i32* %x, !tbaa !0
+ %tmp3 = load i32, i32* %x, !tbaa !0
ret i32 %tmp3
}
@@ -33,9 +33,9 @@ define i64 @offset(i64* %x) nounwind {
entry:
store i64 0, i64* %x, !tbaa !4
%0 = bitcast i64* %x to i8*
- %1 = getelementptr i8* %0, i64 1
+ %1 = getelementptr i8, i8* %0, i64 1
store i8 1, i8* %1, !tbaa !5
- %tmp3 = load i64* %x, !tbaa !4
+ %tmp3 = load i64, i64* %x, !tbaa !4
ret i64 %tmp3
}
diff --git a/test/Analysis/TypeBasedAliasAnalysis/sink.ll b/test/Analysis/TypeBasedAliasAnalysis/sink.ll
index 1a124b8..c95dc15 100644
--- a/test/Analysis/TypeBasedAliasAnalysis/sink.ll
+++ b/test/Analysis/TypeBasedAliasAnalysis/sink.ll
@@ -1,11 +1,11 @@
; RUN: opt -tbaa -sink -S < %s | FileCheck %s
; CHECK: a:
-; CHECK: %f = load float* %p, !tbaa [[TAGA:!.*]]
+; CHECK: %f = load float, float* %p, !tbaa [[TAGA:!.*]]
; CHECK: store float %f, float* %q
define void @foo(float* %p, i1 %c, float* %q, float* %r) {
- %f = load float* %p, !tbaa !0
+ %f = load float, float* %p, !tbaa !0
store float 0.0, float* %r, !tbaa !1
br i1 %c, label %a, label %b
a:
diff --git a/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll b/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll
index 3c035af..a2e4dc6 100644
--- a/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll
+++ b/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll
@@ -17,7 +17,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i32 4
-; OPT: %[[RET:.*]] = load i32*
+; OPT: %[[RET:.*]] = load i32, i32*
; OPT: ret i32 %[[RET]]
%s.addr = alloca i32*, align 8
%A.addr = alloca %struct.StructA*, align 8
@@ -25,13 +25,13 @@ entry:
store i32* %s, i32** %s.addr, align 8, !tbaa !0
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load i32** %s.addr, align 8, !tbaa !0
+ %0 = load i32*, i32** %s.addr, align 8, !tbaa !0
store i32 1, i32* %0, align 4, !tbaa !6
- %1 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructA* %1, i32 0, i32 1
+ %1 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %1, i32 0, i32 1
store i32 4, i32* %f32, align 4, !tbaa !8
- %2 = load i32** %s.addr, align 8, !tbaa !0
- %3 = load i32* %2, align 4, !tbaa !6
+ %2 = load i32*, i32** %s.addr, align 8, !tbaa !0
+ %3 = load i32, i32* %2, align 4, !tbaa !6
ret i32 %3
}
@@ -51,13 +51,13 @@ entry:
store i32* %s, i32** %s.addr, align 8, !tbaa !0
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load i32** %s.addr, align 8, !tbaa !0
+ %0 = load i32*, i32** %s.addr, align 8, !tbaa !0
store i32 1, i32* %0, align 4, !tbaa !6
- %1 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f16 = getelementptr inbounds %struct.StructA* %1, i32 0, i32 0
+ %1 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
+ %f16 = getelementptr inbounds %struct.StructA, %struct.StructA* %1, i32 0, i32 0
store i16 4, i16* %f16, align 2, !tbaa !11
- %2 = load i32** %s.addr, align 8, !tbaa !0
- %3 = load i32* %2, align 4, !tbaa !6
+ %2 = load i32*, i32** %s.addr, align 8, !tbaa !0
+ %3 = load i32, i32* %2, align 4, !tbaa !6
ret i32 %3
}
@@ -69,7 +69,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i32 4
-; OPT: %[[RET:.*]] = load i32*
+; OPT: %[[RET:.*]] = load i32, i32*
; OPT: ret i32 %[[RET]]
%A.addr = alloca %struct.StructA*, align 8
%B.addr = alloca %struct.StructB*, align 8
@@ -77,16 +77,16 @@ entry:
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructA* %0, i32 0, i32 1
+ %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
- %1 = load %struct.StructB** %B.addr, align 8, !tbaa !0
- %a = getelementptr inbounds %struct.StructB* %1, i32 0, i32 1
- %f321 = getelementptr inbounds %struct.StructA* %a, i32 0, i32 1
+ %1 = load %struct.StructB*, %struct.StructB** %B.addr, align 8, !tbaa !0
+ %a = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 1
+ %f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 1
store i32 4, i32* %f321, align 4, !tbaa !12
- %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f322 = getelementptr inbounds %struct.StructA* %2, i32 0, i32 1
- %3 = load i32* %f322, align 4, !tbaa !8
+ %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
+ %f322 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
+ %3 = load i32, i32* %f322, align 4, !tbaa !8
ret i32 %3
}
@@ -106,16 +106,16 @@ entry:
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructA* %0, i32 0, i32 1
+ %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
- %1 = load %struct.StructB** %B.addr, align 8, !tbaa !0
- %a = getelementptr inbounds %struct.StructB* %1, i32 0, i32 1
- %f16 = getelementptr inbounds %struct.StructA* %a, i32 0, i32 0
+ %1 = load %struct.StructB*, %struct.StructB** %B.addr, align 8, !tbaa !0
+ %a = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 1
+ %f16 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 0
store i16 4, i16* %f16, align 2, !tbaa !14
- %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f321 = getelementptr inbounds %struct.StructA* %2, i32 0, i32 1
- %3 = load i32* %f321, align 4, !tbaa !8
+ %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
+ %f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
+ %3 = load i32, i32* %f321, align 4, !tbaa !8
ret i32 %3
}
@@ -135,15 +135,15 @@ entry:
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructA* %0, i32 0, i32 1
+ %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
- %1 = load %struct.StructB** %B.addr, align 8, !tbaa !0
- %f321 = getelementptr inbounds %struct.StructB* %1, i32 0, i32 2
+ %1 = load %struct.StructB*, %struct.StructB** %B.addr, align 8, !tbaa !0
+ %f321 = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 2
store i32 4, i32* %f321, align 4, !tbaa !15
- %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f322 = getelementptr inbounds %struct.StructA* %2, i32 0, i32 1
- %3 = load i32* %f322, align 4, !tbaa !8
+ %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
+ %f322 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
+ %3 = load i32, i32* %f322, align 4, !tbaa !8
ret i32 %3
}
@@ -163,16 +163,16 @@ entry:
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructA* %0, i32 0, i32 1
+ %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
- %1 = load %struct.StructB** %B.addr, align 8, !tbaa !0
- %a = getelementptr inbounds %struct.StructB* %1, i32 0, i32 1
- %f32_2 = getelementptr inbounds %struct.StructA* %a, i32 0, i32 3
+ %1 = load %struct.StructB*, %struct.StructB** %B.addr, align 8, !tbaa !0
+ %a = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 1
+ %f32_2 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 3
store i32 4, i32* %f32_2, align 4, !tbaa !16
- %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f321 = getelementptr inbounds %struct.StructA* %2, i32 0, i32 1
- %3 = load i32* %f321, align 4, !tbaa !8
+ %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
+ %f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
+ %3 = load i32, i32* %f321, align 4, !tbaa !8
ret i32 %3
}
@@ -192,15 +192,15 @@ entry:
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store %struct.StructS* %S, %struct.StructS** %S.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructA* %0, i32 0, i32 1
+ %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
- %1 = load %struct.StructS** %S.addr, align 8, !tbaa !0
- %f321 = getelementptr inbounds %struct.StructS* %1, i32 0, i32 1
+ %1 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0
+ %f321 = getelementptr inbounds %struct.StructS, %struct.StructS* %1, i32 0, i32 1
store i32 4, i32* %f321, align 4, !tbaa !17
- %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f322 = getelementptr inbounds %struct.StructA* %2, i32 0, i32 1
- %3 = load i32* %f322, align 4, !tbaa !8
+ %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
+ %f322 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
+ %3 = load i32, i32* %f322, align 4, !tbaa !8
ret i32 %3
}
@@ -220,15 +220,15 @@ entry:
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store %struct.StructS* %S, %struct.StructS** %S.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructA* %0, i32 0, i32 1
+ %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
- %1 = load %struct.StructS** %S.addr, align 8, !tbaa !0
- %f16 = getelementptr inbounds %struct.StructS* %1, i32 0, i32 0
+ %1 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0
+ %f16 = getelementptr inbounds %struct.StructS, %struct.StructS* %1, i32 0, i32 0
store i16 4, i16* %f16, align 2, !tbaa !19
- %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f321 = getelementptr inbounds %struct.StructA* %2, i32 0, i32 1
- %3 = load i32* %f321, align 4, !tbaa !8
+ %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
+ %f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
+ %3 = load i32, i32* %f321, align 4, !tbaa !8
ret i32 %3
}
@@ -248,15 +248,15 @@ entry:
store %struct.StructS* %S, %struct.StructS** %S.addr, align 8, !tbaa !0
store %struct.StructS2* %S2, %struct.StructS2** %S2.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructS** %S.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructS* %0, i32 0, i32 1
+ %0 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0
+ %f32 = getelementptr inbounds %struct.StructS, %struct.StructS* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !17
- %1 = load %struct.StructS2** %S2.addr, align 8, !tbaa !0
- %f321 = getelementptr inbounds %struct.StructS2* %1, i32 0, i32 1
+ %1 = load %struct.StructS2*, %struct.StructS2** %S2.addr, align 8, !tbaa !0
+ %f321 = getelementptr inbounds %struct.StructS2, %struct.StructS2* %1, i32 0, i32 1
store i32 4, i32* %f321, align 4, !tbaa !20
- %2 = load %struct.StructS** %S.addr, align 8, !tbaa !0
- %f322 = getelementptr inbounds %struct.StructS* %2, i32 0, i32 1
- %3 = load i32* %f322, align 4, !tbaa !17
+ %2 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0
+ %f322 = getelementptr inbounds %struct.StructS, %struct.StructS* %2, i32 0, i32 1
+ %3 = load i32, i32* %f322, align 4, !tbaa !17
ret i32 %3
}
@@ -276,15 +276,15 @@ entry:
store %struct.StructS* %S, %struct.StructS** %S.addr, align 8, !tbaa !0
store %struct.StructS2* %S2, %struct.StructS2** %S2.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructS** %S.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructS* %0, i32 0, i32 1
+ %0 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0
+ %f32 = getelementptr inbounds %struct.StructS, %struct.StructS* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !17
- %1 = load %struct.StructS2** %S2.addr, align 8, !tbaa !0
- %f16 = getelementptr inbounds %struct.StructS2* %1, i32 0, i32 0
+ %1 = load %struct.StructS2*, %struct.StructS2** %S2.addr, align 8, !tbaa !0
+ %f16 = getelementptr inbounds %struct.StructS2, %struct.StructS2* %1, i32 0, i32 0
store i16 4, i16* %f16, align 2, !tbaa !22
- %2 = load %struct.StructS** %S.addr, align 8, !tbaa !0
- %f321 = getelementptr inbounds %struct.StructS* %2, i32 0, i32 1
- %3 = load i32* %f321, align 4, !tbaa !17
+ %2 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0
+ %f321 = getelementptr inbounds %struct.StructS, %struct.StructS* %2, i32 0, i32 1
+ %3 = load i32, i32* %f321, align 4, !tbaa !17
ret i32 %3
}
@@ -304,21 +304,21 @@ entry:
store %struct.StructC* %C, %struct.StructC** %C.addr, align 8, !tbaa !0
store %struct.StructD* %D, %struct.StructD** %D.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructC** %C.addr, align 8, !tbaa !0
- %b = getelementptr inbounds %struct.StructC* %0, i32 0, i32 1
- %a = getelementptr inbounds %struct.StructB* %b, i32 0, i32 1
- %f32 = getelementptr inbounds %struct.StructA* %a, i32 0, i32 1
+ %0 = load %struct.StructC*, %struct.StructC** %C.addr, align 8, !tbaa !0
+ %b = getelementptr inbounds %struct.StructC, %struct.StructC* %0, i32 0, i32 1
+ %a = getelementptr inbounds %struct.StructB, %struct.StructB* %b, i32 0, i32 1
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !23
- %1 = load %struct.StructD** %D.addr, align 8, !tbaa !0
- %b1 = getelementptr inbounds %struct.StructD* %1, i32 0, i32 1
- %a2 = getelementptr inbounds %struct.StructB* %b1, i32 0, i32 1
- %f323 = getelementptr inbounds %struct.StructA* %a2, i32 0, i32 1
+ %1 = load %struct.StructD*, %struct.StructD** %D.addr, align 8, !tbaa !0
+ %b1 = getelementptr inbounds %struct.StructD, %struct.StructD* %1, i32 0, i32 1
+ %a2 = getelementptr inbounds %struct.StructB, %struct.StructB* %b1, i32 0, i32 1
+ %f323 = getelementptr inbounds %struct.StructA, %struct.StructA* %a2, i32 0, i32 1
store i32 4, i32* %f323, align 4, !tbaa !25
- %2 = load %struct.StructC** %C.addr, align 8, !tbaa !0
- %b4 = getelementptr inbounds %struct.StructC* %2, i32 0, i32 1
- %a5 = getelementptr inbounds %struct.StructB* %b4, i32 0, i32 1
- %f326 = getelementptr inbounds %struct.StructA* %a5, i32 0, i32 1
- %3 = load i32* %f326, align 4, !tbaa !23
+ %2 = load %struct.StructC*, %struct.StructC** %C.addr, align 8, !tbaa !0
+ %b4 = getelementptr inbounds %struct.StructC, %struct.StructC* %2, i32 0, i32 1
+ %a5 = getelementptr inbounds %struct.StructB, %struct.StructB* %b4, i32 0, i32 1
+ %f326 = getelementptr inbounds %struct.StructA, %struct.StructA* %a5, i32 0, i32 1
+ %3 = load i32, i32* %f326, align 4, !tbaa !23
ret i32 %3
}
@@ -330,7 +330,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i32 4
-; OPT: %[[RET:.*]] = load i32*
+; OPT: %[[RET:.*]] = load i32, i32*
; OPT: ret i32 %[[RET]]
%C.addr = alloca %struct.StructC*, align 8
%D.addr = alloca %struct.StructD*, align 8
@@ -340,24 +340,24 @@ entry:
store %struct.StructC* %C, %struct.StructC** %C.addr, align 8, !tbaa !0
store %struct.StructD* %D, %struct.StructD** %D.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructC** %C.addr, align 8, !tbaa !0
- %b = getelementptr inbounds %struct.StructC* %0, i32 0, i32 1
+ %0 = load %struct.StructC*, %struct.StructC** %C.addr, align 8, !tbaa !0
+ %b = getelementptr inbounds %struct.StructC, %struct.StructC* %0, i32 0, i32 1
store %struct.StructB* %b, %struct.StructB** %b1, align 8, !tbaa !0
- %1 = load %struct.StructD** %D.addr, align 8, !tbaa !0
- %b3 = getelementptr inbounds %struct.StructD* %1, i32 0, i32 1
+ %1 = load %struct.StructD*, %struct.StructD** %D.addr, align 8, !tbaa !0
+ %b3 = getelementptr inbounds %struct.StructD, %struct.StructD* %1, i32 0, i32 1
store %struct.StructB* %b3, %struct.StructB** %b2, align 8, !tbaa !0
- %2 = load %struct.StructB** %b1, align 8, !tbaa !0
- %a = getelementptr inbounds %struct.StructB* %2, i32 0, i32 1
- %f32 = getelementptr inbounds %struct.StructA* %a, i32 0, i32 1
+ %2 = load %struct.StructB*, %struct.StructB** %b1, align 8, !tbaa !0
+ %a = getelementptr inbounds %struct.StructB, %struct.StructB* %2, i32 0, i32 1
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !12
- %3 = load %struct.StructB** %b2, align 8, !tbaa !0
- %a4 = getelementptr inbounds %struct.StructB* %3, i32 0, i32 1
- %f325 = getelementptr inbounds %struct.StructA* %a4, i32 0, i32 1
+ %3 = load %struct.StructB*, %struct.StructB** %b2, align 8, !tbaa !0
+ %a4 = getelementptr inbounds %struct.StructB, %struct.StructB* %3, i32 0, i32 1
+ %f325 = getelementptr inbounds %struct.StructA, %struct.StructA* %a4, i32 0, i32 1
store i32 4, i32* %f325, align 4, !tbaa !12
- %4 = load %struct.StructB** %b1, align 8, !tbaa !0
- %a6 = getelementptr inbounds %struct.StructB* %4, i32 0, i32 1
- %f327 = getelementptr inbounds %struct.StructA* %a6, i32 0, i32 1
- %5 = load i32* %f327, align 4, !tbaa !12
+ %4 = load %struct.StructB*, %struct.StructB** %b1, align 8, !tbaa !0
+ %a6 = getelementptr inbounds %struct.StructB, %struct.StructB* %4, i32 0, i32 1
+ %f327 = getelementptr inbounds %struct.StructA, %struct.StructA* %a6, i32 0, i32 1
+ %5 = load i32, i32* %f327, align 4, !tbaa !12
ret i32 %5
}
diff --git a/test/Analysis/ValueTracking/memory-dereferenceable.ll b/test/Analysis/ValueTracking/memory-dereferenceable.ll
index 1c55efc..51f9265 100644
--- a/test/Analysis/ValueTracking/memory-dereferenceable.ll
+++ b/test/Analysis/ValueTracking/memory-dereferenceable.ll
@@ -17,16 +17,16 @@ define void @test(i32 addrspace(1)* dereferenceable(8) %dparam) {
; CHECK: %relocate
; CHECK-NOT: %nparam
entry:
- %globalptr = getelementptr inbounds [6 x i8]* @globalstr, i32 0, i32 0
- %load1 = load i8* %globalptr
+ %globalptr = getelementptr inbounds [6 x i8], [6 x i8]* @globalstr, i32 0, i32 0
+ %load1 = load i8, i8* %globalptr
%alloca = alloca i1
- %load2 = load i1* %alloca
- %load3 = load i32 addrspace(1)* %dparam
+ %load2 = load i1, i1* %alloca
+ %load3 = load i32, i32 addrspace(1)* %dparam
%tok = tail call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 addrspace(1)* %dparam)
%relocate = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %tok, i32 4, i32 4)
- %load4 = load i32 addrspace(1)* %relocate
- %nparam = getelementptr i32 addrspace(1)* %dparam, i32 5
- %load5 = load i32 addrspace(1)* %nparam
+ %load4 = load i32, i32 addrspace(1)* %relocate
+ %nparam = getelementptr i32, i32 addrspace(1)* %dparam, i32 5
+ %load5 = load i32, i32 addrspace(1)* %nparam
ret void
}
diff --git a/test/Analysis/ValueTracking/pr23011.ll b/test/Analysis/ValueTracking/pr23011.ll
new file mode 100644
index 0000000..9edc1c4
--- /dev/null
+++ b/test/Analysis/ValueTracking/pr23011.ll
@@ -0,0 +1,15 @@
+; RUN: opt -indvars -S < %s | FileCheck %s
+
+declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8) nounwind readnone
+
+define i1 @test1(i8 %x) {
+ entry:
+; CHECK-LABEL: @test1
+ %rem = srem i8 %x, 15
+ %t = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %rem, i8 %rem)
+; CHECK: %t = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %rem, i8 %rem)
+; CHECK: %obit = extractvalue { i8, i1 } %t, 1
+; CHECK: ret i1 %obit
+ %obit = extractvalue { i8, i1 } %t, 1
+ ret i1 %obit
+}