aboutsummaryrefslogtreecommitdiffstats
path: root/test/Transforms/InstCombine
diff options
context:
space:
mode:
Diffstat (limited to 'test/Transforms/InstCombine')
-rw-r--r--test/Transforms/InstCombine/2002-03-11-InstCombineHang.ll11
-rw-r--r--test/Transforms/InstCombine/2002-05-14-SubFailure.ll10
-rw-r--r--test/Transforms/InstCombine/2002-05-14-TouchDeletedInst.ll510
-rw-r--r--test/Transforms/InstCombine/2002-08-02-CastTest.ll11
-rw-r--r--test/Transforms/InstCombine/2002-09-17-GetElementPtrCrash.ll12
-rw-r--r--test/Transforms/InstCombine/2002-12-05-MissedConstProp.ll10
-rw-r--r--test/Transforms/InstCombine/2003-05-26-CastMiscompile.ll7
-rw-r--r--test/Transforms/InstCombine/2003-05-27-ConstExprCrash.ll8
-rw-r--r--test/Transforms/InstCombine/2003-06-05-BranchInvertInfLoop.ll14
-rw-r--r--test/Transforms/InstCombine/2003-06-22-ConstantExprCrash.ll12
-rw-r--r--test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll47
-rw-r--r--test/Transforms/InstCombine/2003-08-12-AllocaNonNull.ll23
-rw-r--r--test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll5
-rw-r--r--test/Transforms/InstCombine/2003-10-23-InstcombineNullFail.ll13
-rw-r--r--test/Transforms/InstCombine/2003-10-29-CallSiteResolve.ll13
-rw-r--r--test/Transforms/InstCombine/2003-11-03-VarargsCallBug.ll13
-rw-r--r--test/Transforms/InstCombine/2003-11-13-ConstExprCastCall.ll9
-rw-r--r--test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll24
-rw-r--r--test/Transforms/InstCombine/2004-02-23-ShiftShiftOverflow.ll15
-rw-r--r--test/Transforms/InstCombine/2004-03-13-InstCombineInfLoop.ll13
-rw-r--r--test/Transforms/InstCombine/2004-04-04-InstCombineReplaceAllUsesWith.ll8
-rw-r--r--test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll9
-rw-r--r--test/Transforms/InstCombine/2004-07-27-ConstantExprMul.ll8
-rw-r--r--test/Transforms/InstCombine/2004-08-09-RemInfLoop.llx8
-rw-r--r--test/Transforms/InstCombine/2004-08-10-BoolSetCC.ll7
-rw-r--r--test/Transforms/InstCombine/2004-09-20-BadLoadCombine.llx21
-rw-r--r--test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.llx24
-rw-r--r--test/Transforms/InstCombine/2004-09-28-BadShiftAndSetCC.llx9
-rw-r--r--test/Transforms/InstCombine/2004-11-22-Missed-and-fold.ll8
-rw-r--r--test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll136
-rw-r--r--test/Transforms/InstCombine/2004-12-08-InstCombineCrash.ll14
-rw-r--r--test/Transforms/InstCombine/2004-12-08-RemInfiniteLoop.ll6
-rw-r--r--test/Transforms/InstCombine/2005-03-04-ShiftOverflow.ll9
-rw-r--r--test/Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll7
-rw-r--r--test/Transforms/InstCombine/2005-06-15-DivSelectCrash.ll9
-rw-r--r--test/Transforms/InstCombine/2005-06-15-ShiftSetCCCrash.ll8
-rw-r--r--test/Transforms/InstCombine/2005-06-16-RangeCrash.ll7
-rw-r--r--test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll16
-rw-r--r--test/Transforms/InstCombine/2005-07-07-DeadPHILoop.ll13
-rw-r--r--test/Transforms/InstCombine/2006-02-07-SextZextCrash.ll22
-rw-r--r--test/Transforms/InstCombine/2006-02-13-DemandedMiscompile.ll9
-rw-r--r--test/Transforms/InstCombine/2006-02-28-Crash.ll6
-rw-r--r--test/Transforms/InstCombine/2006-03-30-ExtractElement.ll7
-rw-r--r--test/Transforms/InstCombine/2006-04-01-InfLoop.ll442
-rw-r--r--test/Transforms/InstCombine/2006-04-28-ShiftShiftLongLong.ll10
-rw-r--r--test/Transforms/InstCombine/2006-05-04-DemandedBitCrash.ll51
-rw-r--r--test/Transforms/InstCombine/2006-05-06-Infloop.ll523
-rw-r--r--test/Transforms/InstCombine/2006-06-28-infloop.ll21
-rw-r--r--test/Transforms/InstCombine/2006-09-11-EmptyStructCrash.ll48
-rw-r--r--test/Transforms/InstCombine/2006-09-15-CastToBool.ll14
-rw-r--r--test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst-2.ll10
-rw-r--r--test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst.ll10
-rw-r--r--test/Transforms/InstCombine/2006-10-20-mask.ll9
-rw-r--r--test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll9
-rw-r--r--test/Transforms/InstCombine/2006-11-03-Memmove64.ll19
-rw-r--r--test/Transforms/InstCombine/2006-11-10-ashr-miscompile.ll7
-rw-r--r--test/Transforms/InstCombine/2006-11-27-XorBug.ll12
-rw-r--r--test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll9
-rw-r--r--test/Transforms/InstCombine/2006-12-05-fp-to-int-ext.ll13
-rw-r--r--test/Transforms/InstCombine/2006-12-08-ICmp-Combining.ll17
-rw-r--r--test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll57
-rw-r--r--test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll44
-rw-r--r--test/Transforms/InstCombine/2006-12-10-ICmp-GEP-GEP.ll167
-rw-r--r--test/Transforms/InstCombine/2006-12-15-Range-Test.ll36
-rw-r--r--test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll35
-rw-r--r--test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll9
-rw-r--r--test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll6
-rw-r--r--test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll7
-rw-r--r--test/Transforms/InstCombine/2007-01-27-AndICmp.ll8
-rw-r--r--test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll45
-rw-r--r--test/Transforms/InstCombine/2007-02-07-PointerCast.ll26
-rw-r--r--test/Transforms/InstCombine/2007-02-23-PhiFoldInfLoop.ll31
-rw-r--r--test/Transforms/InstCombine/2007-03-13-CompareMerge.ll9
-rw-r--r--test/Transforms/InstCombine/2007-03-19-BadTruncChangePR1261.ll10
-rw-r--r--test/Transforms/InstCombine/2007-03-21-SignedRangeTest.ll7
-rw-r--r--test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll29
-rw-r--r--test/Transforms/InstCombine/2007-03-25-DoubleShift.ll9
-rw-r--r--test/Transforms/InstCombine/2007-03-26-BadShiftMask.ll36
-rw-r--r--test/Transforms/InstCombine/2007-03-27-PR1280.ll15
-rw-r--r--test/Transforms/InstCombine/2007-03-31-InfiniteLoop.ll302
-rw-r--r--test/Transforms/InstCombine/2007-04-04-BadFoldBitcastIntoMalloc.ll19
-rw-r--r--test/Transforms/InstCombine/2007-04-08-SingleEltVectorCrash.ll7
-rw-r--r--test/Transforms/InstCombine/2007-05-04-Crash.ll30
-rw-r--r--test/Transforms/InstCombine/2007-05-10-icmp-or.ll8
-rw-r--r--test/Transforms/InstCombine/2007-05-14-Crash.ll18
-rw-r--r--test/Transforms/InstCombine/2007-05-18-CastFoldBug.ll10
-rw-r--r--test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll22
-rw-r--r--test/Transforms/InstCombine/2007-06-21-DivCompareMiscomp.ll9
-rw-r--r--test/Transforms/InstCombine/CPP_min_max.llx36
-rw-r--r--test/Transforms/InstCombine/GEPIdxCanon.ll9
-rw-r--r--test/Transforms/InstCombine/IntPtrCast.ll9
-rw-r--r--test/Transforms/InstCombine/JavaCompare.ll15
-rw-r--r--test/Transforms/InstCombine/README.txt4
-rw-r--r--test/Transforms/InstCombine/add.ll251
-rw-r--r--test/Transforms/InstCombine/add2.ll11
-rw-r--r--test/Transforms/InstCombine/alloca.ll29
-rw-r--r--test/Transforms/InstCombine/and-compare.ll11
-rw-r--r--test/Transforms/InstCombine/and-or-and.ll56
-rw-r--r--test/Transforms/InstCombine/and-or-not.ll46
-rw-r--r--test/Transforms/InstCombine/and-xor-merge.ll19
-rw-r--r--test/Transforms/InstCombine/and.ll229
-rw-r--r--test/Transforms/InstCombine/apint-add1.ll34
-rw-r--r--test/Transforms/InstCombine/apint-add2.ll46
-rw-r--r--test/Transforms/InstCombine/apint-and-compare.ll16
-rw-r--r--test/Transforms/InstCombine/apint-and-or-and.ll50
-rw-r--r--test/Transforms/InstCombine/apint-and-xor-merge.ll22
-rw-r--r--test/Transforms/InstCombine/apint-and1.ll57
-rw-r--r--test/Transforms/InstCombine/apint-and2.ll82
-rw-r--r--test/Transforms/InstCombine/apint-call-cast-target.ll13
-rw-r--r--test/Transforms/InstCombine/apint-cast-and-cast.ll15
-rw-r--r--test/Transforms/InstCombine/apint-cast-cast-to-and.ll8
-rw-r--r--test/Transforms/InstCombine/apint-cast.ll20
-rw-r--r--test/Transforms/InstCombine/apint-div1.ll22
-rw-r--r--test/Transforms/InstCombine/apint-div2.ll22
-rw-r--r--test/Transforms/InstCombine/apint-elim-logicalops.ll39
-rw-r--r--test/Transforms/InstCombine/apint-mul1.ll11
-rw-r--r--test/Transforms/InstCombine/apint-mul2.ll12
-rw-r--r--test/Transforms/InstCombine/apint-not.ll42
-rw-r--r--test/Transforms/InstCombine/apint-or1.ll36
-rw-r--r--test/Transforms/InstCombine/apint-or2.ll35
-rw-r--r--test/Transforms/InstCombine/apint-rem1.ll22
-rw-r--r--test/Transforms/InstCombine/apint-rem2.ll22
-rw-r--r--test/Transforms/InstCombine/apint-select.ll44
-rw-r--r--test/Transforms/InstCombine/apint-shift-simplify.ll23
-rw-r--r--test/Transforms/InstCombine/apint-shift.ll191
-rw-r--r--test/Transforms/InstCombine/apint-shl-trunc.ll14
-rw-r--r--test/Transforms/InstCombine/apint-sub.ll139
-rw-r--r--test/Transforms/InstCombine/apint-xor1.ll50
-rw-r--r--test/Transforms/InstCombine/apint-xor2.ll51
-rw-r--r--test/Transforms/InstCombine/apint-zext1.ll9
-rw-r--r--test/Transforms/InstCombine/apint-zext2.ll9
-rw-r--r--test/Transforms/InstCombine/binop-cast.ll7
-rw-r--r--test/Transforms/InstCombine/bit-tracking.ll26
-rw-r--r--test/Transforms/InstCombine/bitcast-gep.ll19
-rw-r--r--test/Transforms/InstCombine/bitcount.ll17
-rw-r--r--test/Transforms/InstCombine/bittest.ll29
-rw-r--r--test/Transforms/InstCombine/bswap-fold.ll28
-rw-r--r--test/Transforms/InstCombine/bswap.ll62
-rw-r--r--test/Transforms/InstCombine/call-cast-target.ll16
-rw-r--r--test/Transforms/InstCombine/call-intrinsics.ll17
-rw-r--r--test/Transforms/InstCombine/call.ll58
-rw-r--r--test/Transforms/InstCombine/call2.ll27
-rw-r--r--test/Transforms/InstCombine/canonicalize_branch.ll28
-rw-r--r--test/Transforms/InstCombine/cast-and-cast.ll16
-rw-r--r--test/Transforms/InstCombine/cast-cast-to-and.ll9
-rw-r--r--test/Transforms/InstCombine/cast-load-gep.ll23
-rw-r--r--test/Transforms/InstCombine/cast-malloc.ll13
-rw-r--r--test/Transforms/InstCombine/cast-propagate.ll10
-rw-r--r--test/Transforms/InstCombine/cast-set.ll49
-rw-r--r--test/Transforms/InstCombine/cast.ll230
-rw-r--r--test/Transforms/InstCombine/cast2.ll29
-rw-r--r--test/Transforms/InstCombine/cast_ptr.ll20
-rw-r--r--test/Transforms/InstCombine/deadcode.ll13
-rw-r--r--test/Transforms/InstCombine/dg.exp3
-rw-r--r--test/Transforms/InstCombine/div.ll69
-rw-r--r--test/Transforms/InstCombine/fpcast.ll14
-rw-r--r--test/Transforms/InstCombine/getelementptr-setcc.ll34
-rw-r--r--test/Transforms/InstCombine/getelementptr.ll76
-rw-r--r--test/Transforms/InstCombine/getelementptr_cast.ll11
-rw-r--r--test/Transforms/InstCombine/getelementptr_const.ll14
-rw-r--r--test/Transforms/InstCombine/getelementptr_index.ll10
-rw-r--r--test/Transforms/InstCombine/hoist_instr.ll17
-rw-r--r--test/Transforms/InstCombine/icmp.ll31
-rw-r--r--test/Transforms/InstCombine/load.ll74
-rw-r--r--test/Transforms/InstCombine/malloc-free-delete.ll11
-rw-r--r--test/Transforms/InstCombine/malloc.ll7
-rw-r--r--test/Transforms/InstCombine/malloc2.ll19
-rw-r--r--test/Transforms/InstCombine/memmove.ll23
-rw-r--r--test/Transforms/InstCombine/mul.ll74
-rw-r--r--test/Transforms/InstCombine/narrow.ll17
-rw-r--r--test/Transforms/InstCombine/not.ll45
-rw-r--r--test/Transforms/InstCombine/or.ll158
-rw-r--r--test/Transforms/InstCombine/or2.ll10
-rw-r--r--test/Transforms/InstCombine/phi.ll78
-rw-r--r--test/Transforms/InstCombine/rem.ll79
-rw-r--r--test/Transforms/InstCombine/select.ll182
-rw-r--r--test/Transforms/InstCombine/set.ll152
-rw-r--r--test/Transforms/InstCombine/setcc-cast-cast.ll45
-rw-r--r--test/Transforms/InstCombine/setcc-strength-reduce.ll32
-rw-r--r--test/Transforms/InstCombine/shift-simplify.ll42
-rw-r--r--test/Transforms/InstCombine/shift-sra.ll17
-rw-r--r--test/Transforms/InstCombine/shift.ll189
-rw-r--r--test/Transforms/InstCombine/shl-trunc.ll7
-rw-r--r--test/Transforms/InstCombine/signext.ll44
-rw-r--r--test/Transforms/InstCombine/sink_instruction.ll18
-rw-r--r--test/Transforms/InstCombine/stacksaverestore.ll19
-rw-r--r--test/Transforms/InstCombine/store-merge.ll37
-rw-r--r--test/Transforms/InstCombine/store.ll16
-rw-r--r--test/Transforms/InstCombine/sub.ll139
-rw-r--r--test/Transforms/InstCombine/udiv_select_to_select_shift.ll17
-rw-r--r--test/Transforms/InstCombine/vec_demanded_elts.ll47
-rw-r--r--test/Transforms/InstCombine/vec_extract_elt.ll9
-rw-r--r--test/Transforms/InstCombine/vec_insert_to_shuffle.ll18
-rw-r--r--test/Transforms/InstCombine/vec_insertelt.ll7
-rw-r--r--test/Transforms/InstCombine/vec_narrow.ll12
-rw-r--r--test/Transforms/InstCombine/vec_shuffle.ll47
-rw-r--r--test/Transforms/InstCombine/xor.ll198
-rw-r--r--test/Transforms/InstCombine/xor2.ll17
-rw-r--r--test/Transforms/InstCombine/zeroext-and-reduce.ll9
-rw-r--r--test/Transforms/InstCombine/zext.ll9
200 files changed, 8216 insertions, 0 deletions
diff --git a/test/Transforms/InstCombine/2002-03-11-InstCombineHang.ll b/test/Transforms/InstCombine/2002-03-11-InstCombineHang.ll
new file mode 100644
index 0000000..dd683a3
--- /dev/null
+++ b/test/Transforms/InstCombine/2002-03-11-InstCombineHang.ll
@@ -0,0 +1,11 @@
+; This testcase causes instcombine to hang.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine
+
+implementation
+
+void "test"(int %X)
+begin
+ %reg117 = add int %X, 0
+ ret void
+end
diff --git a/test/Transforms/InstCombine/2002-05-14-SubFailure.ll b/test/Transforms/InstCombine/2002-05-14-SubFailure.ll
new file mode 100644
index 0000000..34c2df6
--- /dev/null
+++ b/test/Transforms/InstCombine/2002-05-14-SubFailure.ll
@@ -0,0 +1,10 @@
+; Instcombine was missing a test that caused it to make illegal transformations
+; sometimes. In this case, it transforms the sub into an add:
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep sub
+;
+define i32 @test(i32 %i, i32 %j) {
+ %A = mul i32 %i, %j
+ %B = sub i32 2, %A
+ ret i32 %B
+}
+
diff --git a/test/Transforms/InstCombine/2002-05-14-TouchDeletedInst.ll b/test/Transforms/InstCombine/2002-05-14-TouchDeletedInst.ll
new file mode 100644
index 0000000..5e6d63b
--- /dev/null
+++ b/test/Transforms/InstCombine/2002-05-14-TouchDeletedInst.ll
@@ -0,0 +1,510 @@
+; This testcase, obviously distilled from a large program (bzip2 from
+; Specint2000) caused instcombine to fail because it got the same instruction
+; on it's worklist more than once (which is ok), but then deleted the
+; instruction. Since the inst stayed on the worklist, as soon as it came back
+; up to be processed, bad things happened, and opt asserted.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine
+; END.
+;
+
+%.LC0 = internal global [21 x sbyte] c"hbMakeCodeLengths(1)\00" ; <[21 x sbyte]*> [#uses=1]
+%.LC1 = internal global [21 x sbyte] c"hbMakeCodeLengths(2)\00" ; <[21 x sbyte]*> [#uses=1]
+
+implementation ; Functions:
+
+void %hbMakeCodeLengths(ubyte* %len, int* %freq, int %alphaSize, int %maxLen) {
+bb0: ;[#uses=0]
+ %len = alloca ubyte* ; <ubyte**> [#uses=2]
+ store ubyte* %len, ubyte** %len
+ %freq = alloca int* ; <int**> [#uses=2]
+ store int* %freq, int** %freq
+ %alphaSize = alloca int ; <int*> [#uses=2]
+ store int %alphaSize, int* %alphaSize
+ %maxLen = alloca int ; <int*> [#uses=2]
+ store int %maxLen, int* %maxLen
+ %heap = alloca int, uint 260 ; <int*> [#uses=27]
+ %weight = alloca int, uint 516 ; <int*> [#uses=18]
+ %parent = alloca int, uint 516 ; <int*> [#uses=7]
+ br label %bb1
+
+bb1: ;[#uses=2]
+ %reg107 = load ubyte** %len ; <ubyte*> [#uses=1]
+ %reg108 = load int** %freq ; <int*> [#uses=1]
+ %reg109 = load int* %alphaSize ; <int> [#uses=10]
+ %reg110 = load int* %maxLen ; <int> [#uses=1]
+ %cond747 = setge int 0, %reg109 ; <bool> [#uses=1]
+ br bool %cond747, label %bb6, label %bb2
+
+bb2: ;[#uses=2]
+ %reg591 = phi int [ %reg594, %bb5 ], [ 0, %bb1 ] ; <int> [#uses=3]
+ %reg591-idxcast1 = cast int %reg591 to uint ; <uint> [#uses=1]
+ %reg591-idxcast1-offset = add uint %reg591-idxcast1, 1 ; <uint> [#uses=1]
+ %reg591-idxcast1-offset = cast uint %reg591-idxcast1-offset to long ; <long> [#uses=1]
+ %reg126 = getelementptr int* %weight, long %reg591-idxcast1-offset ; <int*> [#uses=1]
+ %reg591-idxcast = cast int %reg591 to long ; <long> [#uses=1]
+ %reg132 = getelementptr int* %reg108, long %reg591-idxcast ; <int*> [#uses=1]
+ %reg133 = load int* %reg132 ; <int> [#uses=2]
+ %cond748 = seteq int %reg133, 0 ; <bool> [#uses=1]
+ br bool %cond748, label %bb4, label %bb3
+
+bb3: ;[#uses=2]
+ %reg127 = shl int %reg133, ubyte 8 ; <int> [#uses=1]
+ br label %bb5
+
+bb4: ;[#uses=2]
+ br label %bb5
+
+bb5: ;[#uses=3]
+ %reg593 = phi int [ 256, %bb4 ], [ %reg127, %bb3 ] ; <int> [#uses=1]
+ store int %reg593, int* %reg126
+ %reg594 = add int %reg591, 1 ; <int> [#uses=2]
+ %cond749 = setlt int %reg594, %reg109 ; <bool> [#uses=1]
+ br bool %cond749, label %bb2, label %bb6
+
+bb6: ;[#uses=6]
+ store int 0, int* %heap
+ store int 0, int* %weight
+ store int -2, int* %parent
+ %cond750 = setgt int 1, %reg109 ; <bool> [#uses=1]
+ br bool %cond750, label %bb11, label %bb7
+
+bb7: ;[#uses=3]
+ %reg597 = phi uint [ %reg598, %bb10 ], [ 0, %bb6 ] ; <uint> [#uses=5]
+ %reg597-casted = cast uint %reg597 to int ; <int> [#uses=1]
+ %reg596 = add int %reg597-casted, 1 ; <int> [#uses=3]
+ %reg597-offset = add uint %reg597, 1 ; <uint> [#uses=1]
+ %reg597-offset = cast uint %reg597-offset to long ; <long> [#uses=1]
+ %reg149 = getelementptr int* %parent, long %reg597-offset ; <int*> [#uses=1]
+ store int -1, int* %reg149
+ %reg598 = add uint %reg597, 1 ; <uint> [#uses=3]
+ %reg597-offset1 = add uint %reg597, 1 ; <uint> [#uses=1]
+ %reg597-offset1 = cast uint %reg597-offset1 to long ; <long> [#uses=1]
+ %reg157 = getelementptr int* %heap, long %reg597-offset1 ; <int*> [#uses=1]
+ store int %reg596, int* %reg157
+ br label %bb9
+
+bb8: ;[#uses=2]
+ %reg599 = cast uint %reg599 to long ; <long> [#uses=1]
+ %reg198 = getelementptr int* %heap, long %reg599 ; <int*> [#uses=1]
+ store int %reg182, int* %reg198
+ %cast938 = cast int %reg174 to uint ; <uint> [#uses=1]
+ br label %bb9
+
+bb9: ;[#uses=2]
+ %reg599 = phi uint [ %cast938, %bb8 ], [ %reg598, %bb7 ] ; <uint> [#uses=3]
+ %cast807 = cast uint %reg599 to int ; <int> [#uses=1]
+ %reg597-offset2 = add uint %reg597, 1 ; <uint> [#uses=1]
+ %reg597-offset2 = cast uint %reg597-offset2 to long ; <long> [#uses=1]
+ %reg173 = getelementptr int* %weight, long %reg597-offset2 ; <int*> [#uses=1]
+ %reg174 = shr int %cast807, ubyte 1 ; <int> [#uses=2]
+ %reg174-idxcast = cast int %reg174 to uint ; <uint> [#uses=1]
+ cast uint %reg174-idxcast to long ; <long>:0 [#uses=1]
+ %reg181 = getelementptr int* %heap, long %0 ; <int*> [#uses=1]
+ %reg182 = load int* %reg181 ; <int> [#uses=2]
+ %reg182-idxcast = cast int %reg182 to uint ; <uint> [#uses=1]
+ cast uint %reg182-idxcast to long ; <long>:1 [#uses=1]
+ %reg189 = getelementptr int* %weight, long %1 ; <int*> [#uses=1]
+ %reg190 = load int* %reg173 ; <int> [#uses=1]
+ %reg191 = load int* %reg189 ; <int> [#uses=1]
+ %cond751 = setlt int %reg190, %reg191 ; <bool> [#uses=1]
+ br bool %cond751, label %bb8, label %bb10
+
+bb10: ;[#uses=3]
+ cast uint %reg599 to long ; <long>:2 [#uses=1]
+ %reg214 = getelementptr int* %heap, long %2 ; <int*> [#uses=1]
+ store int %reg596, int* %reg214
+ %reg601 = add int %reg596, 1 ; <int> [#uses=1]
+ %cond752 = setle int %reg601, %reg109 ; <bool> [#uses=1]
+ br bool %cond752, label %bb7, label %bb11
+
+bb11: ;[#uses=2]
+ %reg602 = phi uint [ %reg598, %bb10 ], [ 0, %bb6 ] ; <uint> [#uses=3]
+ %cast819 = cast uint %reg602 to int ; <int> [#uses=1]
+ %cast818 = cast uint %reg602 to int ; <int> [#uses=1]
+ %cond753 = setle int %cast818, 259 ; <bool> [#uses=1]
+ br bool %cond753, label %bb13, label %bb12
+
+bb12: ;[#uses=1]
+ cast uint 0 to long ; <long>:3 [#uses=1]
+ cast uint 0 to long ; <long>:4 [#uses=1]
+ %cast784 = getelementptr [21 x sbyte]* %.LC0, long %3, long %4 ; <sbyte*> [#uses=1]
+ call void %panic( sbyte* %cast784 )
+ br label %bb13
+
+bb13: ;[#uses=4]
+ %cond754 = setle int %cast819, 1 ; <bool> [#uses=1]
+ %cast918 = cast int %reg109 to uint ; <uint> [#uses=1]
+ %cast940 = cast uint %reg602 to int ; <int> [#uses=1]
+ %cast942 = cast int %reg109 to uint ; <uint> [#uses=1]
+ br bool %cond754, label %bb32, label %bb14
+
+bb14: ;[#uses=5]
+ %cann-indvar1 = phi uint [ 0, %bb13 ], [ %add1-indvar1, %bb31 ] ; <uint> [#uses=3]
+ %cann-indvar1-casted = cast uint %cann-indvar1 to int ; <int> [#uses=1]
+ %reg603-scale = mul int %cann-indvar1-casted, -1 ; <int> [#uses=1]
+ %reg603 = add int %reg603-scale, %cast940 ; <int> [#uses=4]
+ %reg604 = add uint %cann-indvar1, %cast942 ; <uint> [#uses=4]
+ %add1-indvar1 = add uint %cann-indvar1, 1 ; <uint> [#uses=1]
+ cast uint 1 to long ; <long>:5 [#uses=1]
+ %reg7551 = getelementptr int* %heap, long %5 ; <int*> [#uses=1]
+ %reg113 = load int* %reg7551 ; <int> [#uses=2]
+ %reg603-idxcast = cast int %reg603 to uint ; <uint> [#uses=1]
+ cast uint %reg603-idxcast to long ; <long>:6 [#uses=1]
+ %reg222 = getelementptr int* %heap, long %6 ; <int*> [#uses=1]
+ %reg223 = load int* %reg222 ; <int> [#uses=1]
+ cast uint 1 to long ; <long>:7 [#uses=1]
+ %reg7561 = getelementptr int* %heap, long %7 ; <int*> [#uses=1]
+ store int %reg223, int* %reg7561
+ %reg605 = add int %reg603, -1 ; <int> [#uses=4]
+ cast uint 1 to long ; <long>:8 [#uses=1]
+ %reg757 = getelementptr int* %heap, long %8 ; <int*> [#uses=1]
+ %reg226 = load int* %reg757 ; <int> [#uses=2]
+ %cond758 = setgt int 2, %reg605 ; <bool> [#uses=1]
+ br bool %cond758, label %bb20, label %bb15
+
+bb15: ;[#uses=3]
+ %reg606 = phi int [ %reg611, %bb19 ], [ 2, %bb14 ] ; <int> [#uses=6]
+ %reg607 = phi int [ %reg609, %bb19 ], [ 1, %bb14 ] ; <int> [#uses=2]
+ %cond759 = setge int %reg606, %reg605 ; <bool> [#uses=1]
+ br bool %cond759, label %bb18, label %bb16
+
+bb16: ;[#uses=2]
+ %reg606-idxcast = cast int %reg606 to uint ; <uint> [#uses=1]
+ %reg606-idxcast-offset = add uint %reg606-idxcast, 1 ; <uint> [#uses=1]
+ cast uint %reg606-idxcast-offset to long ; <long>:9 [#uses=1]
+ %reg241 = getelementptr int* %heap, long %9 ; <int*> [#uses=1]
+ %reg242 = load int* %reg241 ; <int> [#uses=1]
+ %reg242-idxcast = cast int %reg242 to uint ; <uint> [#uses=1]
+ cast uint %reg242-idxcast to long ; <long>:10 [#uses=1]
+ %reg249 = getelementptr int* %weight, long %10 ; <int*> [#uses=1]
+ %reg606-idxcast1 = cast int %reg606 to uint ; <uint> [#uses=1]
+ cast uint %reg606-idxcast1 to long ; <long>:11 [#uses=1]
+ %reg256 = getelementptr int* %heap, long %11 ; <int*> [#uses=1]
+ %reg257 = load int* %reg256 ; <int> [#uses=1]
+ %reg257-idxcast = cast int %reg257 to uint ; <uint> [#uses=1]
+ cast uint %reg257-idxcast to long ; <long>:12 [#uses=1]
+ %reg264 = getelementptr int* %weight, long %12 ; <int*> [#uses=1]
+ %reg265 = load int* %reg249 ; <int> [#uses=1]
+ %reg266 = load int* %reg264 ; <int> [#uses=1]
+ %cond760 = setge int %reg265, %reg266 ; <bool> [#uses=1]
+ br bool %cond760, label %bb18, label %bb17
+
+bb17: ;[#uses=2]
+ %reg608 = add int %reg606, 1 ; <int> [#uses=1]
+ br label %bb18
+
+bb18: ;[#uses=4]
+ %reg609 = phi int [ %reg608, %bb17 ], [ %reg606, %bb16 ], [ %reg606, %bb15 ] ; <int> [#uses=4]
+ %reg226-idxcast = cast int %reg226 to uint ; <uint> [#uses=1]
+ cast uint %reg226-idxcast to long ; <long>:13 [#uses=1]
+ %reg273 = getelementptr int* %weight, long %13 ; <int*> [#uses=1]
+ %reg609-idxcast = cast int %reg609 to uint ; <uint> [#uses=1]
+ cast uint %reg609-idxcast to long ; <long>:14 [#uses=1]
+ %reg280 = getelementptr int* %heap, long %14 ; <int*> [#uses=1]
+ %reg281 = load int* %reg280 ; <int> [#uses=2]
+ %reg281-idxcast = cast int %reg281 to uint ; <uint> [#uses=1]
+ cast uint %reg281-idxcast to long ; <long>:15 [#uses=1]
+ %reg288 = getelementptr int* %weight, long %15 ; <int*> [#uses=1]
+ %reg289 = load int* %reg273 ; <int> [#uses=1]
+ %reg290 = load int* %reg288 ; <int> [#uses=1]
+ %cond761 = setlt int %reg289, %reg290 ; <bool> [#uses=1]
+ br bool %cond761, label %bb20, label %bb19
+
+bb19: ;[#uses=4]
+ %reg607-idxcast = cast int %reg607 to uint ; <uint> [#uses=1]
+ cast uint %reg607-idxcast to long ; <long>:16 [#uses=1]
+ %reg297 = getelementptr int* %heap, long %16 ; <int*> [#uses=1]
+ store int %reg281, int* %reg297
+ %reg611 = shl int %reg609, ubyte 1 ; <int> [#uses=2]
+ %cond762 = setle int %reg611, %reg605 ; <bool> [#uses=1]
+ br bool %cond762, label %bb15, label %bb20
+
+bb20: ;[#uses=6]
+ %reg612 = phi int [ %reg609, %bb19 ], [ %reg607, %bb18 ], [ 1, %bb14 ] ; <int> [#uses=1]
+ %reg612-idxcast = cast int %reg612 to uint ; <uint> [#uses=1]
+ cast uint %reg612-idxcast to long ; <long>:17 [#uses=1]
+ %reg312 = getelementptr int* %heap, long %17 ; <int*> [#uses=1]
+ store int %reg226, int* %reg312
+ cast uint 1 to long ; <long>:18 [#uses=1]
+ %reg7631 = getelementptr int* %heap, long %18 ; <int*> [#uses=1]
+ %reg114 = load int* %reg7631 ; <int> [#uses=2]
+ %reg603-idxcast1 = cast int %reg603 to uint ; <uint> [#uses=1]
+ %reg603-idxcast1-offset = add uint %reg603-idxcast1, 1073741823 ; <uint> [#uses=1]
+ cast uint %reg603-idxcast1-offset to long ; <long>:19 [#uses=1]
+ %reg319 = getelementptr int* %heap, long %19 ; <int*> [#uses=1]
+ %reg320 = load int* %reg319 ; <int> [#uses=1]
+ cast uint 1 to long ; <long>:20 [#uses=1]
+ %reg7641 = getelementptr int* %heap, long %20 ; <int*> [#uses=1]
+ store int %reg320, int* %reg7641
+ %reg613 = add int %reg605, -1 ; <int> [#uses=4]
+ cast uint 1 to long ; <long>:21 [#uses=1]
+ %reg765 = getelementptr int* %heap, long %21 ; <int*> [#uses=1]
+ %reg323 = load int* %reg765 ; <int> [#uses=2]
+ %cond766 = setgt int 2, %reg613 ; <bool> [#uses=1]
+ br bool %cond766, label %bb26, label %bb21
+
+bb21: ;[#uses=3]
+ %reg614 = phi int [ %reg619, %bb25 ], [ 2, %bb20 ] ; <int> [#uses=6]
+ %reg615 = phi int [ %reg617, %bb25 ], [ 1, %bb20 ] ; <int> [#uses=2]
+ %cond767 = setge int %reg614, %reg613 ; <bool> [#uses=1]
+ br bool %cond767, label %bb24, label %bb22
+
+bb22: ;[#uses=2]
+ %reg614-idxcast = cast int %reg614 to uint ; <uint> [#uses=1]
+ %reg614-idxcast-offset = add uint %reg614-idxcast, 1 ; <uint> [#uses=1]
+ cast uint %reg614-idxcast-offset to long ; <long>:22 [#uses=1]
+ %reg338 = getelementptr int* %heap, long %22 ; <int*> [#uses=1]
+ %reg339 = load int* %reg338 ; <int> [#uses=1]
+ %reg339-idxcast = cast int %reg339 to uint ; <uint> [#uses=1]
+ cast uint %reg339-idxcast to long ; <long>:23 [#uses=1]
+ %reg346 = getelementptr int* %weight, long %23 ; <int*> [#uses=1]
+ %reg614-idxcast1 = cast int %reg614 to uint ; <uint> [#uses=1]
+ cast uint %reg614-idxcast1 to long ; <long>:24 [#uses=1]
+ %reg353 = getelementptr int* %heap, long %24 ; <int*> [#uses=1]
+ %reg354 = load int* %reg353 ; <int> [#uses=1]
+ %reg354-idxcast = cast int %reg354 to uint ; <uint> [#uses=1]
+ cast uint %reg354-idxcast to long ; <long>:25 [#uses=1]
+ %reg361 = getelementptr int* %weight, long %25 ; <int*> [#uses=1]
+ %reg362 = load int* %reg346 ; <int> [#uses=1]
+ %reg363 = load int* %reg361 ; <int> [#uses=1]
+ %cond768 = setge int %reg362, %reg363 ; <bool> [#uses=1]
+ br bool %cond768, label %bb24, label %bb23
+
+bb23: ;[#uses=2]
+ %reg616 = add int %reg614, 1 ; <int> [#uses=1]
+ br label %bb24
+
+bb24: ;[#uses=4]
+ %reg617 = phi int [ %reg616, %bb23 ], [ %reg614, %bb22 ], [ %reg614, %bb21 ] ; <int> [#uses=4]
+ %reg323-idxcast = cast int %reg323 to uint ; <uint> [#uses=1]
+ cast uint %reg323-idxcast to long ; <long>:26 [#uses=1]
+ %reg370 = getelementptr int* %weight, long %26 ; <int*> [#uses=1]
+ %reg617-idxcast = cast int %reg617 to uint ; <uint> [#uses=1]
+ cast uint %reg617-idxcast to long ; <long>:27 [#uses=1]
+ %reg377 = getelementptr int* %heap, long %27 ; <int*> [#uses=1]
+ %reg378 = load int* %reg377 ; <int> [#uses=2]
+ %reg378-idxcast = cast int %reg378 to uint ; <uint> [#uses=1]
+ cast uint %reg378-idxcast to long ; <long>:28 [#uses=1]
+ %reg385 = getelementptr int* %weight, long %28 ; <int*> [#uses=1]
+ %reg386 = load int* %reg370 ; <int> [#uses=1]
+ %reg387 = load int* %reg385 ; <int> [#uses=1]
+ %cond769 = setlt int %reg386, %reg387 ; <bool> [#uses=1]
+ br bool %cond769, label %bb26, label %bb25
+
+bb25: ;[#uses=4]
+ %reg615-idxcast = cast int %reg615 to uint ; <uint> [#uses=1]
+ cast uint %reg615-idxcast to long ; <long>:29 [#uses=1]
+ %reg394 = getelementptr int* %heap, long %29 ; <int*> [#uses=1]
+ store int %reg378, int* %reg394
+ %reg619 = shl int %reg617, ubyte 1 ; <int> [#uses=2]
+ %cond770 = setle int %reg619, %reg613 ; <bool> [#uses=1]
+ br bool %cond770, label %bb21, label %bb26
+
+bb26: ;[#uses=4]
+ %reg620 = phi int [ %reg617, %bb25 ], [ %reg615, %bb24 ], [ 1, %bb20 ] ; <int> [#uses=1]
+ %reg620-idxcast = cast int %reg620 to uint ; <uint> [#uses=1]
+ cast uint %reg620-idxcast to long ; <long>:30 [#uses=1]
+ %reg409 = getelementptr int* %heap, long %30 ; <int*> [#uses=1]
+ store int %reg323, int* %reg409
+ %reg621 = add uint %reg604, 1 ; <uint> [#uses=5]
+ %reg113-idxcast = cast int %reg113 to uint ; <uint> [#uses=1]
+ cast uint %reg113-idxcast to long ; <long>:31 [#uses=1]
+ %reg416 = getelementptr int* %parent, long %31 ; <int*> [#uses=1]
+ %reg114-idxcast = cast int %reg114 to uint ; <uint> [#uses=1]
+ cast uint %reg114-idxcast to long ; <long>:32 [#uses=1]
+ %reg423 = getelementptr int* %parent, long %32 ; <int*> [#uses=1]
+ %cast889 = cast uint %reg621 to int ; <int> [#uses=1]
+ store int %cast889, int* %reg423
+ %cast890 = cast uint %reg621 to int ; <int> [#uses=1]
+ store int %cast890, int* %reg416
+ %reg604-offset = add uint %reg604, 1 ; <uint> [#uses=1]
+ cast uint %reg604-offset to long ; <long>:33 [#uses=1]
+ %reg431 = getelementptr int* %weight, long %33 ; <int*> [#uses=1]
+ %reg113-idxcast2 = cast int %reg113 to uint ; <uint> [#uses=1]
+ cast uint %reg113-idxcast2 to long ; <long>:34 [#uses=1]
+ %reg4381 = getelementptr int* %weight, long %34 ; <int*> [#uses=1]
+ %reg439 = load int* %reg4381 ; <int> [#uses=2]
+ %reg440 = and int %reg439, -256 ; <int> [#uses=1]
+ %reg114-idxcast2 = cast int %reg114 to uint ; <uint> [#uses=1]
+ cast uint %reg114-idxcast2 to long ; <long>:35 [#uses=1]
+ %reg4471 = getelementptr int* %weight, long %35 ; <int*> [#uses=1]
+ %reg448 = load int* %reg4471 ; <int> [#uses=2]
+ %reg449 = and int %reg448, -256 ; <int> [#uses=1]
+ %reg450 = add int %reg440, %reg449 ; <int> [#uses=1]
+ %reg460 = and int %reg439, 255 ; <int> [#uses=2]
+ %reg451 = and int %reg448, 255 ; <int> [#uses=2]
+ %cond771 = setge int %reg451, %reg460 ; <bool> [#uses=1]
+ br bool %cond771, label %bb28, label %bb27
+
+bb27: ;[#uses=2]
+ br label %bb28
+
+bb28: ;[#uses=3]
+ %reg623 = phi int [ %reg460, %bb27 ], [ %reg451, %bb26 ] ; <int> [#uses=1]
+ %reg469 = add int %reg623, 1 ; <int> [#uses=1]
+ %reg470 = or int %reg450, %reg469 ; <int> [#uses=1]
+ store int %reg470, int* %reg431
+ %reg604-offset1 = add uint %reg604, 1 ; <uint> [#uses=1]
+ cast uint %reg604-offset1 to long ; <long>:36 [#uses=1]
+ %reg4771 = getelementptr int* %parent, long %36 ; <int*> [#uses=1]
+ store int -1, int* %reg4771
+ %reg624 = add int %reg613, 1 ; <int> [#uses=2]
+ %reg603-idxcast2 = cast int %reg603 to uint ; <uint> [#uses=1]
+ %reg603-idxcast2-offset = add uint %reg603-idxcast2, 1073741823 ; <uint> [#uses=1]
+ cast uint %reg603-idxcast2-offset to long ; <long>:37 [#uses=1]
+ %reg485 = getelementptr int* %heap, long %37 ; <int*> [#uses=1]
+ %cast902 = cast uint %reg621 to int ; <int> [#uses=1]
+ store int %cast902, int* %reg485
+ br label %bb30
+
+bb29: ;[#uses=2]
+ %reg625-idxcast = cast int %reg625 to uint ; <uint> [#uses=1]
+ cast uint %reg625-idxcast to long ; <long>:38 [#uses=1]
+ %reg526 = getelementptr int* %heap, long %38 ; <int*> [#uses=1]
+ store int %reg510, int* %reg526
+ br label %bb30
+
+bb30: ;[#uses=2]
+ %reg625 = phi int [ %reg502, %bb29 ], [ %reg624, %bb28 ] ; <int> [#uses=3]
+ %reg604-offset2 = add uint %reg604, 1 ; <uint> [#uses=1]
+ cast uint %reg604-offset2 to long ; <long>:39 [#uses=1]
+ %reg501 = getelementptr int* %weight, long %39 ; <int*> [#uses=1]
+ %reg502 = shr int %reg625, ubyte 1 ; <int> [#uses=2]
+ %reg502-idxcast = cast int %reg502 to uint ; <uint> [#uses=1]
+ cast uint %reg502-idxcast to long ; <long>:40 [#uses=1]
+ %reg509 = getelementptr int* %heap, long %40 ; <int*> [#uses=1]
+ %reg510 = load int* %reg509 ; <int> [#uses=2]
+ %reg510-idxcast = cast int %reg510 to uint ; <uint> [#uses=1]
+ cast uint %reg510-idxcast to long ; <long>:41 [#uses=1]
+ %reg517 = getelementptr int* %weight, long %41 ; <int*> [#uses=1]
+ %reg518 = load int* %reg501 ; <int> [#uses=1]
+ %reg519 = load int* %reg517 ; <int> [#uses=1]
+ %cond772 = setlt int %reg518, %reg519 ; <bool> [#uses=1]
+ br bool %cond772, label %bb29, label %bb31
+
+bb31: ;[#uses=3]
+ %reg625-idxcast1 = cast int %reg625 to uint ; <uint> [#uses=1]
+ cast uint %reg625-idxcast1 to long ; <long>:42 [#uses=1]
+ %reg542 = getelementptr int* %heap, long %42 ; <int*> [#uses=1]
+ %cast916 = cast uint %reg621 to int ; <int> [#uses=1]
+ store int %cast916, int* %reg542
+ %cond773 = setgt int %reg624, 1 ; <bool> [#uses=1]
+ br bool %cond773, label %bb14, label %bb32
+
+bb32: ;[#uses=2]
+ %reg627 = phi uint [ %reg621, %bb31 ], [ %cast918, %bb13 ] ; <uint> [#uses=1]
+ %cast919 = cast uint %reg627 to int ; <int> [#uses=1]
+ %cond774 = setle int %cast919, 515 ; <bool> [#uses=1]
+ br bool %cond774, label %bb34, label %bb33
+
+bb33: ;[#uses=1]
+ cast uint 0 to long ; <long>:43 [#uses=1]
+ cast uint 0 to long ; <long>:44 [#uses=1]
+ %cast785 = getelementptr [21 x sbyte]* %.LC1, long %43, long %44 ; <sbyte*> [#uses=1]
+ call void %panic( sbyte* %cast785 )
+ br label %bb34
+
+bb34: ;[#uses=5]
+ %cond775 = setgt int 1, %reg109 ; <bool> [#uses=1]
+ br bool %cond775, label %bb40, label %bb35
+
+bb35: ;[#uses=5]
+ %reg629 = phi ubyte [ %reg639, %bb39 ], [ 0, %bb34 ] ; <ubyte> [#uses=1]
+ %cann-indvar = phi uint [ 0, %bb34 ], [ %add1-indvar, %bb39 ] ; <uint> [#uses=4]
+ %cann-indvar-casted = cast uint %cann-indvar to int ; <int> [#uses=1]
+ %reg630 = add int %cann-indvar-casted, 1 ; <int> [#uses=2]
+ %add1-indvar = add uint %cann-indvar, 1 ; <uint> [#uses=1]
+ %cann-indvar-offset1 = add uint %cann-indvar, 1 ; <uint> [#uses=1]
+ cast uint %cann-indvar-offset1 to long ; <long>:45 [#uses=1]
+ %reg589 = getelementptr int* %parent, long %45 ; <int*> [#uses=1]
+ %reg590 = load int* %reg589 ; <int> [#uses=1]
+ %cond776 = setlt int %reg590, 0 ; <bool> [#uses=1]
+ %parent-idxcast = cast int* %parent to uint ; <uint> [#uses=1]
+ %cast948 = cast int %reg630 to uint ; <uint> [#uses=1]
+ br bool %cond776, label %bb37, label %bb36
+
+bb36: ;[#uses=5]
+ %reg632 = phi uint [ %reg634, %bb36 ], [ %cast948, %bb35 ] ; <uint> [#uses=1]
+ %reg633 = phi uint [ %reg635, %bb36 ], [ 0, %bb35 ] ; <uint> [#uses=3]
+ %reg633-casted = cast uint %reg633 to sbyte* ; <sbyte*> [#uses=0]
+ %reg631-scale = mul uint %reg633, 0 ; <uint> [#uses=1]
+ %reg631-scale = cast uint %reg631-scale to sbyte* ; <sbyte*> [#uses=1]
+ cast uint %parent-idxcast to long ; <long>:46 [#uses=1]
+ %reg6311 = getelementptr sbyte* %reg631-scale, long %46 ; <sbyte*> [#uses=2]
+ %reg632-scale = mul uint %reg632, 4 ; <uint> [#uses=1]
+ cast uint %reg632-scale to long ; <long>:47 [#uses=1]
+ %reg5581 = getelementptr sbyte* %reg6311, long %47 ; <sbyte*> [#uses=1]
+ %cast924 = cast sbyte* %reg5581 to uint* ; <uint*> [#uses=1]
+ %reg634 = load uint* %cast924 ; <uint> [#uses=2]
+ %reg635 = add uint %reg633, 1 ; <uint> [#uses=2]
+ %reg634-scale = mul uint %reg634, 4 ; <uint> [#uses=1]
+ cast uint %reg634-scale to long ; <long>:48 [#uses=1]
+ %reg5501 = getelementptr sbyte* %reg6311, long %48 ; <sbyte*> [#uses=1]
+ %cast925 = cast sbyte* %reg5501 to int* ; <int*> [#uses=1]
+ %reg551 = load int* %cast925 ; <int> [#uses=1]
+ %cond777 = setge int %reg551, 0 ; <bool> [#uses=1]
+ br bool %cond777, label %bb36, label %bb37
+
+bb37: ;[#uses=3]
+ %reg637 = phi uint [ %reg635, %bb36 ], [ 0, %bb35 ] ; <uint> [#uses=2]
+ %cast928 = cast uint %reg637 to int ; <int> [#uses=1]
+ %cann-indvar-offset = add uint %cann-indvar, 1 ; <uint> [#uses=1]
+ cast uint %cann-indvar-offset to long ; <long>:49 [#uses=1]
+ %reg561 = getelementptr ubyte* %reg107, long %49 ; <ubyte*> [#uses=1]
+ cast uint 4294967295 to long ; <long>:50 [#uses=1]
+ %reg778 = getelementptr ubyte* %reg561, long %50 ; <ubyte*> [#uses=1]
+ %cast788 = cast uint %reg637 to ubyte ; <ubyte> [#uses=1]
+ store ubyte %cast788, ubyte* %reg778
+ %cond779 = setle int %cast928, %reg110 ; <bool> [#uses=1]
+ br bool %cond779, label %bb39, label %bb38
+
+bb38: ;[#uses=2]
+ br label %bb39
+
+bb39: ;[#uses=5]
+ %reg639 = phi ubyte [ 1, %bb38 ], [ %reg629, %bb37 ] ; <ubyte> [#uses=2]
+ %reg640 = add int %reg630, 1 ; <int> [#uses=1]
+ %cond780 = setle int %reg640, %reg109 ; <bool> [#uses=1]
+ br bool %cond780, label %bb35, label %bb40
+
+bb40: ;[#uses=2]
+ %reg641 = phi ubyte [ %reg639, %bb39 ], [ 0, %bb34 ] ; <ubyte> [#uses=1]
+ %cond781 = seteq ubyte %reg641, 0 ; <bool> [#uses=1]
+ br bool %cond781, label %bb44, label %bb41
+
+bb41: ;[#uses=2]
+ %cond782 = setge int 1, %reg109 ; <bool> [#uses=1]
+ br bool %cond782, label %bb6, label %bb42
+
+bb42: ;[#uses=3]
+ %cann-indvar2 = phi int [ 0, %bb41 ], [ %add1-indvar2, %bb42 ] ; <int> [#uses=3]
+ %reg643 = add int %cann-indvar2, 1 ; <int> [#uses=1]
+ %add1-indvar2 = add int %cann-indvar2, 1 ; <int> [#uses=1]
+ %cann-indvar2-idxcast = cast int %cann-indvar2 to uint ; <uint> [#uses=1]
+ %cann-indvar2-idxcast-offset = add uint %cann-indvar2-idxcast, 1 ; <uint> [#uses=1]
+ cast uint %cann-indvar2-idxcast-offset to long ; <long>:51 [#uses=1]
+ %reg569 = getelementptr int* %weight, long %51 ; <int*> [#uses=2]
+ %reg570 = load int* %reg569 ; <int> [#uses=2]
+ %reg644 = shr int %reg570, ubyte 8 ; <int> [#uses=1]
+ %reg572 = shr int %reg570, ubyte 31 ; <int> [#uses=1]
+ %cast933 = cast int %reg572 to uint ; <uint> [#uses=1]
+ %reg573 = shr uint %cast933, ubyte 31 ; <uint> [#uses=1]
+ %cast934 = cast uint %reg573 to int ; <int> [#uses=1]
+ %reg574 = add int %reg644, %cast934 ; <int> [#uses=1]
+ %reg571 = shr int %reg574, ubyte 1 ; <int> [#uses=1]
+ %reg645 = add int %reg571, 1 ; <int> [#uses=1]
+ %reg582 = shl int %reg645, ubyte 8 ; <int> [#uses=1]
+ store int %reg582, int* %reg569
+ %reg646 = add int %reg643, 1 ; <int> [#uses=1]
+ %cond783 = setlt int %reg646, %reg109 ; <bool> [#uses=1]
+ br bool %cond783, label %bb42, label %bb43
+
+bb43: ;[#uses=1]
+ br label %bb6
+
+bb44: ;[#uses=1]
+ ret void
+}
+
+declare void %panic(sbyte*)
diff --git a/test/Transforms/InstCombine/2002-08-02-CastTest.ll b/test/Transforms/InstCombine/2002-08-02-CastTest.ll
new file mode 100644
index 0000000..23284a6
--- /dev/null
+++ b/test/Transforms/InstCombine/2002-08-02-CastTest.ll
@@ -0,0 +1,11 @@
+; This testcase is incorrectly getting completely eliminated. There should be
+; SOME instruction named %c here, even if it's a bitwise and.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep %c
+;
+ulong %test3(ulong %A) {
+ %c1 = cast ulong %A to ubyte
+ %c2 = cast ubyte %c1 to ulong
+ ret ulong %c2
+}
+
diff --git a/test/Transforms/InstCombine/2002-09-17-GetElementPtrCrash.ll b/test/Transforms/InstCombine/2002-09-17-GetElementPtrCrash.ll
new file mode 100644
index 0000000..69bec19
--- /dev/null
+++ b/test/Transforms/InstCombine/2002-09-17-GetElementPtrCrash.ll
@@ -0,0 +1,12 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine
+
+%bob = type { int }
+
+int %alias() {
+ %pbob1 = alloca %bob
+ %pbob2 = getelementptr %bob* %pbob1
+ %pbobel = getelementptr %bob* %pbob2, long 0, uint 0
+ %rval = load int* %pbobel
+ ret int %rval
+}
+
diff --git a/test/Transforms/InstCombine/2002-12-05-MissedConstProp.ll b/test/Transforms/InstCombine/2002-12-05-MissedConstProp.ll
new file mode 100644
index 0000000..6233e50
--- /dev/null
+++ b/test/Transforms/InstCombine/2002-12-05-MissedConstProp.ll
@@ -0,0 +1,10 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep add
+
+int %test(int %A) {
+ %A.neg = sub int 0, %A
+ %.neg = sub int 0, 1
+ %X = add int %.neg, 1
+ %Y.neg.ra = add int %A, %X
+ %r = add int %A.neg, %Y.neg.ra
+ ret int %r
+}
diff --git a/test/Transforms/InstCombine/2003-05-26-CastMiscompile.ll b/test/Transforms/InstCombine/2003-05-26-CastMiscompile.ll
new file mode 100644
index 0000000..4532589
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-05-26-CastMiscompile.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep 4294967295
+
+ulong %test(ulong %Val) {
+ %tmp.3 = cast ulong %Val to uint ; <uint> [#uses=1]
+ %tmp.8 = cast uint %tmp.3 to ulong ; <ulong> [#uses=1]
+ ret ulong %tmp.8
+}
diff --git a/test/Transforms/InstCombine/2003-05-27-ConstExprCrash.ll b/test/Transforms/InstCombine/2003-05-27-ConstExprCrash.ll
new file mode 100644
index 0000000..6222169
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-05-27-ConstExprCrash.ll
@@ -0,0 +1,8 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+
+%X = global int 5
+long %test() {
+ %C = add long 1, 2
+ %V = add long cast(int* %X to long), %C
+ ret long %V
+}
diff --git a/test/Transforms/InstCombine/2003-06-05-BranchInvertInfLoop.ll b/test/Transforms/InstCombine/2003-06-05-BranchInvertInfLoop.ll
new file mode 100644
index 0000000..ae823d6
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-06-05-BranchInvertInfLoop.ll
@@ -0,0 +1,14 @@
+; This testcase causes an infinite loop in the instruction combiner,
+; because it things that the constant value is a not expression... and
+; constantly inverts the branch back and forth.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+
+ubyte %test19(bool %c) {
+ br bool true, label %True, label %False
+True:
+ ret ubyte 1
+False:
+ ret ubyte 3
+}
+
diff --git a/test/Transforms/InstCombine/2003-06-22-ConstantExprCrash.ll b/test/Transforms/InstCombine/2003-06-22-ConstantExprCrash.ll
new file mode 100644
index 0000000..92a32d5
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-06-22-ConstantExprCrash.ll
@@ -0,0 +1,12 @@
+; This is a bug in the VMcode library, not instcombine, it's just convenient
+; to expose it here.
+
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+
+%A = global int 1
+%B = global int 2
+
+bool %test() {
+ %C = setlt int* getelementptr (int* %A, long 1), getelementptr (int* %B, long 2) ; Will get promoted to constantexpr
+ ret bool %C
+}
diff --git a/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll b/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll
new file mode 100644
index 0000000..a3c30a8
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll
@@ -0,0 +1,47 @@
+;
+; Test: ExternalConstant
+;
+; Description:
+; This regression test helps check whether the instruction combining
+; optimization pass correctly handles global variables which are marked
+; as external and constant.
+;
+; If a problem occurs, we should die on an assert(). Otherwise, we
+; should pass through the optimizer without failure.
+;
+; Extra code:
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine
+; END.
+;
+
+target endian = little
+target pointersize = 32
+%silly = external constant int ; <int*> [#uses=1]
+
+implementation ; Functions:
+
+declare void %bzero(sbyte*, uint)
+
+declare void %bcopy(sbyte*, sbyte*, uint)
+
+declare int %bcmp(sbyte*, sbyte*, uint)
+
+declare int %fputs(sbyte*, sbyte*)
+
+declare int %fputs_unlocked(sbyte*, sbyte*)
+
+int %function(int %a.1) {
+entry: ; No predecessors!
+ %a.0 = alloca int ; <int*> [#uses=2]
+ %result = alloca int ; <int*> [#uses=2]
+ store int %a.1, int* %a.0
+ %tmp.0 = load int* %a.0 ; <int> [#uses=1]
+ %tmp.1 = load int* %silly ; <int> [#uses=1]
+ %tmp.2 = add int %tmp.0, %tmp.1 ; <int> [#uses=1]
+ store int %tmp.2, int* %result
+ br label %return
+
+return: ; preds = %entry
+ %tmp.3 = load int* %result ; <int> [#uses=1]
+ ret int %tmp.3
+}
diff --git a/test/Transforms/InstCombine/2003-08-12-AllocaNonNull.ll b/test/Transforms/InstCombine/2003-08-12-AllocaNonNull.ll
new file mode 100644
index 0000000..81594db
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-08-12-AllocaNonNull.ll
@@ -0,0 +1,23 @@
+; This testcase can be simplified by "realizing" that alloca can never return
+; null.
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -simplifycfg | \
+; RUN: llvm-dis | not grep br
+
+implementation ; Functions:
+
+declare int %bitmap_clear(...)
+
+int %oof() {
+entry:
+ %live_head = alloca int ; <int*> [#uses=2]
+ %tmp.1 = setne int* %live_head, null ; <bool> [#uses=1]
+ br bool %tmp.1, label %then, label %UnifiedExitNode
+
+then:
+ %tmp.4 = call int (...)* %bitmap_clear( int* %live_head ) ; <int> [#uses=0]
+ br label %UnifiedExitNode
+
+UnifiedExitNode:
+ ret int 0
+}
+
diff --git a/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll b/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll
new file mode 100644
index 0000000..adb1474
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll
@@ -0,0 +1,5 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep load
+void %test(int* %P) {
+ %X = volatile load int* %P ; Dead but not deletable!
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2003-10-23-InstcombineNullFail.ll b/test/Transforms/InstCombine/2003-10-23-InstcombineNullFail.ll
new file mode 100644
index 0000000..837494f
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-10-23-InstcombineNullFail.ll
@@ -0,0 +1,13 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep false
+;
+; This actually looks like a constant propagation bug
+
+%X = type { [10 x int], float }
+
+implementation
+
+bool %test() {
+ %A = getelementptr %X* null, long 0, uint 0, long 0
+ %B = setne int* %A, null
+ ret bool %B
+}
diff --git a/test/Transforms/InstCombine/2003-10-29-CallSiteResolve.ll b/test/Transforms/InstCombine/2003-10-29-CallSiteResolve.ll
new file mode 100644
index 0000000..d23dc32
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-10-29-CallSiteResolve.ll
@@ -0,0 +1,13 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+
+declare int* %bar()
+
+float* %foo() {
+ %tmp.11 = invoke float* cast (int* ()* %bar to float* ()*)()
+ to label %invoke_cont except label %X
+
+invoke_cont:
+ ret float *%tmp.11
+X:
+ ret float *null
+}
diff --git a/test/Transforms/InstCombine/2003-11-03-VarargsCallBug.ll b/test/Transforms/InstCombine/2003-11-03-VarargsCallBug.ll
new file mode 100644
index 0000000..a2e28d9
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-11-03-VarargsCallBug.ll
@@ -0,0 +1,13 @@
+; The cast in this testcase is not eliminable on a 32-bit target!
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep inttoptr
+
+target endian = little
+target pointersize = 32
+
+declare void %foo(...)
+
+void %test(long %X) {
+ %Y = cast long %X to int*
+ call void (...)* %foo(int* %Y)
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2003-11-13-ConstExprCastCall.ll b/test/Transforms/InstCombine/2003-11-13-ConstExprCastCall.ll
new file mode 100644
index 0000000..d257286
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-11-13-ConstExprCastCall.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep call | notcast
+
+declare void %free(sbyte*)
+
+void %test(int* %X) {
+ call int (...)* cast (void (sbyte*)* %free to int (...)*)(int * %X)
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll b/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll
new file mode 100644
index 0000000..ff90c32
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll
@@ -0,0 +1,24 @@
+; Test for a problem afflicting several C++ programs in the testsuite. The
+; instcombine pass is trying to get rid of the cast in the invoke instruction,
+; inserting a cast of the return value after the PHI instruction, but which is
+; used by the PHI instruction. This is bad: because of the semantics of the
+; invoke instruction, we really cannot perform this transformation at all at
+; least without splitting the critical edge.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+
+declare sbyte* %test()
+
+int %foo() {
+entry:
+ br bool true, label %cont, label %call
+call:
+ %P = invoke int*()* cast (sbyte*()* %test to int*()*)()
+ to label %cont except label %N
+cont:
+ %P2 = phi int* [%P, %call], [null, %entry]
+ %V = load int* %P2
+ ret int %V
+N:
+ ret int 0
+}
diff --git a/test/Transforms/InstCombine/2004-02-23-ShiftShiftOverflow.ll b/test/Transforms/InstCombine/2004-02-23-ShiftShiftOverflow.ll
new file mode 100644
index 0000000..85095bf
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-02-23-ShiftShiftOverflow.ll
@@ -0,0 +1,15 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep 34
+
+int %test(int %X) {
+ ; Do not fold into shr X, 34, as this uses undefined behavior!
+ %Y = shr int %X, ubyte 17
+ %Z = shr int %Y, ubyte 17
+ ret int %Z
+}
+
+int %test2(int %X) {
+ ; Do not fold into shl X, 34, as this uses undefined behavior!
+ %Y = shl int %X, ubyte 17
+ %Z = shl int %Y, ubyte 17
+ ret int %Z
+}
diff --git a/test/Transforms/InstCombine/2004-03-13-InstCombineInfLoop.ll b/test/Transforms/InstCombine/2004-03-13-InstCombineInfLoop.ll
new file mode 100644
index 0000000..fd7a2e3
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-03-13-InstCombineInfLoop.ll
@@ -0,0 +1,13 @@
+; This testcase caused the combiner to go into an infinite loop, moving the
+; cast back and forth, changing the seteq to operate on int vs uint and back.
+
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+
+bool %test(uint %A, int %B) {
+ %C = sub uint 0, %A
+ %Cc = cast uint %C to int
+ %D = sub int 0, %B
+ %E = seteq int %Cc, %D
+ ret bool %E
+}
+
diff --git a/test/Transforms/InstCombine/2004-04-04-InstCombineReplaceAllUsesWith.ll b/test/Transforms/InstCombine/2004-04-04-InstCombineReplaceAllUsesWith.ll
new file mode 100644
index 0000000..4c6ff1b
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-04-04-InstCombineReplaceAllUsesWith.ll
@@ -0,0 +1,8 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+
+int %test() {
+ ret int 0
+Loop:
+ %X = add int %X, 1
+ br label %Loop
+}
diff --git a/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll b/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll
new file mode 100644
index 0000000..8e8f019
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+
+%Ty = type opaque
+
+int %test(%Ty *%X) {
+ %Y = cast %Ty* %X to int*
+ %Z = load int* %Y
+ ret int %Z
+}
diff --git a/test/Transforms/InstCombine/2004-07-27-ConstantExprMul.ll b/test/Transforms/InstCombine/2004-07-27-ConstantExprMul.ll
new file mode 100644
index 0000000..7994d45
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-07-27-ConstantExprMul.ll
@@ -0,0 +1,8 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+
+%p = weak global int 0
+
+int %test(int %x) {
+ %y = mul int %x, cast (int* %p to int)
+ ret int %y
+}
diff --git a/test/Transforms/InstCombine/2004-08-09-RemInfLoop.llx b/test/Transforms/InstCombine/2004-08-09-RemInfLoop.llx
new file mode 100644
index 0000000..98b35fe
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-08-09-RemInfLoop.llx
@@ -0,0 +1,8 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine
+
+; This testcase should not send the instcombiner into an infinite loop!
+
+int %test(int %X) {
+ %Y = rem int %X, 0
+ ret int %Y
+}
diff --git a/test/Transforms/InstCombine/2004-08-10-BoolSetCC.ll b/test/Transforms/InstCombine/2004-08-10-BoolSetCC.ll
new file mode 100644
index 0000000..e87e42c
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-08-10-BoolSetCC.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep {ret i1 false}
+bool %test(bool %V) {
+ %Y = setlt bool %V, false
+ ret bool %Y
+}
+
diff --git a/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.llx b/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.llx
new file mode 100644
index 0000000..ddc4039
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.llx
@@ -0,0 +1,21 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -mem2reg | llvm-dis | \
+; RUN: not grep {int 1}
+
+; When propagating the load through the select, make sure that the load is
+; inserted where the original load was, not where the select is. Not doing
+; so could produce incorrect results!
+
+implementation
+
+int %test(bool %C) {
+ %X = alloca int
+ %X2 = alloca int
+ store int 1, int* %X
+ store int 2, int* %X2
+
+ %Y = select bool %C, int* %X, int* %X2
+ store int 3, int* %X
+ %Z = load int* %Y
+ ret int %Z
+}
+
diff --git a/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.llx b/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.llx
new file mode 100644
index 0000000..c3478a8
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.llx
@@ -0,0 +1,24 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -mem2reg -simplifycfg | \
+; RUN: llvm-dis | grep -v store | not grep {int 1}
+
+; Test to make sure that instcombine does not accidentally propagate the load
+; into the PHI, which would break the program.
+
+int %test(bool %C) {
+entry:
+ %X = alloca int
+ %X2 = alloca int
+ store int 1, int* %X
+ store int 2, int* %X2
+ br bool %C, label %cond_true.i, label %cond_continue.i
+
+cond_true.i:
+ br label %cond_continue.i
+
+cond_continue.i:
+ %mem_tmp.i.0 = phi int* [ %X, %cond_true.i ], [ %X2, %entry ]
+ store int 3, int* %X
+ %tmp.3 = load int* %mem_tmp.i.0
+ ret int %tmp.3
+}
+
diff --git a/test/Transforms/InstCombine/2004-09-28-BadShiftAndSetCC.llx b/test/Transforms/InstCombine/2004-09-28-BadShiftAndSetCC.llx
new file mode 100644
index 0000000..bb55b5c
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-09-28-BadShiftAndSetCC.llx
@@ -0,0 +1,9 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep -- -65536
+
+bool %test(int %tmp.124) {
+ %tmp.125 = shl int %tmp.124, ubyte 8
+ %tmp.126.mask = and int %tmp.125, -16777216 ; <int> [#uses=1]
+ %tmp.128 = seteq int %tmp.126.mask, 167772160 ; <bool> [#uses=1]
+ ret bool %tmp.128
+}
+
diff --git a/test/Transforms/InstCombine/2004-11-22-Missed-and-fold.ll b/test/Transforms/InstCombine/2004-11-22-Missed-and-fold.ll
new file mode 100644
index 0000000..1a741c9
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-11-22-Missed-and-fold.ll
@@ -0,0 +1,8 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep and
+
+sbyte %test21(sbyte %A) {
+ %C = shr sbyte %A, ubyte 7 ;; sign extend
+ %D = and sbyte %C, 1 ;; chop off sign
+ ret sbyte %D
+}
+
diff --git a/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll b/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll
new file mode 100644
index 0000000..7e12bbf
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll
@@ -0,0 +1,136 @@
+; This test case tests the InstructionCombining optimization that
+; reduces things like:
+; %Y = cast sbyte %X to uint
+; %C = setlt uint %Y, 1024
+; to
+; %C = bool true
+; It includes test cases for different constant values, signedness of the
+; cast operands, and types of setCC operators. In all cases, the cast should
+; be eliminated. In many cases the setCC is also eliminated based on the
+; constant value and the range of the casted value.
+;
+; RUN: llvm-upgrade %s -o - | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: notcast .*int
+; END.
+
+implementation ; Functions:
+
+bool %lt_signed_to_large_unsigned(sbyte %SB) {
+ %Y = cast sbyte %SB to uint ; <uint> [#uses=1]
+ %C = setlt uint %Y, 1024 ; <bool> [#uses=1]
+ ret bool %C
+}
+
+bool %lt_signed_to_large_signed(sbyte %SB) {
+ %Y = cast sbyte %SB to int
+ %C = setlt int %Y, 1024
+ ret bool %C
+}
+
+bool %lt_signed_to_large_negative(sbyte %SB) {
+ %Y = cast sbyte %SB to int
+ %C = setlt int %Y, -1024
+ ret bool %C
+}
+
+bool %lt_signed_to_small_signed(sbyte %SB) {
+ %Y = cast sbyte %SB to int
+ %C = setlt int %Y, 17
+ ret bool %C
+}
+
+bool %lt_signed_to_small_negative(sbyte %SB) {
+ %Y = cast sbyte %SB to int
+ %C = setlt int %Y, -17
+ ret bool %C
+}
+
+bool %lt_unsigned_to_large_unsigned(ubyte %SB) {
+ %Y = cast ubyte %SB to uint ; <uint> [#uses=1]
+ %C = setlt uint %Y, 1024 ; <bool> [#uses=1]
+ ret bool %C
+}
+
+bool %lt_unsigned_to_large_signed(ubyte %SB) {
+ %Y = cast ubyte %SB to int
+ %C = setlt int %Y, 1024
+ ret bool %C
+}
+
+bool %lt_unsigned_to_large_negative(ubyte %SB) {
+ %Y = cast ubyte %SB to int
+ %C = setlt int %Y, -1024
+ ret bool %C
+}
+
+bool %lt_unsigned_to_small_unsigned(ubyte %SB) {
+ %Y = cast ubyte %SB to uint ; <uint> [#uses=1]
+ %C = setlt uint %Y, 17 ; <bool> [#uses=1]
+ ret bool %C
+}
+
+bool %lt_unsigned_to_small_negative(ubyte %SB) {
+ %Y = cast ubyte %SB to int
+ %C = setlt int %Y, -17
+ ret bool %C
+}
+
+bool %gt_signed_to_large_unsigned(sbyte %SB) {
+ %Y = cast sbyte %SB to uint ; <uint> [#uses=1]
+ %C = setgt uint %Y, 1024 ; <bool> [#uses=1]
+ ret bool %C
+}
+
+bool %gt_signed_to_large_signed(sbyte %SB) {
+ %Y = cast sbyte %SB to int
+ %C = setgt int %Y, 1024
+ ret bool %C
+}
+
+bool %gt_signed_to_large_negative(sbyte %SB) {
+ %Y = cast sbyte %SB to int
+ %C = setgt int %Y, -1024
+ ret bool %C
+}
+
+bool %gt_signed_to_small_signed(sbyte %SB) {
+ %Y = cast sbyte %SB to int
+ %C = setgt int %Y, 17
+ ret bool %C
+}
+
+bool %gt_signed_to_small_negative(sbyte %SB) {
+ %Y = cast sbyte %SB to int
+ %C = setgt int %Y, -17
+ ret bool %C
+}
+
+bool %gt_unsigned_to_large_unsigned(ubyte %SB) {
+ %Y = cast ubyte %SB to uint ; <uint> [#uses=1]
+ %C = setgt uint %Y, 1024 ; <bool> [#uses=1]
+ ret bool %C
+}
+
+bool %gt_unsigned_to_large_signed(ubyte %SB) {
+ %Y = cast ubyte %SB to int
+ %C = setgt int %Y, 1024
+ ret bool %C
+}
+
+bool %gt_unsigned_to_large_negative(ubyte %SB) {
+ %Y = cast ubyte %SB to int
+ %C = setgt int %Y, -1024
+ ret bool %C
+}
+
+bool %gt_unsigned_to_small_unsigned(ubyte %SB) {
+ %Y = cast ubyte %SB to uint ; <uint> [#uses=1]
+ %C = setgt uint %Y, 17 ; <bool> [#uses=1]
+ ret bool %C
+}
+
+bool %gt_unsigned_to_small_negative(ubyte %SB) {
+ %Y = cast ubyte %SB to int
+ %C = setgt int %Y, -17
+ ret bool %C
+}
diff --git a/test/Transforms/InstCombine/2004-12-08-InstCombineCrash.ll b/test/Transforms/InstCombine/2004-12-08-InstCombineCrash.ll
new file mode 100644
index 0000000..6e241f4
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-12-08-InstCombineCrash.ll
@@ -0,0 +1,14 @@
+; RUN: llvm-upgrade %s -o - | llvm-as | opt -instcombine
+
+ %struct.llvm_java_object_base = type opaque
+ "java/lang/Object" = type { %struct.llvm_java_object_base }
+ "java/lang/StringBuffer" = type { "java/lang/Object", int, { "java/lang/Object", uint, [0 x ushort] }*, bool }
+
+implementation ; Functions:
+
+void "java/lang/StringBuffer/append(Ljava/lang/String;)Ljava/lang/StringBuffer;"() {
+bc0:
+ %tmp53 = getelementptr "java/lang/StringBuffer"* null, int 0, uint 1 ; <int*> [#uses=1]
+ store int 0, int* %tmp53
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2004-12-08-RemInfiniteLoop.ll b/test/Transforms/InstCombine/2004-12-08-RemInfiniteLoop.ll
new file mode 100644
index 0000000..fb18ea2
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-12-08-RemInfiniteLoop.ll
@@ -0,0 +1,6 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine
+
+int %test(int %X) {
+ %Y = rem int %X, undef
+ ret int %Y
+}
diff --git a/test/Transforms/InstCombine/2005-03-04-ShiftOverflow.ll b/test/Transforms/InstCombine/2005-03-04-ShiftOverflow.ll
new file mode 100644
index 0000000..9a754d8
--- /dev/null
+++ b/test/Transforms/InstCombine/2005-03-04-ShiftOverflow.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep {ret bool false}
+
+bool %test(ulong %tmp.169) {
+ %tmp.1710 = shr ulong %tmp.169, ubyte 1
+ %tmp.1912 = setgt ulong %tmp.1710, 0
+ ret bool %tmp.1912
+}
+
diff --git a/test/Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll b/test/Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll
new file mode 100644
index 0000000..8e523d3
--- /dev/null
+++ b/test/Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+
+uint %test(bool %C, uint %tmp.15) {
+ %tmp.16 = select bool %C, uint 8, uint 1
+ %tmp.18 = div uint %tmp.15, %tmp.16
+ ret uint %tmp.18
+}
diff --git a/test/Transforms/InstCombine/2005-06-15-DivSelectCrash.ll b/test/Transforms/InstCombine/2005-06-15-DivSelectCrash.ll
new file mode 100644
index 0000000..fa862fe
--- /dev/null
+++ b/test/Transforms/InstCombine/2005-06-15-DivSelectCrash.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+
+int %_Z13func_31585107li(int %l_39521025, int %l_59244666) {
+ %shortcirc_val = select bool false, uint 1, uint 0 ; <uint> [#uses=1]
+ %tmp.8 = div uint 0, %shortcirc_val ; <uint> [#uses=1]
+ %tmp.9 = seteq uint %tmp.8, 0 ; <bool> [#uses=1]
+ %retval = select bool %tmp.9, int %l_59244666, int -1621308501 ; <int> [#uses=1]
+ ret int %retval
+}
diff --git a/test/Transforms/InstCombine/2005-06-15-ShiftSetCCCrash.ll b/test/Transforms/InstCombine/2005-06-15-ShiftSetCCCrash.ll
new file mode 100644
index 0000000..deb6124
--- /dev/null
+++ b/test/Transforms/InstCombine/2005-06-15-ShiftSetCCCrash.ll
@@ -0,0 +1,8 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+; PR577
+
+bool %test() {
+ %tmp.3 = shl int 0, ubyte 41 ; <int> [#uses=1]
+ %tmp.4 = setne int %tmp.3, 0 ; <bool> [#uses=1]
+ ret bool %tmp.4
+}
diff --git a/test/Transforms/InstCombine/2005-06-16-RangeCrash.ll b/test/Transforms/InstCombine/2005-06-16-RangeCrash.ll
new file mode 100644
index 0000000..ef44301
--- /dev/null
+++ b/test/Transforms/InstCombine/2005-06-16-RangeCrash.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+; PR585
+bool %test() {
+ %tmp.26 = div int 0, -2147483648 ; <int> [#uses=1]
+ %tmp.27 = seteq int %tmp.26, 0 ; <bool> [#uses=1]
+ ret bool %tmp.27
+}
diff --git a/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll b/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll
new file mode 100644
index 0000000..764b035
--- /dev/null
+++ b/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep {ret i1 true}
+; PR586
+
+%g_07918478 = external global uint ; <uint*> [#uses=1]
+
+implementation ; Functions:
+
+bool %test() {
+ %tmp.0 = load uint* %g_07918478 ; <uint> [#uses=2]
+ %tmp.1 = setne uint %tmp.0, 0 ; <bool> [#uses=1]
+ %tmp.4 = setlt uint %tmp.0, 4111 ; <bool> [#uses=1]
+ %bothcond = or bool %tmp.1, %tmp.4 ; <bool> [#uses=1]
+ ret bool %bothcond
+}
+
diff --git a/test/Transforms/InstCombine/2005-07-07-DeadPHILoop.ll b/test/Transforms/InstCombine/2005-07-07-DeadPHILoop.ll
new file mode 100644
index 0000000..be9837d
--- /dev/null
+++ b/test/Transforms/InstCombine/2005-07-07-DeadPHILoop.ll
@@ -0,0 +1,13 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+
+; This example caused instcombine to spin into an infinite loop.
+
+void %test(int *%P) {
+ ret void
+Dead:
+ %X = phi int [%Y, %Dead]
+ %Y = div int %X, 10
+ store int %Y, int* %P
+ br label %Dead
+}
+
diff --git a/test/Transforms/InstCombine/2006-02-07-SextZextCrash.ll b/test/Transforms/InstCombine/2006-02-07-SextZextCrash.ll
new file mode 100644
index 0000000..e06dca9
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-02-07-SextZextCrash.ll
@@ -0,0 +1,22 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+
+ %struct.rtx_const = type { uint, { %union.real_extract } }
+ %struct.rtx_def = type { int, [1 x %union.rtunion_def] }
+ %union.real_extract = type { double }
+ %union.rtunion_def = type { uint }
+
+implementation ; Functions:
+
+fastcc void %decode_rtx_const(%struct.rtx_def* %x, %struct.rtx_const* %value) {
+ %tmp.54 = getelementptr %struct.rtx_const* %value, int 0, uint 0 ; <uint*> [#uses=1]
+ %tmp.56 = getelementptr %struct.rtx_def* %x, int 0, uint 0 ; <int*> [#uses=1]
+ %tmp.57 = load int* %tmp.56 ; <int> [#uses=1]
+ %tmp.58 = shl int %tmp.57, ubyte 8 ; <int> [#uses=1]
+ %tmp.59 = shr int %tmp.58, ubyte 24 ; <int> [#uses=1]
+ %tmp.60 = cast int %tmp.59 to ushort ; <ushort> [#uses=1]
+ %tmp.61 = cast ushort %tmp.60 to uint ; <uint> [#uses=1]
+ %tmp.62 = shl uint %tmp.61, ubyte 16 ; <uint> [#uses=1]
+ %tmp.65 = or uint 0, %tmp.62 ; <uint> [#uses=1]
+ store uint %tmp.65, uint* %tmp.54
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2006-02-13-DemandedMiscompile.ll b/test/Transforms/InstCombine/2006-02-13-DemandedMiscompile.ll
new file mode 100644
index 0000000..73ad700
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-02-13-DemandedMiscompile.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep undef
+
+int %test(sbyte %A) {
+ %B = cast sbyte %A to int
+ %C = shr int %B, ubyte 8
+ ret int %C
+}
+
diff --git a/test/Transforms/InstCombine/2006-02-28-Crash.ll b/test/Transforms/InstCombine/2006-02-28-Crash.ll
new file mode 100644
index 0000000..241c254
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-02-28-Crash.ll
@@ -0,0 +1,6 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+int %test() {
+ %tmp203 = seteq uint 1, 2 ; <bool> [#uses=1]
+ %tmp203 = cast bool %tmp203 to int ; <int> [#uses=1]
+ ret int %tmp203
+}
diff --git a/test/Transforms/InstCombine/2006-03-30-ExtractElement.ll b/test/Transforms/InstCombine/2006-03-30-ExtractElement.ll
new file mode 100644
index 0000000..3149460
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-03-30-ExtractElement.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+
+float %test(<4 x float> %V) {
+ %V2 = insertelement <4 x float> %V, float 1.0, uint 3
+ %R = extractelement <4 x float> %V2, uint 2
+ ret float %R
+}
diff --git a/test/Transforms/InstCombine/2006-04-01-InfLoop.ll b/test/Transforms/InstCombine/2006-04-01-InfLoop.ll
new file mode 100644
index 0000000..81c1690
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-04-01-InfLoop.ll
@@ -0,0 +1,442 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+; END.
+
+ %struct.DecRefPicMarking_s = type { int, int, int, int, int, %struct.DecRefPicMarking_s* }
+ %struct.datapartition = type { %typedef.Bitstream*, %typedef.DecodingEnvironment, int (%struct.syntaxelement*, %struct.img_par*, %struct.inp_par*, %struct.datapartition*)* }
+ %struct.img_par = type { int, uint, uint, int, int*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, [16 x [16 x ushort]], [6 x [32 x int]], [16 x [16 x int]], [4 x [12 x [4 x [4 x int]]]], [16 x int], int**, int*, int***, int**, int, int, int, int, %typedef.Slice*, %struct.macroblock*, int, int, int, int, int, int, int**, %struct.DecRefPicMarking_s*, int, int, int, int, int, int, int, uint, int, int, int, uint, uint, uint, uint, int, [3 x int], int, uint, int, uint, int, int, int, uint, uint, int, int, int, int, uint, uint, int***, int***, int****, int, int, uint, int, int, int, int, uint, uint, uint, uint, uint, uint, uint, int, int, int, int, int, int, int, int, int, int, int, uint, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, %struct.timeb, %struct.timeb, int, int, int, int, int, uint, int, int }
+ %struct.inp_par = type { [100 x sbyte], [100 x sbyte], [100 x sbyte], int, int, int, int, int, int, int }
+ %struct.macroblock = type { int, int, int, %struct.macroblock*, %struct.macroblock*, int, [2 x [4 x [4 x [2 x int]]]], int, long, long, int, int, [4 x int], [4 x int], int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int }
+ %struct.pix_pos = type { int, int, int, int, int, int }
+ %struct.storable_picture = type { uint, int, int, int, int, [50 x [6 x [33 x long]]], [50 x [6 x [33 x long]]], [50 x [6 x [33 x long]]], [50 x [6 x [33 x long]]], uint, int, int, int, int, int, int, int, short, int, int, int, int, int, int, int, uint, uint, ushort**, ushort***, ubyte*, short**, sbyte***, long***, long***, short****, ubyte**, ubyte**, %struct.storable_picture*, %struct.storable_picture*, %struct.storable_picture*, int, int, int, int, int, int, int, int, int, int, int, int, int, [2 x int], int, %struct.DecRefPicMarking_s*, int }
+ %struct.syntaxelement = type { int, int, int, int, int, uint, int, int, void (int, int, int*, int*)*, void (%struct.syntaxelement*, %struct.inp_par*, %struct.img_par*, %typedef.DecodingEnvironment*)* }
+ %struct.timeb = type { int, ushort, short, short }
+ %typedef.BiContextType = type { ushort, ubyte }
+ %typedef.Bitstream = type { int, int, int, int, ubyte*, int }
+ %typedef.DecodingEnvironment = type { uint, uint, uint, uint, int, ubyte*, int* }
+ %typedef.MotionInfoContexts = type { [4 x [11 x %typedef.BiContextType]], [2 x [9 x %typedef.BiContextType]], [2 x [10 x %typedef.BiContextType]], [2 x [6 x %typedef.BiContextType]], [4 x %typedef.BiContextType], [4 x %typedef.BiContextType], [3 x %typedef.BiContextType] }
+ %typedef.Slice = type { int, int, int, int, uint, int, int, int, int, %struct.datapartition*, %typedef.MotionInfoContexts*, %typedef.TextureInfoContexts*, int, int*, int*, int*, int, int*, int*, int*, int (%struct.img_par*, %struct.inp_par*)*, int, int, int, int }
+ %typedef.TextureInfoContexts = type { [2 x %typedef.BiContextType], [4 x %typedef.BiContextType], [3 x [4 x %typedef.BiContextType]], [10 x [4 x %typedef.BiContextType]], [10 x [15 x %typedef.BiContextType]], [10 x [15 x %typedef.BiContextType]], [10 x [5 x %typedef.BiContextType]], [10 x [5 x %typedef.BiContextType]], [10 x [15 x %typedef.BiContextType]], [10 x [15 x %typedef.BiContextType]] }
+%dec_picture = external global %struct.storable_picture* ; <%struct.storable_picture**> [#uses=1]
+%last_dquant = external global int ; <int*> [#uses=1]
+
+implementation ; Functions:
+
+void %readCBP_CABAC(%struct.syntaxelement* %se, %struct.inp_par* %inp, %struct.img_par* %img.1, %typedef.DecodingEnvironment* %dep_dp) {
+entry:
+ %block_a = alloca %struct.pix_pos ; <%struct.pix_pos*> [#uses=5]
+ %tmp.1 = getelementptr %struct.img_par* %img.1, int 0, uint 37 ; <%typedef.Slice**> [#uses=1]
+ %tmp.2 = load %typedef.Slice** %tmp.1 ; <%typedef.Slice*> [#uses=1]
+ %tmp.3 = getelementptr %typedef.Slice* %tmp.2, int 0, uint 11 ; <%typedef.TextureInfoContexts**> [#uses=1]
+ %tmp.4 = load %typedef.TextureInfoContexts** %tmp.3 ; <%typedef.TextureInfoContexts*> [#uses=3]
+ %tmp.6 = getelementptr %struct.img_par* %img.1, int 0, uint 38 ; <%struct.macroblock**> [#uses=1]
+ %tmp.7 = load %struct.macroblock** %tmp.6 ; <%struct.macroblock*> [#uses=1]
+ %tmp.9 = getelementptr %struct.img_par* %img.1, int 0, uint 1 ; <uint*> [#uses=1]
+ %tmp.10 = load uint* %tmp.9 ; <uint> [#uses=1]
+ %tmp.11 = cast uint %tmp.10 to int ; <int> [#uses=1]
+ %tmp.12 = getelementptr %struct.macroblock* %tmp.7, int %tmp.11 ; <%struct.macroblock*> [#uses=18]
+ br label %loopentry.0
+
+loopentry.0: ; preds = %loopexit.1, %entry
+ %mask.1 = phi int [ undef, %entry ], [ %mask.0, %loopexit.1 ] ; <int> [#uses=1]
+ %cbp_bit.1 = phi int [ undef, %entry ], [ %cbp_bit.0, %loopexit.1 ] ; <int> [#uses=1]
+ %cbp.2 = phi int [ 0, %entry ], [ %cbp.1, %loopexit.1 ] ; <int> [#uses=5]
+ %curr_cbp_ctx.1 = phi int [ undef, %entry ], [ %curr_cbp_ctx.0, %loopexit.1 ] ; <int> [#uses=1]
+ %b.2 = phi int [ undef, %entry ], [ %b.1, %loopexit.1 ] ; <int> [#uses=1]
+ %a.2 = phi int [ undef, %entry ], [ %a.1, %loopexit.1 ] ; <int> [#uses=1]
+ %mb_y.0 = phi int [ 0, %entry ], [ %tmp.152, %loopexit.1 ] ; <int> [#uses=7]
+ %mb_x.0 = phi int [ undef, %entry ], [ %mb_x.1, %loopexit.1 ] ; <int> [#uses=0]
+ %tmp.14 = setle int %mb_y.0, 3 ; <bool> [#uses=2]
+ %tmp.15 = cast bool %tmp.14 to int ; <int> [#uses=0]
+ br bool %tmp.14, label %no_exit.0, label %loopexit.0
+
+no_exit.0: ; preds = %loopentry.0
+ br label %loopentry.1
+
+loopentry.1: ; preds = %endif.7, %no_exit.0
+ %mask.0 = phi int [ %mask.1, %no_exit.0 ], [ %tmp.131, %endif.7 ] ; <int> [#uses=1]
+ %cbp_bit.0 = phi int [ %cbp_bit.1, %no_exit.0 ], [ %tmp.142, %endif.7 ] ; <int> [#uses=1]
+ %cbp.1 = phi int [ %cbp.2, %no_exit.0 ], [ %cbp.0, %endif.7 ] ; <int> [#uses=5]
+ %curr_cbp_ctx.0 = phi int [ %curr_cbp_ctx.1, %no_exit.0 ], [ %tmp.125, %endif.7 ] ; <int> [#uses=1]
+ %b.1 = phi int [ %b.2, %no_exit.0 ], [ %b.0, %endif.7 ] ; <int> [#uses=1]
+ %a.1 = phi int [ %a.2, %no_exit.0 ], [ %a.0, %endif.7 ] ; <int> [#uses=1]
+ %mb_x.1 = phi int [ 0, %no_exit.0 ], [ %tmp.150, %endif.7 ] ; <int> [#uses=9]
+ %tmp.17 = setle int %mb_x.1, 3 ; <bool> [#uses=2]
+ %tmp.18 = cast bool %tmp.17 to int ; <int> [#uses=0]
+ br bool %tmp.17, label %no_exit.1, label %loopexit.1
+
+no_exit.1: ; preds = %loopentry.1
+ %tmp.20 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 12 ; <[4 x int]*> [#uses=1]
+ %tmp.22 = div int %mb_x.1, 2 ; <int> [#uses=1]
+ %tmp.24 = add int %tmp.22, %mb_y.0 ; <int> [#uses=1]
+ %tmp.25 = getelementptr [4 x int]* %tmp.20, int 0, int %tmp.24 ; <int*> [#uses=1]
+ %tmp.26 = load int* %tmp.25 ; <int> [#uses=1]
+ %tmp.27 = seteq int %tmp.26, 11 ; <bool> [#uses=2]
+ %tmp.28 = cast bool %tmp.27 to int ; <int> [#uses=0]
+ br bool %tmp.27, label %then.0, label %else.0
+
+then.0: ; preds = %no_exit.1
+ br label %endif.0
+
+else.0: ; preds = %no_exit.1
+ br label %endif.0
+
+endif.0: ; preds = %else.0, %then.0
+ %tmp.30 = seteq int %mb_y.0, 0 ; <bool> [#uses=2]
+ %tmp.31 = cast bool %tmp.30 to int ; <int> [#uses=0]
+ br bool %tmp.30, label %then.1, label %else.1
+
+then.1: ; preds = %endif.0
+ %tmp.33 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 3 ; <%struct.macroblock**> [#uses=1]
+ %tmp.34 = load %struct.macroblock** %tmp.33 ; <%struct.macroblock*> [#uses=1]
+ %tmp.35 = cast %struct.macroblock* %tmp.34 to sbyte* ; <sbyte*> [#uses=1]
+ %tmp.36 = seteq sbyte* %tmp.35, null ; <bool> [#uses=2]
+ %tmp.37 = cast bool %tmp.36 to int ; <int> [#uses=0]
+ br bool %tmp.36, label %then.2, label %else.2
+
+then.2: ; preds = %then.1
+ br label %endif.1
+
+else.2: ; preds = %then.1
+ %tmp.39 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 3 ; <%struct.macroblock**> [#uses=1]
+ %tmp.40 = load %struct.macroblock** %tmp.39 ; <%struct.macroblock*> [#uses=1]
+ %tmp.41 = getelementptr %struct.macroblock* %tmp.40, int 0, uint 5 ; <int*> [#uses=1]
+ %tmp.42 = load int* %tmp.41 ; <int> [#uses=1]
+ %tmp.43 = seteq int %tmp.42, 14 ; <bool> [#uses=2]
+ %tmp.44 = cast bool %tmp.43 to int ; <int> [#uses=0]
+ br bool %tmp.43, label %then.3, label %else.3
+
+then.3: ; preds = %else.2
+ br label %endif.1
+
+else.3: ; preds = %else.2
+ %tmp.46 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 3 ; <%struct.macroblock**> [#uses=1]
+ %tmp.47 = load %struct.macroblock** %tmp.46 ; <%struct.macroblock*> [#uses=1]
+ %tmp.48 = getelementptr %struct.macroblock* %tmp.47, int 0, uint 7 ; <int*> [#uses=1]
+ %tmp.49 = load int* %tmp.48 ; <int> [#uses=1]
+ %tmp.51 = div int %mb_x.1, 2 ; <int> [#uses=1]
+ %tmp.52 = add int %tmp.51, 2 ; <int> [#uses=1]
+ %tmp.53 = cast int %tmp.52 to ubyte ; <ubyte> [#uses=1]
+ %tmp.54 = shr int %tmp.49, ubyte %tmp.53 ; <int> [#uses=1]
+ %tmp.55 = cast int %tmp.54 to uint ; <uint> [#uses=1]
+ %tmp.57 = xor uint %tmp.55, 1 ; <uint> [#uses=1]
+ %tmp.58 = cast uint %tmp.57 to int ; <int> [#uses=1]
+ %tmp.59 = and int %tmp.58, 1 ; <int> [#uses=1]
+ br label %endif.1
+
+else.1: ; preds = %endif.0
+ %tmp.62 = div int %mb_x.1, 2 ; <int> [#uses=1]
+ %tmp.63 = cast int %tmp.62 to ubyte ; <ubyte> [#uses=1]
+ %tmp.64 = shr int %cbp.1, ubyte %tmp.63 ; <int> [#uses=1]
+ %tmp.65 = cast int %tmp.64 to uint ; <uint> [#uses=1]
+ %tmp.67 = xor uint %tmp.65, 1 ; <uint> [#uses=1]
+ %tmp.68 = cast uint %tmp.67 to int ; <int> [#uses=1]
+ %tmp.69 = and int %tmp.68, 1 ; <int> [#uses=1]
+ br label %endif.1
+
+endif.1: ; preds = %else.1, %else.3, %then.3, %then.2
+ %b.0 = phi int [ 0, %then.2 ], [ 0, %then.3 ], [ %tmp.59, %else.3 ], [ %tmp.69, %else.1 ] ; <int> [#uses=2]
+ %tmp.71 = seteq int %mb_x.1, 0 ; <bool> [#uses=2]
+ %tmp.72 = cast bool %tmp.71 to int ; <int> [#uses=0]
+ br bool %tmp.71, label %then.4, label %else.4
+
+then.4: ; preds = %endif.1
+ %tmp.74 = getelementptr %struct.img_par* %img.1, int 0, uint 1 ; <uint*> [#uses=1]
+ %tmp.75 = load uint* %tmp.74 ; <uint> [#uses=1]
+ %tmp.76 = cast uint %tmp.75 to int ; <int> [#uses=1]
+ call void %getLuma4x4Neighbour( int %tmp.76, int %mb_x.1, int %mb_y.0, int -1, int 0, %struct.pix_pos* %block_a )
+ %tmp.79 = getelementptr %struct.pix_pos* %block_a, int 0, uint 0 ; <int*> [#uses=1]
+ %tmp.80 = load int* %tmp.79 ; <int> [#uses=1]
+ %tmp.81 = setne int %tmp.80, 0 ; <bool> [#uses=2]
+ %tmp.82 = cast bool %tmp.81 to int ; <int> [#uses=0]
+ br bool %tmp.81, label %then.5, label %else.5
+
+then.5: ; preds = %then.4
+ %tmp.84 = getelementptr %struct.img_par* %img.1, int 0, uint 38 ; <%struct.macroblock**> [#uses=1]
+ %tmp.85 = load %struct.macroblock** %tmp.84 ; <%struct.macroblock*> [#uses=1]
+ %tmp.86 = getelementptr %struct.pix_pos* %block_a, int 0, uint 1 ; <int*> [#uses=1]
+ %tmp.87 = load int* %tmp.86 ; <int> [#uses=1]
+ %tmp.88 = getelementptr %struct.macroblock* %tmp.85, int %tmp.87 ; <%struct.macroblock*> [#uses=1]
+ %tmp.89 = getelementptr %struct.macroblock* %tmp.88, int 0, uint 5 ; <int*> [#uses=1]
+ %tmp.90 = load int* %tmp.89 ; <int> [#uses=1]
+ %tmp.91 = seteq int %tmp.90, 14 ; <bool> [#uses=2]
+ %tmp.92 = cast bool %tmp.91 to int ; <int> [#uses=0]
+ br bool %tmp.91, label %then.6, label %else.6
+
+then.6: ; preds = %then.5
+ br label %endif.4
+
+else.6: ; preds = %then.5
+ %tmp.94 = getelementptr %struct.img_par* %img.1, int 0, uint 38 ; <%struct.macroblock**> [#uses=1]
+ %tmp.95 = load %struct.macroblock** %tmp.94 ; <%struct.macroblock*> [#uses=1]
+ %tmp.96 = getelementptr %struct.pix_pos* %block_a, int 0, uint 1 ; <int*> [#uses=1]
+ %tmp.97 = load int* %tmp.96 ; <int> [#uses=1]
+ %tmp.98 = getelementptr %struct.macroblock* %tmp.95, int %tmp.97 ; <%struct.macroblock*> [#uses=1]
+ %tmp.99 = getelementptr %struct.macroblock* %tmp.98, int 0, uint 7 ; <int*> [#uses=1]
+ %tmp.100 = load int* %tmp.99 ; <int> [#uses=1]
+ %tmp.101 = getelementptr %struct.pix_pos* %block_a, int 0, uint 3 ; <int*> [#uses=1]
+ %tmp.102 = load int* %tmp.101 ; <int> [#uses=1]
+ %tmp.103 = div int %tmp.102, 2 ; <int> [#uses=1]
+ %tmp.104 = mul int %tmp.103, 2 ; <int> [#uses=1]
+ %tmp.105 = add int %tmp.104, 1 ; <int> [#uses=1]
+ %tmp.106 = cast int %tmp.105 to ubyte ; <ubyte> [#uses=1]
+ %tmp.107 = shr int %tmp.100, ubyte %tmp.106 ; <int> [#uses=1]
+ %tmp.108 = cast int %tmp.107 to uint ; <uint> [#uses=1]
+ %tmp.110 = xor uint %tmp.108, 1 ; <uint> [#uses=1]
+ %tmp.111 = cast uint %tmp.110 to int ; <int> [#uses=1]
+ %tmp.112 = and int %tmp.111, 1 ; <int> [#uses=1]
+ br label %endif.4
+
+else.5: ; preds = %then.4
+ br label %endif.4
+
+else.4: ; preds = %endif.1
+ %tmp.115 = cast int %mb_y.0 to ubyte ; <ubyte> [#uses=1]
+ %tmp.116 = shr int %cbp.1, ubyte %tmp.115 ; <int> [#uses=1]
+ %tmp.117 = cast int %tmp.116 to uint ; <uint> [#uses=1]
+ %tmp.119 = xor uint %tmp.117, 1 ; <uint> [#uses=1]
+ %tmp.120 = cast uint %tmp.119 to int ; <int> [#uses=1]
+ %tmp.121 = and int %tmp.120, 1 ; <int> [#uses=1]
+ br label %endif.4
+
+endif.4: ; preds = %else.4, %else.5, %else.6, %then.6
+ %a.0 = phi int [ 0, %then.6 ], [ %tmp.112, %else.6 ], [ 0, %else.5 ], [ %tmp.121, %else.4 ] ; <int> [#uses=2]
+ %tmp.123 = mul int %b.0, 2 ; <int> [#uses=1]
+ %tmp.125 = add int %tmp.123, %a.0 ; <int> [#uses=2]
+ %tmp.127 = div int %mb_x.1, 2 ; <int> [#uses=1]
+ %tmp.129 = add int %tmp.127, %mb_y.0 ; <int> [#uses=1]
+ %tmp.130 = cast int %tmp.129 to ubyte ; <ubyte> [#uses=1]
+ %tmp.131 = shl int 1, ubyte %tmp.130 ; <int> [#uses=2]
+ %tmp.135 = getelementptr %typedef.TextureInfoContexts* %tmp.4, int 0, uint 2 ; <[3 x [4 x %typedef.BiContextType]]*> [#uses=1]
+ %tmp.136 = getelementptr [3 x [4 x %typedef.BiContextType]]* %tmp.135, int 0, int 0 ; <[4 x %typedef.BiContextType]*> [#uses=1]
+ %tmp.137 = getelementptr [4 x %typedef.BiContextType]* %tmp.136, int 0, int 0 ; <%typedef.BiContextType*> [#uses=1]
+ %tmp.139 = cast int %tmp.125 to uint ; <uint> [#uses=1]
+ %tmp.140 = cast uint %tmp.139 to int ; <int> [#uses=1]
+ %tmp.141 = getelementptr %typedef.BiContextType* %tmp.137, int %tmp.140 ; <%typedef.BiContextType*> [#uses=1]
+ %tmp.132 = call uint %biari_decode_symbol( %typedef.DecodingEnvironment* %dep_dp, %typedef.BiContextType* %tmp.141 ) ; <uint> [#uses=1]
+ %tmp.142 = cast uint %tmp.132 to int ; <int> [#uses=2]
+ %tmp.144 = setne int %tmp.142, 0 ; <bool> [#uses=2]
+ %tmp.145 = cast bool %tmp.144 to int ; <int> [#uses=0]
+ br bool %tmp.144, label %then.7, label %endif.7
+
+then.7: ; preds = %endif.4
+ %tmp.148 = add int %cbp.1, %tmp.131 ; <int> [#uses=1]
+ br label %endif.7
+
+endif.7: ; preds = %then.7, %endif.4
+ %cbp.0 = phi int [ %tmp.148, %then.7 ], [ %cbp.1, %endif.4 ] ; <int> [#uses=1]
+ %tmp.150 = add int %mb_x.1, 2 ; <int> [#uses=1]
+ br label %loopentry.1
+
+loopexit.1: ; preds = %loopentry.1
+ %tmp.152 = add int %mb_y.0, 2 ; <int> [#uses=1]
+ br label %loopentry.0
+
+loopexit.0: ; preds = %loopentry.0
+ %tmp.153 = load %struct.storable_picture** %dec_picture ; <%struct.storable_picture*> [#uses=1]
+ %tmp.154 = getelementptr %struct.storable_picture* %tmp.153, int 0, uint 45 ; <int*> [#uses=1]
+ %tmp.155 = load int* %tmp.154 ; <int> [#uses=1]
+ %tmp.156 = setne int %tmp.155, 0 ; <bool> [#uses=2]
+ %tmp.157 = cast bool %tmp.156 to int ; <int> [#uses=0]
+ br bool %tmp.156, label %then.8, label %endif.8
+
+then.8: ; preds = %loopexit.0
+ %tmp.159 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 3 ; <%struct.macroblock**> [#uses=1]
+ %tmp.160 = load %struct.macroblock** %tmp.159 ; <%struct.macroblock*> [#uses=1]
+ %tmp.161 = cast %struct.macroblock* %tmp.160 to sbyte* ; <sbyte*> [#uses=1]
+ %tmp.162 = setne sbyte* %tmp.161, null ; <bool> [#uses=2]
+ %tmp.163 = cast bool %tmp.162 to int ; <int> [#uses=0]
+ br bool %tmp.162, label %then.9, label %endif.9
+
+then.9: ; preds = %then.8
+ %tmp.165 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 3 ; <%struct.macroblock**> [#uses=1]
+ %tmp.166 = load %struct.macroblock** %tmp.165 ; <%struct.macroblock*> [#uses=1]
+ %tmp.167 = getelementptr %struct.macroblock* %tmp.166, int 0, uint 5 ; <int*> [#uses=1]
+ %tmp.168 = load int* %tmp.167 ; <int> [#uses=1]
+ %tmp.169 = seteq int %tmp.168, 14 ; <bool> [#uses=2]
+ %tmp.170 = cast bool %tmp.169 to int ; <int> [#uses=0]
+ br bool %tmp.169, label %then.10, label %else.7
+
+then.10: ; preds = %then.9
+ br label %endif.9
+
+else.7: ; preds = %then.9
+ %tmp.172 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 3 ; <%struct.macroblock**> [#uses=1]
+ %tmp.173 = load %struct.macroblock** %tmp.172 ; <%struct.macroblock*> [#uses=1]
+ %tmp.174 = getelementptr %struct.macroblock* %tmp.173, int 0, uint 7 ; <int*> [#uses=1]
+ %tmp.175 = load int* %tmp.174 ; <int> [#uses=1]
+ %tmp.176 = setgt int %tmp.175, 15 ; <bool> [#uses=1]
+ %tmp.177 = cast bool %tmp.176 to int ; <int> [#uses=1]
+ br label %endif.9
+
+endif.9: ; preds = %else.7, %then.10, %then.8
+ %b.4 = phi int [ 1, %then.10 ], [ %tmp.177, %else.7 ], [ 0, %then.8 ] ; <int> [#uses=1]
+ %tmp.179 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 4 ; <%struct.macroblock**> [#uses=1]
+ %tmp.180 = load %struct.macroblock** %tmp.179 ; <%struct.macroblock*> [#uses=1]
+ %tmp.181 = cast %struct.macroblock* %tmp.180 to sbyte* ; <sbyte*> [#uses=1]
+ %tmp.182 = setne sbyte* %tmp.181, null ; <bool> [#uses=2]
+ %tmp.183 = cast bool %tmp.182 to int ; <int> [#uses=0]
+ br bool %tmp.182, label %then.11, label %endif.11
+
+then.11: ; preds = %endif.9
+ %tmp.185 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 4 ; <%struct.macroblock**> [#uses=1]
+ %tmp.186 = load %struct.macroblock** %tmp.185 ; <%struct.macroblock*> [#uses=1]
+ %tmp.187 = getelementptr %struct.macroblock* %tmp.186, int 0, uint 5 ; <int*> [#uses=1]
+ %tmp.188 = load int* %tmp.187 ; <int> [#uses=1]
+ %tmp.189 = seteq int %tmp.188, 14 ; <bool> [#uses=2]
+ %tmp.190 = cast bool %tmp.189 to int ; <int> [#uses=0]
+ br bool %tmp.189, label %then.12, label %else.8
+
+then.12: ; preds = %then.11
+ br label %endif.11
+
+else.8: ; preds = %then.11
+ %tmp.192 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 4 ; <%struct.macroblock**> [#uses=1]
+ %tmp.193 = load %struct.macroblock** %tmp.192 ; <%struct.macroblock*> [#uses=1]
+ %tmp.194 = getelementptr %struct.macroblock* %tmp.193, int 0, uint 7 ; <int*> [#uses=1]
+ %tmp.195 = load int* %tmp.194 ; <int> [#uses=1]
+ %tmp.196 = setgt int %tmp.195, 15 ; <bool> [#uses=1]
+ %tmp.197 = cast bool %tmp.196 to int ; <int> [#uses=1]
+ br label %endif.11
+
+endif.11: ; preds = %else.8, %then.12, %endif.9
+ %a.4 = phi int [ 1, %then.12 ], [ %tmp.197, %else.8 ], [ 0, %endif.9 ] ; <int> [#uses=1]
+ %tmp.199 = mul int %b.4, 2 ; <int> [#uses=1]
+ %tmp.201 = add int %tmp.199, %a.4 ; <int> [#uses=1]
+ %tmp.205 = getelementptr %typedef.TextureInfoContexts* %tmp.4, int 0, uint 2 ; <[3 x [4 x %typedef.BiContextType]]*> [#uses=1]
+ %tmp.206 = getelementptr [3 x [4 x %typedef.BiContextType]]* %tmp.205, int 0, int 1 ; <[4 x %typedef.BiContextType]*> [#uses=1]
+ %tmp.207 = getelementptr [4 x %typedef.BiContextType]* %tmp.206, int 0, int 0 ; <%typedef.BiContextType*> [#uses=1]
+ %tmp.209 = cast int %tmp.201 to uint ; <uint> [#uses=1]
+ %tmp.210 = cast uint %tmp.209 to int ; <int> [#uses=1]
+ %tmp.211 = getelementptr %typedef.BiContextType* %tmp.207, int %tmp.210 ; <%typedef.BiContextType*> [#uses=1]
+ %tmp.202 = call uint %biari_decode_symbol( %typedef.DecodingEnvironment* %dep_dp, %typedef.BiContextType* %tmp.211 ) ; <uint> [#uses=1]
+ %tmp.212 = cast uint %tmp.202 to int ; <int> [#uses=1]
+ %tmp.214 = setne int %tmp.212, 0 ; <bool> [#uses=2]
+ %tmp.215 = cast bool %tmp.214 to int ; <int> [#uses=0]
+ br bool %tmp.214, label %then.13, label %endif.8
+
+then.13: ; preds = %endif.11
+ %tmp.217 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 3 ; <%struct.macroblock**> [#uses=1]
+ %tmp.218 = load %struct.macroblock** %tmp.217 ; <%struct.macroblock*> [#uses=1]
+ %tmp.219 = cast %struct.macroblock* %tmp.218 to sbyte* ; <sbyte*> [#uses=1]
+ %tmp.220 = setne sbyte* %tmp.219, null ; <bool> [#uses=2]
+ %tmp.221 = cast bool %tmp.220 to int ; <int> [#uses=0]
+ br bool %tmp.220, label %then.14, label %endif.14
+
+then.14: ; preds = %then.13
+ %tmp.223 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 3 ; <%struct.macroblock**> [#uses=1]
+ %tmp.224 = load %struct.macroblock** %tmp.223 ; <%struct.macroblock*> [#uses=1]
+ %tmp.225 = getelementptr %struct.macroblock* %tmp.224, int 0, uint 5 ; <int*> [#uses=1]
+ %tmp.226 = load int* %tmp.225 ; <int> [#uses=1]
+ %tmp.227 = seteq int %tmp.226, 14 ; <bool> [#uses=2]
+ %tmp.228 = cast bool %tmp.227 to int ; <int> [#uses=0]
+ br bool %tmp.227, label %then.15, label %else.9
+
+then.15: ; preds = %then.14
+ br label %endif.14
+
+else.9: ; preds = %then.14
+ %tmp.230 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 3 ; <%struct.macroblock**> [#uses=1]
+ %tmp.231 = load %struct.macroblock** %tmp.230 ; <%struct.macroblock*> [#uses=1]
+ %tmp.232 = getelementptr %struct.macroblock* %tmp.231, int 0, uint 7 ; <int*> [#uses=1]
+ %tmp.233 = load int* %tmp.232 ; <int> [#uses=1]
+ %tmp.234 = setgt int %tmp.233, 15 ; <bool> [#uses=2]
+ %tmp.235 = cast bool %tmp.234 to int ; <int> [#uses=0]
+ br bool %tmp.234, label %then.16, label %endif.14
+
+then.16: ; preds = %else.9
+ %tmp.237 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 3 ; <%struct.macroblock**> [#uses=1]
+ %tmp.238 = load %struct.macroblock** %tmp.237 ; <%struct.macroblock*> [#uses=1]
+ %tmp.239 = getelementptr %struct.macroblock* %tmp.238, int 0, uint 7 ; <int*> [#uses=1]
+ %tmp.240 = load int* %tmp.239 ; <int> [#uses=1]
+ %tmp.242 = shr int %tmp.240, ubyte 4 ; <int> [#uses=1]
+ %tmp.243 = seteq int %tmp.242, 2 ; <bool> [#uses=1]
+ %tmp.244 = cast bool %tmp.243 to int ; <int> [#uses=1]
+ br label %endif.14
+
+endif.14: ; preds = %then.16, %else.9, %then.15, %then.13
+ %b.5 = phi int [ 1, %then.15 ], [ %tmp.244, %then.16 ], [ 0, %else.9 ], [ 0, %then.13 ] ; <int> [#uses=1]
+ %tmp.246 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 4 ; <%struct.macroblock**> [#uses=1]
+ %tmp.247 = load %struct.macroblock** %tmp.246 ; <%struct.macroblock*> [#uses=1]
+ %tmp.248 = cast %struct.macroblock* %tmp.247 to sbyte* ; <sbyte*> [#uses=1]
+ %tmp.249 = setne sbyte* %tmp.248, null ; <bool> [#uses=2]
+ %tmp.250 = cast bool %tmp.249 to int ; <int> [#uses=0]
+ br bool %tmp.249, label %then.17, label %endif.17
+
+then.17: ; preds = %endif.14
+ %tmp.252 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 4 ; <%struct.macroblock**> [#uses=1]
+ %tmp.253 = load %struct.macroblock** %tmp.252 ; <%struct.macroblock*> [#uses=1]
+ %tmp.254 = getelementptr %struct.macroblock* %tmp.253, int 0, uint 5 ; <int*> [#uses=1]
+ %tmp.255 = load int* %tmp.254 ; <int> [#uses=1]
+ %tmp.256 = seteq int %tmp.255, 14 ; <bool> [#uses=2]
+ %tmp.257 = cast bool %tmp.256 to int ; <int> [#uses=0]
+ br bool %tmp.256, label %then.18, label %else.10
+
+then.18: ; preds = %then.17
+ br label %endif.17
+
+else.10: ; preds = %then.17
+ %tmp.259 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 4 ; <%struct.macroblock**> [#uses=1]
+ %tmp.260 = load %struct.macroblock** %tmp.259 ; <%struct.macroblock*> [#uses=1]
+ %tmp.261 = getelementptr %struct.macroblock* %tmp.260, int 0, uint 7 ; <int*> [#uses=1]
+ %tmp.262 = load int* %tmp.261 ; <int> [#uses=1]
+ %tmp.263 = setgt int %tmp.262, 15 ; <bool> [#uses=2]
+ %tmp.264 = cast bool %tmp.263 to int ; <int> [#uses=0]
+ br bool %tmp.263, label %then.19, label %endif.17
+
+then.19: ; preds = %else.10
+ %tmp.266 = getelementptr %struct.macroblock* %tmp.12, int 0, uint 4 ; <%struct.macroblock**> [#uses=1]
+ %tmp.267 = load %struct.macroblock** %tmp.266 ; <%struct.macroblock*> [#uses=1]
+ %tmp.268 = getelementptr %struct.macroblock* %tmp.267, int 0, uint 7 ; <int*> [#uses=1]
+ %tmp.269 = load int* %tmp.268 ; <int> [#uses=1]
+ %tmp.271 = shr int %tmp.269, ubyte 4 ; <int> [#uses=1]
+ %tmp.272 = seteq int %tmp.271, 2 ; <bool> [#uses=1]
+ %tmp.273 = cast bool %tmp.272 to int ; <int> [#uses=1]
+ br label %endif.17
+
+endif.17: ; preds = %then.19, %else.10, %then.18, %endif.14
+ %a.5 = phi int [ 1, %then.18 ], [ %tmp.273, %then.19 ], [ 0, %else.10 ], [ 0, %endif.14 ] ; <int> [#uses=1]
+ %tmp.275 = mul int %b.5, 2 ; <int> [#uses=1]
+ %tmp.277 = add int %tmp.275, %a.5 ; <int> [#uses=1]
+ %tmp.281 = getelementptr %typedef.TextureInfoContexts* %tmp.4, int 0, uint 2 ; <[3 x [4 x %typedef.BiContextType]]*> [#uses=1]
+ %tmp.282 = getelementptr [3 x [4 x %typedef.BiContextType]]* %tmp.281, int 0, int 2 ; <[4 x %typedef.BiContextType]*> [#uses=1]
+ %tmp.283 = getelementptr [4 x %typedef.BiContextType]* %tmp.282, int 0, int 0 ; <%typedef.BiContextType*> [#uses=1]
+ %tmp.285 = cast int %tmp.277 to uint ; <uint> [#uses=1]
+ %tmp.286 = cast uint %tmp.285 to int ; <int> [#uses=1]
+ %tmp.287 = getelementptr %typedef.BiContextType* %tmp.283, int %tmp.286 ; <%typedef.BiContextType*> [#uses=1]
+ %tmp.278 = call uint %biari_decode_symbol( %typedef.DecodingEnvironment* %dep_dp, %typedef.BiContextType* %tmp.287 ) ; <uint> [#uses=1]
+ %tmp.288 = cast uint %tmp.278 to int ; <int> [#uses=1]
+ %tmp.290 = seteq int %tmp.288, 1 ; <bool> [#uses=2]
+ %tmp.291 = cast bool %tmp.290 to int ; <int> [#uses=0]
+ br bool %tmp.290, label %cond_true, label %cond_false
+
+cond_true: ; preds = %endif.17
+ %tmp.293 = add int %cbp.2, 32 ; <int> [#uses=1]
+ br label %cond_continue
+
+cond_false: ; preds = %endif.17
+ %tmp.295 = add int %cbp.2, 16 ; <int> [#uses=1]
+ br label %cond_continue
+
+cond_continue: ; preds = %cond_false, %cond_true
+ %mem_tmp.0 = phi int [ %tmp.293, %cond_true ], [ %tmp.295, %cond_false ] ; <int> [#uses=1]
+ br label %endif.8
+
+endif.8: ; preds = %cond_continue, %endif.11, %loopexit.0
+ %cbp.3 = phi int [ %mem_tmp.0, %cond_continue ], [ %cbp.2, %endif.11 ], [ %cbp.2, %loopexit.0 ] ; <int> [#uses=2]
+ %tmp.298 = getelementptr %struct.syntaxelement* %se, int 0, uint 1 ; <int*> [#uses=1]
+ store int %cbp.3, int* %tmp.298
+ %tmp.301 = seteq int %cbp.3, 0 ; <bool> [#uses=2]
+ %tmp.302 = cast bool %tmp.301 to int ; <int> [#uses=0]
+ br bool %tmp.301, label %then.20, label %return
+
+then.20: ; preds = %endif.8
+ store int 0, int* %last_dquant
+ ret void
+
+return: ; preds = %endif.8
+ ret void
+}
+
+declare uint %biari_decode_symbol(%typedef.DecodingEnvironment*, %typedef.BiContextType*)
+
+declare void %getLuma4x4Neighbour(int, int, int, int, int, %struct.pix_pos*)
diff --git a/test/Transforms/InstCombine/2006-04-28-ShiftShiftLongLong.ll b/test/Transforms/InstCombine/2006-04-28-ShiftShiftLongLong.ll
new file mode 100644
index 0000000..6e25d79
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-04-28-ShiftShiftLongLong.ll
@@ -0,0 +1,10 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep shl
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | notcast
+
+; This cannot be turned into a sign extending cast!
+
+long %test(long %X) {
+ %Y = shl long %X, ubyte 16
+ %Z = shr long %Y, ubyte 16
+ ret long %Z
+}
diff --git a/test/Transforms/InstCombine/2006-05-04-DemandedBitCrash.ll b/test/Transforms/InstCombine/2006-05-04-DemandedBitCrash.ll
new file mode 100644
index 0000000..3e72a18
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-05-04-DemandedBitCrash.ll
@@ -0,0 +1,51 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+; END.
+
+void %test() {
+bb38.i:
+ %varspec.0.i1014 = cast long 123814269237067777 to ulong ; <ulong> [#uses=1]
+ %locspec.0.i1015 = cast int 1 to uint ; <uint> [#uses=2]
+ %tmp51391.i1018 = shr ulong %varspec.0.i1014, ubyte 16 ; <ulong> [#uses=1]
+ %tmp51392.i1019 = cast ulong %tmp51391.i1018 to uint ; <uint> [#uses=2]
+ %tmp51392.mask.i1020 = shr uint %tmp51392.i1019, ubyte 29 ; <uint> [#uses=1]
+ %tmp7.i1021 = and uint %tmp51392.mask.i1020, 1 ; <uint> [#uses=2]
+ %tmp18.i1026 = shr uint %tmp51392.i1019, ubyte 31 ; <uint> [#uses=2]
+ %tmp18.i1027 = cast uint %tmp18.i1026 to ubyte ; <ubyte> [#uses=1]
+ br bool false, label %cond_false1148.i1653, label %bb377.i1259
+
+bb377.i1259: ; preds = %bb38.i
+ br bool false, label %cond_true541.i1317, label %cond_false1148.i1653
+
+cond_true541.i1317: ; preds = %bb377.i1259
+ %tmp545.i1318 = shr uint %locspec.0.i1015, ubyte 10 ; <uint> [#uses=1]
+ %tmp550.i1319 = shr uint %locspec.0.i1015, ubyte 4 ; <uint> [#uses=1]
+ %tmp550551.i1320 = and uint %tmp550.i1319, 63 ; <uint> [#uses=1]
+ %tmp553.i1321 = setlt uint %tmp550551.i1320, 4 ; <bool> [#uses=1]
+ %tmp558.i1322 = seteq uint %tmp7.i1021, 0 ; <bool> [#uses=1]
+ %bothcond.i1326 = or bool %tmp553.i1321, false ; <bool> [#uses=1]
+ %bothcond1.i1327 = or bool %bothcond.i1326, false ; <bool> [#uses=1]
+ %bothcond2.not.i1328 = or bool %bothcond1.i1327, false ; <bool> [#uses=1]
+ %bothcond3.i1329 = or bool %bothcond2.not.i1328, %tmp558.i1322 ; <bool> [#uses=0]
+ br bool false, label %cond_true583.i1333, label %cond_next592.i1337
+
+cond_true583.i1333: ; preds = %cond_true541.i1317
+ br bool false, label %cond_true586.i1335, label %cond_next592.i1337
+
+cond_true586.i1335: ; preds = %cond_true583.i1333
+ br label %cond_true.i
+
+cond_next592.i1337: ; preds = %cond_true583.i1333, %cond_true541.i1317
+ %mask_z.0.i1339 = phi uint [ %tmp18.i1026, %cond_true541.i1317 ], [ 0, %cond_true583.i1333 ] ; <uint> [#uses=0]
+ %tmp594.i1340 = and uint %tmp545.i1318, 15 ; <uint> [#uses=0]
+ br label %cond_true.i
+
+cond_false1148.i1653: ; preds = %bb377.i1259, %bb38.i
+ %tmp1150.i1654 = seteq uint %tmp7.i1021, 0 ; <bool> [#uses=1]
+ %tmp1160.i1656 = seteq ubyte %tmp18.i1027, 0 ; <bool> [#uses=1]
+ %bothcond8.i1658 = or bool %tmp1150.i1654, %tmp1160.i1656 ; <bool> [#uses=1]
+ %bothcond9.i1659 = or bool %bothcond8.i1658, false ; <bool> [#uses=0]
+ br label %cond_true.i
+
+cond_true.i: ; preds = %cond_false1148.i1653, %cond_next592.i1337, %cond_true586.i1335
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2006-05-06-Infloop.ll b/test/Transforms/InstCombine/2006-05-06-Infloop.ll
new file mode 100644
index 0000000..4458d97
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-05-06-Infloop.ll
@@ -0,0 +1,523 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+; END.
+
+ %struct.gs_matrix = type { float, int, float, int, float, int, float, int, float, int, float, int }
+ %struct.gx_bitmap = type { ubyte*, int, int, int }
+ %struct.gx_device = type { int, %struct.gx_device_procs*, sbyte*, int, int, float, float, int, ushort, int, int }
+ %struct.gx_device_memory = type { int, %struct.gx_device_procs*, sbyte*, int, int, float, float, int, ushort, int, int, %struct.gs_matrix, int, ubyte*, ubyte**, int (%struct.gx_device_memory*, int, int, int, int, int)*, int, int, ubyte* }
+ %struct.gx_device_procs = type { int (%struct.gx_device*)*, void (%struct.gx_device*, %struct.gs_matrix*)*, int (%struct.gx_device*)*, int (%struct.gx_device*)*, int (%struct.gx_device*)*, uint (%struct.gx_device*, ushort, ushort, ushort)*, int (%struct.gx_device*, uint, ushort*)*, int (%struct.gx_device*, int, int, int, int, uint)*, int (%struct.gx_device*, %struct.gx_bitmap*, int, int, int, int, uint, uint)*, int (%struct.gx_device*, ubyte*, int, int, int, int, int, int, uint, uint)*, int (%struct.gx_device*, ubyte*, int, int, int, int, int, int)*, int (%struct.gx_device*, int, int, int, int, uint)*, int (%struct.gx_device*, int, int, int, int, int, int, uint)*, int (%struct.gx_device*, %struct.gx_bitmap*, int, int, int, int, int, int, uint, uint)* }
+
+implementation ; Functions:
+
+int %mem_mono_copy_mono(%struct.gx_device* %dev, ubyte* %base, int %sourcex, int %raster, int %x, int %y, int %w, int %h, uint %zero, uint %one) {
+entry:
+ %raster = cast int %raster to uint ; <uint> [#uses=3]
+ %tmp = seteq uint %one, %zero ; <bool> [#uses=1]
+ br bool %tmp, label %cond_true, label %cond_next
+
+cond_true: ; preds = %entry
+ %tmp6 = tail call int %mem_mono_fill_rectangle( %struct.gx_device* %dev, int %x, int %y, int %w, int %h, uint %zero ) ; <int> [#uses=1]
+ ret int %tmp6
+
+cond_next: ; preds = %entry
+ %tmp8 = cast %struct.gx_device* %dev to %struct.gx_device_memory* ; <%struct.gx_device_memory*> [#uses=6]
+ %tmp = getelementptr %struct.gx_device_memory* %tmp8, int 0, uint 15 ; <int (%struct.gx_device_memory*, int, int, int, int, int)**> [#uses=1]
+ %tmp = load int (%struct.gx_device_memory*, int, int, int, int, int)** %tmp ; <int (%struct.gx_device_memory*, int, int, int, int, int)*> [#uses=2]
+ %tmp9 = seteq int (%struct.gx_device_memory*, int, int, int, int, int)* %tmp, %mem_no_fault_proc ; <bool> [#uses=1]
+ br bool %tmp9, label %cond_next46, label %cond_true10
+
+cond_true10: ; preds = %cond_next
+ %tmp16 = add int %x, 7 ; <int> [#uses=1]
+ %tmp17 = add int %tmp16, %w ; <int> [#uses=1]
+ %tmp18 = shr int %tmp17, ubyte 3 ; <int> [#uses=1]
+ %tmp20 = shr int %x, ubyte 3 ; <int> [#uses=2]
+ %tmp21 = sub int %tmp18, %tmp20 ; <int> [#uses=1]
+ %tmp27 = tail call int %tmp( %struct.gx_device_memory* %tmp8, int %tmp20, int %y, int %tmp21, int %h, int 1 ) ; <int> [#uses=2]
+ %tmp29 = setlt int %tmp27, 0 ; <bool> [#uses=1]
+ br bool %tmp29, label %cond_true30, label %cond_next46
+
+cond_true30: ; preds = %cond_true10
+ %tmp41 = tail call int %mem_copy_mono_recover( %struct.gx_device* %dev, ubyte* %base, int %sourcex, int %raster, int %x, int %y, int %w, int %h, uint %zero, uint %one, int %tmp27 ) ; <int> [#uses=1]
+ ret int %tmp41
+
+cond_next46: ; preds = %cond_true10, %cond_next
+ %tmp48 = setgt int %w, 0 ; <bool> [#uses=1]
+ %tmp53 = setgt int %h, 0 ; <bool> [#uses=1]
+ %bothcond = and bool %tmp53, %tmp48 ; <bool> [#uses=1]
+ br bool %bothcond, label %bb58, label %return
+
+bb58: ; preds = %cond_next46
+ %tmp60 = setlt int %x, 0 ; <bool> [#uses=1]
+ br bool %tmp60, label %return, label %cond_next63
+
+cond_next63: ; preds = %bb58
+ %tmp65 = getelementptr %struct.gx_device_memory* %tmp8, int 0, uint 3 ; <int*> [#uses=1]
+ %tmp66 = load int* %tmp65 ; <int> [#uses=1]
+ %tmp68 = sub int %tmp66, %w ; <int> [#uses=1]
+ %tmp70 = setlt int %tmp68, %x ; <bool> [#uses=1]
+ %tmp75 = setlt int %y, 0 ; <bool> [#uses=1]
+ %bothcond1 = or bool %tmp70, %tmp75 ; <bool> [#uses=1]
+ br bool %bothcond1, label %return, label %cond_next78
+
+cond_next78: ; preds = %cond_next63
+ %tmp80 = getelementptr %struct.gx_device_memory* %tmp8, int 0, uint 4 ; <int*> [#uses=1]
+ %tmp81 = load int* %tmp80 ; <int> [#uses=1]
+ %tmp83 = sub int %tmp81, %h ; <int> [#uses=1]
+ %tmp85 = setlt int %tmp83, %y ; <bool> [#uses=1]
+ br bool %tmp85, label %return, label %bb91
+
+bb91: ; preds = %cond_next78
+ %tmp93 = shr int %x, ubyte 3 ; <int> [#uses=4]
+ %tmp = getelementptr %struct.gx_device_memory* %tmp8, int 0, uint 14 ; <ubyte***> [#uses=1]
+ %tmp = load ubyte*** %tmp ; <ubyte**> [#uses=1]
+ %tmp96 = getelementptr ubyte** %tmp, int %y ; <ubyte**> [#uses=4]
+ %tmp98 = load ubyte** %tmp96 ; <ubyte*> [#uses=1]
+ %tmp100 = getelementptr ubyte* %tmp98, int %tmp93 ; <ubyte*> [#uses=3]
+ %tmp102 = shr int %sourcex, ubyte 3 ; <int> [#uses=3]
+ %tmp106 = and int %sourcex, 7 ; <int> [#uses=1]
+ %tmp107 = sub int 8, %tmp106 ; <int> [#uses=4]
+ %tmp109 = and int %x, 7 ; <int> [#uses=3]
+ %tmp110 = sub int 8, %tmp109 ; <int> [#uses=8]
+ %tmp112 = sub int 8, %tmp110 ; <int> [#uses=1]
+ %tmp112 = cast int %tmp112 to ubyte ; <ubyte> [#uses=1]
+ %tmp113464 = shr uint 255, ubyte %tmp112 ; <uint> [#uses=4]
+ %tmp116 = setgt int %tmp110, %w ; <bool> [#uses=1]
+ %tmp132 = getelementptr %struct.gx_device_memory* %tmp8, int 0, uint 16 ; <int*> [#uses=2]
+ br bool %tmp116, label %cond_true117, label %cond_false123
+
+cond_true117: ; preds = %bb91
+ %tmp119 = cast int %w to ubyte ; <ubyte> [#uses=1]
+ %tmp120 = shr uint %tmp113464, ubyte %tmp119 ; <uint> [#uses=1]
+ %tmp122 = sub uint %tmp113464, %tmp120 ; <uint> [#uses=2]
+ %tmp13315 = load int* %tmp132 ; <int> [#uses=1]
+ %tmp13416 = seteq int %tmp13315, 0 ; <bool> [#uses=1]
+ br bool %tmp13416, label %cond_next151, label %cond_true135
+
+cond_false123: ; preds = %bb91
+ %tmp126 = sub int %w, %tmp110 ; <int> [#uses=1]
+ %tmp126 = cast int %tmp126 to ubyte ; <ubyte> [#uses=1]
+ %tmp127 = and ubyte %tmp126, 7 ; <ubyte> [#uses=1]
+ %tmp128 = shr uint 255, ubyte %tmp127 ; <uint> [#uses=1]
+ %tmp1295 = sub uint 255, %tmp128 ; <uint> [#uses=2]
+ %tmp133 = load int* %tmp132 ; <int> [#uses=1]
+ %tmp134 = seteq int %tmp133, 0 ; <bool> [#uses=1]
+ br bool %tmp134, label %cond_next151, label %cond_true135
+
+cond_true135: ; preds = %cond_false123, %cond_true117
+ %rmask.0.0 = phi uint [ undef, %cond_true117 ], [ %tmp1295, %cond_false123 ] ; <uint> [#uses=2]
+ %mask.1.0 = phi uint [ %tmp122, %cond_true117 ], [ %tmp113464, %cond_false123 ] ; <uint> [#uses=2]
+ %not.tmp137 = setne uint %zero, 4294967295 ; <bool> [#uses=1]
+ %tmp140 = cast bool %not.tmp137 to uint ; <uint> [#uses=1]
+ %zero_addr.0 = xor uint %tmp140, %zero ; <uint> [#uses=2]
+ %tmp144 = seteq uint %one, 4294967295 ; <bool> [#uses=1]
+ br bool %tmp144, label %cond_next151, label %cond_true145
+
+cond_true145: ; preds = %cond_true135
+ %tmp147 = xor uint %one, 1 ; <uint> [#uses=1]
+ br label %cond_next151
+
+cond_next151: ; preds = %cond_true145, %cond_true135, %cond_false123, %cond_true117
+ %rmask.0.1 = phi uint [ %rmask.0.0, %cond_true145 ], [ undef, %cond_true117 ], [ %tmp1295, %cond_false123 ], [ %rmask.0.0, %cond_true135 ] ; <uint> [#uses=4]
+ %mask.1.1 = phi uint [ %mask.1.0, %cond_true145 ], [ %tmp122, %cond_true117 ], [ %tmp113464, %cond_false123 ], [ %mask.1.0, %cond_true135 ] ; <uint> [#uses=4]
+ %one_addr.0 = phi uint [ %tmp147, %cond_true145 ], [ %one, %cond_true117 ], [ %one, %cond_false123 ], [ %one, %cond_true135 ] ; <uint> [#uses=2]
+ %zero_addr.1 = phi uint [ %zero_addr.0, %cond_true145 ], [ %zero, %cond_true117 ], [ %zero, %cond_false123 ], [ %zero_addr.0, %cond_true135 ] ; <uint> [#uses=2]
+ %tmp153 = seteq uint %zero_addr.1, 1 ; <bool> [#uses=2]
+ %tmp158 = seteq uint %one_addr.0, 0 ; <bool> [#uses=2]
+ %bothcond2 = or bool %tmp153, %tmp158 ; <bool> [#uses=1]
+ %iftmp.35.0 = select bool %bothcond2, uint 4294967295, uint 0 ; <uint> [#uses=8]
+ %tmp167 = seteq uint %zero_addr.1, 0 ; <bool> [#uses=1]
+ %bothcond3 = or bool %tmp167, %tmp158 ; <bool> [#uses=1]
+ %iftmp.36.0 = select bool %bothcond3, uint 0, uint 4294967295 ; <uint> [#uses=4]
+ %tmp186 = seteq uint %one_addr.0, 1 ; <bool> [#uses=1]
+ %bothcond4 = or bool %tmp153, %tmp186 ; <bool> [#uses=1]
+ %iftmp.37.0 = select bool %bothcond4, uint 4294967295, uint 0 ; <uint> [#uses=6]
+ %tmp196 = seteq int %tmp107, %tmp110 ; <bool> [#uses=1]
+ br bool %tmp196, label %cond_true197, label %cond_false299
+
+cond_true197: ; preds = %cond_next151
+ %tmp29222 = add int %h, -1 ; <int> [#uses=3]
+ %tmp29424 = setlt int %tmp29222, 0 ; <bool> [#uses=1]
+ br bool %tmp29424, label %return, label %cond_true295.preheader
+
+cond_true249.preheader: ; preds = %cond_true295
+ br label %cond_true249
+
+cond_true249: ; preds = %cond_true249, %cond_true249.preheader
+ %indvar = phi uint [ 0, %cond_true249.preheader ], [ %indvar.next, %cond_true249 ] ; <uint> [#uses=2]
+ %optr.3.2 = phi ubyte* [ %tmp232, %cond_true249 ], [ %dest.1.0, %cond_true249.preheader ] ; <ubyte*> [#uses=1]
+ %bptr.3.2 = phi ubyte* [ %tmp226, %cond_true249 ], [ %line.1.0, %cond_true249.preheader ] ; <ubyte*> [#uses=1]
+ %tmp. = add int %tmp109, %w ; <int> [#uses=1]
+ %indvar = cast uint %indvar to int ; <int> [#uses=1]
+ %tmp.58 = mul int %indvar, -8 ; <int> [#uses=1]
+ %tmp.57 = add int %tmp., -16 ; <int> [#uses=1]
+ %tmp246.2 = add int %tmp.58, %tmp.57 ; <int> [#uses=1]
+ %tmp225 = cast ubyte* %bptr.3.2 to uint ; <uint> [#uses=1]
+ %tmp226 = add uint %tmp225, 1 ; <uint> [#uses=1]
+ %tmp226 = cast uint %tmp226 to ubyte* ; <ubyte*> [#uses=3]
+ %tmp228 = load ubyte* %tmp226 ; <ubyte> [#uses=1]
+ %tmp228 = cast ubyte %tmp228 to uint ; <uint> [#uses=1]
+ %tmp230 = xor uint %tmp228, %iftmp.35.0 ; <uint> [#uses=2]
+ %tmp231 = cast ubyte* %optr.3.2 to uint ; <uint> [#uses=1]
+ %tmp232 = add uint %tmp231, 1 ; <uint> [#uses=1]
+ %tmp232 = cast uint %tmp232 to ubyte* ; <ubyte*> [#uses=4]
+ %tmp235 = or uint %tmp230, %iftmp.36.0 ; <uint> [#uses=1]
+ %tmp235 = cast uint %tmp235 to ubyte ; <ubyte> [#uses=1]
+ %tmp237 = load ubyte* %tmp232 ; <ubyte> [#uses=1]
+ %tmp238 = and ubyte %tmp235, %tmp237 ; <ubyte> [#uses=1]
+ %tmp241 = and uint %tmp230, %iftmp.37.0 ; <uint> [#uses=1]
+ %tmp241 = cast uint %tmp241 to ubyte ; <ubyte> [#uses=1]
+ %tmp242 = or ubyte %tmp238, %tmp241 ; <ubyte> [#uses=1]
+ store ubyte %tmp242, ubyte* %tmp232
+ %tmp24629 = add int %tmp246.2, -8 ; <int> [#uses=2]
+ %tmp24831 = setlt int %tmp24629, 0 ; <bool> [#uses=1]
+ %indvar.next = add uint %indvar, 1 ; <uint> [#uses=1]
+ br bool %tmp24831, label %bb252.loopexit, label %cond_true249
+
+bb252.loopexit: ; preds = %cond_true249
+ br label %bb252
+
+bb252: ; preds = %cond_true295, %bb252.loopexit
+ %optr.3.3 = phi ubyte* [ %dest.1.0, %cond_true295 ], [ %tmp232, %bb252.loopexit ] ; <ubyte*> [#uses=1]
+ %bptr.3.3 = phi ubyte* [ %line.1.0, %cond_true295 ], [ %tmp226, %bb252.loopexit ] ; <ubyte*> [#uses=1]
+ %tmp246.3 = phi int [ %tmp246, %cond_true295 ], [ %tmp24629, %bb252.loopexit ] ; <int> [#uses=1]
+ %tmp254 = setgt int %tmp246.3, -8 ; <bool> [#uses=1]
+ br bool %tmp254, label %cond_true255, label %cond_next280
+
+cond_true255: ; preds = %bb252
+ %tmp256 = cast ubyte* %bptr.3.3 to uint ; <uint> [#uses=1]
+ %tmp257 = add uint %tmp256, 1 ; <uint> [#uses=1]
+ %tmp257 = cast uint %tmp257 to ubyte* ; <ubyte*> [#uses=1]
+ %tmp259 = load ubyte* %tmp257 ; <ubyte> [#uses=1]
+ %tmp259 = cast ubyte %tmp259 to uint ; <uint> [#uses=1]
+ %tmp261 = xor uint %tmp259, %iftmp.35.0 ; <uint> [#uses=2]
+ %tmp262 = cast ubyte* %optr.3.3 to uint ; <uint> [#uses=1]
+ %tmp263 = add uint %tmp262, 1 ; <uint> [#uses=1]
+ %tmp263 = cast uint %tmp263 to ubyte* ; <ubyte*> [#uses=2]
+ %tmp265 = cast uint %tmp261 to ubyte ; <ubyte> [#uses=1]
+ %tmp268 = or ubyte %tmp266, %tmp265 ; <ubyte> [#uses=1]
+ %tmp270 = load ubyte* %tmp263 ; <ubyte> [#uses=1]
+ %tmp271 = and ubyte %tmp268, %tmp270 ; <ubyte> [#uses=1]
+ %tmp276 = and uint %tmp274, %tmp261 ; <uint> [#uses=1]
+ %tmp276 = cast uint %tmp276 to ubyte ; <ubyte> [#uses=1]
+ %tmp277 = or ubyte %tmp271, %tmp276 ; <ubyte> [#uses=1]
+ store ubyte %tmp277, ubyte* %tmp263
+ br label %cond_next280
+
+cond_next280: ; preds = %cond_true255, %bb252
+ %tmp281 = cast ubyte** %dest_line.1.0 to uint ; <uint> [#uses=1]
+ %tmp282 = add uint %tmp281, 4 ; <uint> [#uses=1]
+ %tmp282 = cast uint %tmp282 to ubyte** ; <ubyte**> [#uses=2]
+ %tmp284 = load ubyte** %tmp282 ; <ubyte*> [#uses=1]
+ %tmp286 = getelementptr ubyte* %tmp284, int %tmp93 ; <ubyte*> [#uses=1]
+ %tmp292 = add int %tmp292.0, -1 ; <int> [#uses=1]
+ %tmp294 = setlt int %tmp292, 0 ; <bool> [#uses=1]
+ %indvar.next61 = add uint %indvar60, 1 ; <uint> [#uses=1]
+ br bool %tmp294, label %return.loopexit, label %cond_true295
+
+cond_true295.preheader: ; preds = %cond_true197
+ %tmp200 = sub int %w, %tmp110 ; <int> [#uses=1]
+ %tmp209 = cast uint %mask.1.1 to ubyte ; <ubyte> [#uses=1]
+ %tmp209not = xor ubyte %tmp209, 255 ; <ubyte> [#uses=1]
+ %tmp212 = cast uint %iftmp.36.0 to ubyte ; <ubyte> [#uses=2]
+ %tmp211 = or ubyte %tmp212, %tmp209not ; <ubyte> [#uses=2]
+ %tmp219 = and uint %iftmp.37.0, %mask.1.1 ; <uint> [#uses=2]
+ %tmp246 = add int %tmp200, -8 ; <int> [#uses=3]
+ %tmp248 = setlt int %tmp246, 0 ; <bool> [#uses=1]
+ %tmp264 = cast uint %rmask.0.1 to ubyte ; <ubyte> [#uses=1]
+ %tmp264not = xor ubyte %tmp264, 255 ; <ubyte> [#uses=1]
+ %tmp266 = or ubyte %tmp212, %tmp264not ; <ubyte> [#uses=2]
+ %tmp274 = and uint %iftmp.37.0, %rmask.0.1 ; <uint> [#uses=2]
+ br bool %tmp248, label %cond_true295.preheader.split.us, label %cond_true295.preheader.split
+
+cond_true295.preheader.split.us: ; preds = %cond_true295.preheader
+ br label %cond_true295.us
+
+cond_true295.us: ; preds = %cond_next280.us, %cond_true295.preheader.split.us
+ %indvar86 = phi uint [ 0, %cond_true295.preheader.split.us ], [ %indvar.next87, %cond_next280.us ] ; <uint> [#uses=3]
+ %dest.1.0.us = phi ubyte* [ %tmp286.us, %cond_next280.us ], [ %tmp100, %cond_true295.preheader.split.us ] ; <ubyte*> [#uses=3]
+ %dest_line.1.0.us = phi ubyte** [ %tmp282.us, %cond_next280.us ], [ %tmp96, %cond_true295.preheader.split.us ] ; <ubyte**> [#uses=1]
+ %tmp.89 = sub uint 0, %indvar86 ; <uint> [#uses=1]
+ %tmp.89 = cast uint %tmp.89 to int ; <int> [#uses=1]
+ %tmp292.0.us = add int %tmp.89, %tmp29222 ; <int> [#uses=1]
+ %tmp.91 = mul uint %indvar86, %raster ; <uint> [#uses=1]
+ %tmp.91 = cast uint %tmp.91 to int ; <int> [#uses=1]
+ %tmp104.sum101 = add int %tmp102, %tmp.91 ; <int> [#uses=1]
+ %line.1.0.us = getelementptr ubyte* %base, int %tmp104.sum101 ; <ubyte*> [#uses=2]
+ %tmp.us = load ubyte* %line.1.0.us ; <ubyte> [#uses=1]
+ %tmp206.us = cast ubyte %tmp.us to uint ; <uint> [#uses=1]
+ %tmp208.us = xor uint %tmp206.us, %iftmp.35.0 ; <uint> [#uses=2]
+ %tmp210.us = cast uint %tmp208.us to ubyte ; <ubyte> [#uses=1]
+ %tmp213.us = or ubyte %tmp211, %tmp210.us ; <ubyte> [#uses=1]
+ %tmp215.us = load ubyte* %dest.1.0.us ; <ubyte> [#uses=1]
+ %tmp216.us = and ubyte %tmp213.us, %tmp215.us ; <ubyte> [#uses=1]
+ %tmp221.us = and uint %tmp219, %tmp208.us ; <uint> [#uses=1]
+ %tmp221.us = cast uint %tmp221.us to ubyte ; <ubyte> [#uses=1]
+ %tmp222.us = or ubyte %tmp216.us, %tmp221.us ; <ubyte> [#uses=1]
+ store ubyte %tmp222.us, ubyte* %dest.1.0.us
+ br bool true, label %bb252.us, label %cond_true249.preheader.us
+
+cond_next280.us: ; preds = %bb252.us, %cond_true255.us
+ %tmp281.us = cast ubyte** %dest_line.1.0.us to uint ; <uint> [#uses=1]
+ %tmp282.us = add uint %tmp281.us, 4 ; <uint> [#uses=1]
+ %tmp282.us = cast uint %tmp282.us to ubyte** ; <ubyte**> [#uses=2]
+ %tmp284.us = load ubyte** %tmp282.us ; <ubyte*> [#uses=1]
+ %tmp286.us = getelementptr ubyte* %tmp284.us, int %tmp93 ; <ubyte*> [#uses=1]
+ %tmp292.us = add int %tmp292.0.us, -1 ; <int> [#uses=1]
+ %tmp294.us = setlt int %tmp292.us, 0 ; <bool> [#uses=1]
+ %indvar.next87 = add uint %indvar86, 1 ; <uint> [#uses=1]
+ br bool %tmp294.us, label %return.loopexit.us, label %cond_true295.us
+
+cond_true255.us: ; preds = %bb252.us
+ %tmp256.us = cast ubyte* %bptr.3.3.us to uint ; <uint> [#uses=1]
+ %tmp257.us = add uint %tmp256.us, 1 ; <uint> [#uses=1]
+ %tmp257.us = cast uint %tmp257.us to ubyte* ; <ubyte*> [#uses=1]
+ %tmp259.us = load ubyte* %tmp257.us ; <ubyte> [#uses=1]
+ %tmp259.us = cast ubyte %tmp259.us to uint ; <uint> [#uses=1]
+ %tmp261.us = xor uint %tmp259.us, %iftmp.35.0 ; <uint> [#uses=2]
+ %tmp262.us = cast ubyte* %optr.3.3.us to uint ; <uint> [#uses=1]
+ %tmp263.us = add uint %tmp262.us, 1 ; <uint> [#uses=1]
+ %tmp263.us = cast uint %tmp263.us to ubyte* ; <ubyte*> [#uses=2]
+ %tmp265.us = cast uint %tmp261.us to ubyte ; <ubyte> [#uses=1]
+ %tmp268.us = or ubyte %tmp266, %tmp265.us ; <ubyte> [#uses=1]
+ %tmp270.us = load ubyte* %tmp263.us ; <ubyte> [#uses=1]
+ %tmp271.us = and ubyte %tmp268.us, %tmp270.us ; <ubyte> [#uses=1]
+ %tmp276.us = and uint %tmp274, %tmp261.us ; <uint> [#uses=1]
+ %tmp276.us = cast uint %tmp276.us to ubyte ; <ubyte> [#uses=1]
+ %tmp277.us = or ubyte %tmp271.us, %tmp276.us ; <ubyte> [#uses=1]
+ store ubyte %tmp277.us, ubyte* %tmp263.us
+ br label %cond_next280.us
+
+bb252.us: ; preds = %bb252.loopexit.us, %cond_true295.us
+ %optr.3.3.us = phi ubyte* [ %dest.1.0.us, %cond_true295.us ], [ undef, %bb252.loopexit.us ] ; <ubyte*> [#uses=1]
+ %bptr.3.3.us = phi ubyte* [ %line.1.0.us, %cond_true295.us ], [ undef, %bb252.loopexit.us ] ; <ubyte*> [#uses=1]
+ %tmp246.3.us = phi int [ %tmp246, %cond_true295.us ], [ undef, %bb252.loopexit.us ] ; <int> [#uses=1]
+ %tmp254.us = setgt int %tmp246.3.us, -8 ; <bool> [#uses=1]
+ br bool %tmp254.us, label %cond_true255.us, label %cond_next280.us
+
+cond_true249.us: ; preds = %cond_true249.preheader.us, %cond_true249.us
+ br bool undef, label %bb252.loopexit.us, label %cond_true249.us
+
+cond_true249.preheader.us: ; preds = %cond_true295.us
+ br label %cond_true249.us
+
+bb252.loopexit.us: ; preds = %cond_true249.us
+ br label %bb252.us
+
+return.loopexit.us: ; preds = %cond_next280.us
+ br label %return.loopexit.split
+
+cond_true295.preheader.split: ; preds = %cond_true295.preheader
+ br label %cond_true295
+
+cond_true295: ; preds = %cond_true295.preheader.split, %cond_next280
+ %indvar60 = phi uint [ 0, %cond_true295.preheader.split ], [ %indvar.next61, %cond_next280 ] ; <uint> [#uses=3]
+ %dest.1.0 = phi ubyte* [ %tmp286, %cond_next280 ], [ %tmp100, %cond_true295.preheader.split ] ; <ubyte*> [#uses=4]
+ %dest_line.1.0 = phi ubyte** [ %tmp282, %cond_next280 ], [ %tmp96, %cond_true295.preheader.split ] ; <ubyte**> [#uses=1]
+ %tmp.63 = sub uint 0, %indvar60 ; <uint> [#uses=1]
+ %tmp.63 = cast uint %tmp.63 to int ; <int> [#uses=1]
+ %tmp292.0 = add int %tmp.63, %tmp29222 ; <int> [#uses=1]
+ %tmp.65 = mul uint %indvar60, %raster ; <uint> [#uses=1]
+ %tmp.65 = cast uint %tmp.65 to int ; <int> [#uses=1]
+ %tmp104.sum97 = add int %tmp102, %tmp.65 ; <int> [#uses=1]
+ %line.1.0 = getelementptr ubyte* %base, int %tmp104.sum97 ; <ubyte*> [#uses=3]
+ %tmp = load ubyte* %line.1.0 ; <ubyte> [#uses=1]
+ %tmp206 = cast ubyte %tmp to uint ; <uint> [#uses=1]
+ %tmp208 = xor uint %tmp206, %iftmp.35.0 ; <uint> [#uses=2]
+ %tmp210 = cast uint %tmp208 to ubyte ; <ubyte> [#uses=1]
+ %tmp213 = or ubyte %tmp211, %tmp210 ; <ubyte> [#uses=1]
+ %tmp215 = load ubyte* %dest.1.0 ; <ubyte> [#uses=1]
+ %tmp216 = and ubyte %tmp213, %tmp215 ; <ubyte> [#uses=1]
+ %tmp221 = and uint %tmp219, %tmp208 ; <uint> [#uses=1]
+ %tmp221 = cast uint %tmp221 to ubyte ; <ubyte> [#uses=1]
+ %tmp222 = or ubyte %tmp216, %tmp221 ; <ubyte> [#uses=1]
+ store ubyte %tmp222, ubyte* %dest.1.0
+ br bool false, label %bb252, label %cond_true249.preheader
+
+cond_false299: ; preds = %cond_next151
+ %tmp302 = sub int %tmp107, %tmp110 ; <int> [#uses=1]
+ %tmp303 = and int %tmp302, 7 ; <int> [#uses=3]
+ %tmp305 = sub int 8, %tmp303 ; <int> [#uses=1]
+ %tmp45438 = add int %h, -1 ; <int> [#uses=2]
+ %tmp45640 = setlt int %tmp45438, 0 ; <bool> [#uses=1]
+ br bool %tmp45640, label %return, label %cond_true457.preheader
+
+cond_true316: ; preds = %cond_true457
+ %tmp318 = cast ubyte %tmp318 to uint ; <uint> [#uses=1]
+ %tmp320 = shr uint %tmp318, ubyte %tmp319 ; <uint> [#uses=1]
+ br label %cond_next340
+
+cond_false321: ; preds = %cond_true457
+ %tmp3188 = cast ubyte %tmp318 to uint ; <uint> [#uses=1]
+ %tmp325 = shl uint %tmp3188, ubyte %tmp324 ; <uint> [#uses=2]
+ %tmp326 = cast ubyte* %line.3.0 to uint ; <uint> [#uses=1]
+ %tmp327 = add uint %tmp326, 1 ; <uint> [#uses=1]
+ %tmp327 = cast uint %tmp327 to ubyte* ; <ubyte*> [#uses=3]
+ br bool %tmp330, label %cond_true331, label %cond_next340
+
+cond_true331: ; preds = %cond_false321
+ %tmp333 = load ubyte* %tmp327 ; <ubyte> [#uses=1]
+ %tmp333 = cast ubyte %tmp333 to uint ; <uint> [#uses=1]
+ %tmp335 = shr uint %tmp333, ubyte %tmp319 ; <uint> [#uses=1]
+ %tmp337 = add uint %tmp335, %tmp325 ; <uint> [#uses=1]
+ br label %cond_next340
+
+cond_next340: ; preds = %cond_true331, %cond_false321, %cond_true316
+ %bits.0 = phi uint [ %tmp320, %cond_true316 ], [ %tmp337, %cond_true331 ], [ %tmp325, %cond_false321 ] ; <uint> [#uses=1]
+ %bptr307.3 = phi ubyte* [ %line.3.0, %cond_true316 ], [ %tmp327, %cond_true331 ], [ %tmp327, %cond_false321 ] ; <ubyte*> [#uses=2]
+ %tmp343 = xor uint %bits.0, %iftmp.35.0 ; <uint> [#uses=2]
+ %tmp345 = cast uint %tmp343 to ubyte ; <ubyte> [#uses=1]
+ %tmp348 = or ubyte %tmp346, %tmp345 ; <ubyte> [#uses=1]
+ %tmp350 = load ubyte* %dest.3.0 ; <ubyte> [#uses=1]
+ %tmp351 = and ubyte %tmp348, %tmp350 ; <ubyte> [#uses=1]
+ %tmp356 = and uint %tmp354, %tmp343 ; <uint> [#uses=1]
+ %tmp356 = cast uint %tmp356 to ubyte ; <ubyte> [#uses=1]
+ %tmp357 = or ubyte %tmp351, %tmp356 ; <ubyte> [#uses=1]
+ store ubyte %tmp357, ubyte* %dest.3.0
+ %tmp362 = cast ubyte* %dest.3.0 to uint ; <uint> [#uses=1]
+ %optr309.3.in51 = add uint %tmp362, 1 ; <uint> [#uses=2]
+ %optr309.353 = cast uint %optr309.3.in51 to ubyte* ; <ubyte*> [#uses=2]
+ br bool %tmp39755, label %cond_true398.preheader, label %bb401
+
+cond_true398.preheader: ; preds = %cond_next340
+ br label %cond_true398
+
+cond_true398: ; preds = %cond_true398, %cond_true398.preheader
+ %indvar66 = phi uint [ 0, %cond_true398.preheader ], [ %indvar.next67, %cond_true398 ] ; <uint> [#uses=3]
+ %bptr307.4.0 = phi ubyte* [ %tmp370, %cond_true398 ], [ %bptr307.3, %cond_true398.preheader ] ; <ubyte*> [#uses=2]
+ %optr309.3.0 = phi ubyte* [ %optr309.3, %cond_true398 ], [ %optr309.353, %cond_true398.preheader ] ; <ubyte*> [#uses=2]
+ %optr309.3.in.0 = add uint %indvar66, %optr309.3.in51 ; <uint> [#uses=1]
+ %tmp.70 = add int %tmp109, %w ; <int> [#uses=1]
+ %indvar66 = cast uint %indvar66 to int ; <int> [#uses=1]
+ %tmp.72 = mul int %indvar66, -8 ; <int> [#uses=1]
+ %tmp.71 = add int %tmp.70, -8 ; <int> [#uses=1]
+ %count308.3.0 = add int %tmp.72, %tmp.71 ; <int> [#uses=1]
+ %tmp366 = load ubyte* %bptr307.4.0 ; <ubyte> [#uses=1]
+ %tmp366 = cast ubyte %tmp366 to uint ; <uint> [#uses=1]
+ %tmp369 = cast ubyte* %bptr307.4.0 to uint ; <uint> [#uses=1]
+ %tmp370 = add uint %tmp369, 1 ; <uint> [#uses=1]
+ %tmp370 = cast uint %tmp370 to ubyte* ; <ubyte*> [#uses=3]
+ %tmp372 = load ubyte* %tmp370 ; <ubyte> [#uses=1]
+ %tmp372 = cast ubyte %tmp372 to uint ; <uint> [#uses=1]
+ %tmp374463 = shr uint %tmp372, ubyte %tmp319 ; <uint> [#uses=1]
+ %tmp368 = shl uint %tmp366, ubyte %tmp324 ; <uint> [#uses=1]
+ %tmp377 = add uint %tmp374463, %tmp368 ; <uint> [#uses=1]
+ %tmp379 = xor uint %tmp377, %iftmp.35.0 ; <uint> [#uses=2]
+ %tmp382 = or uint %tmp379, %iftmp.36.0 ; <uint> [#uses=1]
+ %tmp382 = cast uint %tmp382 to ubyte ; <ubyte> [#uses=1]
+ %tmp384 = load ubyte* %optr309.3.0 ; <ubyte> [#uses=1]
+ %tmp385 = and ubyte %tmp382, %tmp384 ; <ubyte> [#uses=1]
+ %tmp388 = and uint %tmp379, %iftmp.37.0 ; <uint> [#uses=1]
+ %tmp388 = cast uint %tmp388 to ubyte ; <ubyte> [#uses=1]
+ %tmp389 = or ubyte %tmp385, %tmp388 ; <ubyte> [#uses=1]
+ store ubyte %tmp389, ubyte* %optr309.3.0
+ %tmp392 = add int %count308.3.0, -8 ; <int> [#uses=2]
+ %optr309.3.in = add uint %optr309.3.in.0, 1 ; <uint> [#uses=1]
+ %optr309.3 = cast uint %optr309.3.in to ubyte* ; <ubyte*> [#uses=2]
+ %tmp397 = setgt int %tmp392, 7 ; <bool> [#uses=1]
+ %indvar.next67 = add uint %indvar66, 1 ; <uint> [#uses=1]
+ br bool %tmp397, label %cond_true398, label %bb401.loopexit
+
+bb401.loopexit: ; preds = %cond_true398
+ br label %bb401
+
+bb401: ; preds = %bb401.loopexit, %cond_next340
+ %count308.3.1 = phi int [ %tmp361, %cond_next340 ], [ %tmp392, %bb401.loopexit ] ; <int> [#uses=2]
+ %bptr307.4.1 = phi ubyte* [ %bptr307.3, %cond_next340 ], [ %tmp370, %bb401.loopexit ] ; <ubyte*> [#uses=2]
+ %optr309.3.1 = phi ubyte* [ %optr309.353, %cond_next340 ], [ %optr309.3, %bb401.loopexit ] ; <ubyte*> [#uses=2]
+ %tmp403 = setgt int %count308.3.1, 0 ; <bool> [#uses=1]
+ br bool %tmp403, label %cond_true404, label %cond_next442
+
+cond_true404: ; preds = %bb401
+ %tmp406 = load ubyte* %bptr307.4.1 ; <ubyte> [#uses=1]
+ %tmp406 = cast ubyte %tmp406 to int ; <int> [#uses=1]
+ %tmp408 = shl int %tmp406, ubyte %tmp324 ; <int> [#uses=2]
+ %tmp413 = setgt int %count308.3.1, %tmp303 ; <bool> [#uses=1]
+ br bool %tmp413, label %cond_true414, label %cond_next422
+
+cond_true414: ; preds = %cond_true404
+ %tmp409 = cast ubyte* %bptr307.4.1 to uint ; <uint> [#uses=1]
+ %tmp410 = add uint %tmp409, 1 ; <uint> [#uses=1]
+ %tmp410 = cast uint %tmp410 to ubyte* ; <ubyte*> [#uses=1]
+ %tmp416 = load ubyte* %tmp410 ; <ubyte> [#uses=1]
+ %tmp416 = cast ubyte %tmp416 to uint ; <uint> [#uses=1]
+ %tmp418 = shr uint %tmp416, ubyte %tmp319 ; <uint> [#uses=1]
+ %tmp418 = cast uint %tmp418 to int ; <int> [#uses=1]
+ %tmp420 = add int %tmp418, %tmp408 ; <int> [#uses=1]
+ br label %cond_next422
+
+cond_next422: ; preds = %cond_true414, %cond_true404
+ %bits.6 = phi int [ %tmp420, %cond_true414 ], [ %tmp408, %cond_true404 ] ; <int> [#uses=1]
+ %tmp425 = xor int %bits.6, %iftmp.35.0 ; <int> [#uses=1]
+ %tmp427 = cast int %tmp425 to ubyte ; <ubyte> [#uses=2]
+ %tmp430 = or ubyte %tmp428, %tmp427 ; <ubyte> [#uses=1]
+ %tmp432 = load ubyte* %optr309.3.1 ; <ubyte> [#uses=1]
+ %tmp433 = and ubyte %tmp430, %tmp432 ; <ubyte> [#uses=1]
+ %tmp438 = and ubyte %tmp436, %tmp427 ; <ubyte> [#uses=1]
+ %tmp439 = or ubyte %tmp433, %tmp438 ; <ubyte> [#uses=1]
+ store ubyte %tmp439, ubyte* %optr309.3.1
+ br label %cond_next442
+
+cond_next442: ; preds = %cond_next422, %bb401
+ %tmp443 = cast ubyte** %dest_line.3.0 to uint ; <uint> [#uses=1]
+ %tmp444 = add uint %tmp443, 4 ; <uint> [#uses=1]
+ %tmp444 = cast uint %tmp444 to ubyte** ; <ubyte**> [#uses=2]
+ %tmp446 = load ubyte** %tmp444 ; <ubyte*> [#uses=1]
+ %tmp448 = getelementptr ubyte* %tmp446, int %tmp93 ; <ubyte*> [#uses=1]
+ %tmp454 = add int %tmp454.0, -1 ; <int> [#uses=1]
+ %tmp456 = setlt int %tmp454, 0 ; <bool> [#uses=1]
+ %indvar.next75 = add uint %indvar74, 1 ; <uint> [#uses=1]
+ br bool %tmp456, label %return.loopexit56, label %cond_true457
+
+cond_true457.preheader: ; preds = %cond_false299
+ %tmp315 = setlt int %tmp107, %tmp110 ; <bool> [#uses=1]
+ %tmp319 = cast int %tmp303 to ubyte ; <ubyte> [#uses=4]
+ %tmp324 = cast int %tmp305 to ubyte ; <ubyte> [#uses=3]
+ %tmp330 = setlt int %tmp107, %w ; <bool> [#uses=1]
+ %tmp344 = cast uint %mask.1.1 to ubyte ; <ubyte> [#uses=1]
+ %tmp344not = xor ubyte %tmp344, 255 ; <ubyte> [#uses=1]
+ %tmp347 = cast uint %iftmp.36.0 to ubyte ; <ubyte> [#uses=2]
+ %tmp346 = or ubyte %tmp347, %tmp344not ; <ubyte> [#uses=1]
+ %tmp354 = and uint %iftmp.37.0, %mask.1.1 ; <uint> [#uses=1]
+ %tmp361 = sub int %w, %tmp110 ; <int> [#uses=2]
+ %tmp39755 = setgt int %tmp361, 7 ; <bool> [#uses=1]
+ %iftmp.35.0 = cast uint %iftmp.35.0 to int ; <int> [#uses=1]
+ %tmp426 = cast uint %rmask.0.1 to ubyte ; <ubyte> [#uses=1]
+ %tmp426not = xor ubyte %tmp426, 255 ; <ubyte> [#uses=1]
+ %tmp428 = or ubyte %tmp347, %tmp426not ; <ubyte> [#uses=1]
+ %tmp436 = and uint %iftmp.37.0, %rmask.0.1 ; <uint> [#uses=1]
+ %tmp436 = cast uint %tmp436 to ubyte ; <ubyte> [#uses=1]
+ br label %cond_true457
+
+cond_true457: ; preds = %cond_true457.preheader, %cond_next442
+ %indvar74 = phi uint [ 0, %cond_true457.preheader ], [ %indvar.next75, %cond_next442 ] ; <uint> [#uses=3]
+ %dest.3.0 = phi ubyte* [ %tmp448, %cond_next442 ], [ %tmp100, %cond_true457.preheader ] ; <ubyte*> [#uses=3]
+ %dest_line.3.0 = phi ubyte** [ %tmp444, %cond_next442 ], [ %tmp96, %cond_true457.preheader ] ; <ubyte**> [#uses=1]
+ %tmp.77 = sub uint 0, %indvar74 ; <uint> [#uses=1]
+ %tmp.77 = cast uint %tmp.77 to int ; <int> [#uses=1]
+ %tmp454.0 = add int %tmp.77, %tmp45438 ; <int> [#uses=1]
+ %tmp.79 = mul uint %indvar74, %raster ; <uint> [#uses=1]
+ %tmp.79 = cast uint %tmp.79 to int ; <int> [#uses=1]
+ %tmp104.sum = add int %tmp102, %tmp.79 ; <int> [#uses=1]
+ %line.3.0 = getelementptr ubyte* %base, int %tmp104.sum ; <ubyte*> [#uses=3]
+ %tmp318 = load ubyte* %line.3.0 ; <ubyte> [#uses=2]
+ br bool %tmp315, label %cond_false321, label %cond_true316
+
+return.loopexit: ; preds = %cond_next280
+ br label %return.loopexit.split
+
+return.loopexit.split: ; preds = %return.loopexit, %return.loopexit.us
+ br label %return
+
+return.loopexit56: ; preds = %cond_next442
+ br label %return
+
+return: ; preds = %return.loopexit56, %return.loopexit.split, %cond_false299, %cond_true197, %cond_next78, %cond_next63, %bb58, %cond_next46
+ %retval.0 = phi int [ 0, %cond_next46 ], [ -1, %bb58 ], [ -1, %cond_next63 ], [ -1, %cond_next78 ], [ 0, %cond_true197 ], [ 0, %cond_false299 ], [ 0, %return.loopexit.split ], [ 0, %return.loopexit56 ] ; <int> [#uses=1]
+ ret int %retval.0
+}
+
+declare int %mem_no_fault_proc(%struct.gx_device_memory*, int, int, int, int, int)
+
+declare int %mem_mono_fill_rectangle(%struct.gx_device*, int, int, int, int, uint)
+
+declare int %mem_copy_mono_recover(%struct.gx_device*, ubyte*, int, int, int, int, int, int, uint, uint, int)
diff --git a/test/Transforms/InstCombine/2006-06-28-infloop.ll b/test/Transforms/InstCombine/2006-06-28-infloop.ll
new file mode 100644
index 0000000..bd1dbd0
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-06-28-infloop.ll
@@ -0,0 +1,21 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+target endian = big
+target pointersize = 32
+target triple = "powerpc-apple-darwin8"
+
+implementation ; Functions:
+
+void %test() {
+entry:
+ %tmp = getelementptr { long, long, long, long }* null, int 0, uint 3
+ %tmp = load long* %tmp ; <long> [#uses=1]
+ %tmp8.ui = load ulong* null ; <ulong> [#uses=1]
+ %tmp8 = cast ulong %tmp8.ui to long ; <long> [#uses=1]
+ %tmp9 = and long %tmp8, %tmp ; <long> [#uses=1]
+ %sext = cast long %tmp9 to int ; <int> [#uses=1]
+ %tmp27.i = cast int %sext to long ; <long> [#uses=1]
+ tail call void %foo( uint 0, long %tmp27.i )
+ unreachable
+}
+
+declare void %foo(uint, long)
diff --git a/test/Transforms/InstCombine/2006-09-11-EmptyStructCrash.ll b/test/Transforms/InstCombine/2006-09-11-EmptyStructCrash.ll
new file mode 100644
index 0000000..61f01ce
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-09-11-EmptyStructCrash.ll
@@ -0,0 +1,48 @@
+; PR905
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+; END.
+
+ %RPYTHON_EXCEPTION = type { %RPYTHON_EXCEPTION_VTABLE* }
+ %RPYTHON_EXCEPTION_VTABLE = type { %RPYTHON_EXCEPTION_VTABLE*, int, int, %RPyOpaque_RuntimeTypeInfo*, %arraytype_Char*, %functiontype_12* }
+ %RPyOpaque_RuntimeTypeInfo = type opaque*
+ %arraytype_Char = type { int, [0 x sbyte] }
+ %fixarray_array1019 = type [1019 x sbyte*]
+ %functiontype_12 = type %RPYTHON_EXCEPTION* ()
+ %functiontype_14 = type void (%structtype_pypy.rpython.memory.gc.MarkSweepGC*)
+ %structtype_AddressLinkedListChunk = type { %structtype_AddressLinkedListChunk*, int, %fixarray_array1019 }
+ %structtype_exceptions.Exception = type { %RPYTHON_EXCEPTION }
+ %structtype_gc_pool = type { }
+ %structtype_gc_pool_node = type { %structtype_header*, %structtype_gc_pool_node* }
+ %structtype_header = type { int, %structtype_header* }
+ %structtype_pypy.rpython.memory.gc.MarkSweepGC = type { %structtype_exceptions.Exception, int, int, bool, %structtype_gc_pool*, int, %structtype_header*, %structtype_header*, %structtype_gc_pool_node*, double, double }
+
+implementation ; Functions:
+
+fastcc void %pypy_MarkSweepGC.collect() {
+block0:
+ %v1221 = load %structtype_AddressLinkedListChunk** null ; <%structtype_AddressLinkedListChunk*> [#uses=1]
+ %v1222 = setne %structtype_AddressLinkedListChunk* %v1221, null ; <bool> [#uses=1]
+ br bool %v1222, label %block79, label %block4
+
+block4: ; preds = %block0
+ ret void
+
+block22: ; preds = %block79
+ ret void
+
+block67: ; preds = %block79
+ %v1459 = load %structtype_gc_pool** null ; <%structtype_gc_pool*> [#uses=1]
+ %v1460 = cast %structtype_gc_pool* %v1459 to sbyte* ; <sbyte*> [#uses=1]
+ %tmp_873 = cast sbyte* %v1460 to int ; <int> [#uses=1]
+ %tmp_874 = sub int %tmp_873, 0 ; <int> [#uses=1]
+ %v1461 = cast int %tmp_874 to sbyte* ; <sbyte*> [#uses=1]
+ %v1462 = cast sbyte* %v1461 to %structtype_header* ; <%structtype_header*> [#uses=1]
+ %tmp_876 = getelementptr %structtype_header* %v1462, int 0, uint 0 ; <int*> [#uses=1]
+ store int 0, int* %tmp_876
+ ret void
+
+block79: ; preds = %block0
+ %v1291 = load %structtype_gc_pool** null ; <%structtype_gc_pool*> [#uses=1]
+ %v1292 = setne %structtype_gc_pool* %v1291, null ; <bool> [#uses=1]
+ br bool %v1292, label %block67, label %block22
+}
diff --git a/test/Transforms/InstCombine/2006-09-15-CastToBool.ll b/test/Transforms/InstCombine/2006-09-15-CastToBool.ll
new file mode 100644
index 0000000..051d91f
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-09-15-CastToBool.ll
@@ -0,0 +1,14 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep and
+; PR913
+
+int %test(int* %tmp1) {
+ %tmp.i = load int* %tmp1 ; <int> [#uses=1]
+ %tmp = cast int %tmp.i to uint ; <uint> [#uses=1]
+ %tmp2.ui = shr uint %tmp, ubyte 5 ; <uint> [#uses=1]
+ %tmp2 = cast uint %tmp2.ui to int ; <int> [#uses=1]
+ %tmp3 = and int %tmp2, 1 ; <int> [#uses=1]
+ %tmp3 = cast int %tmp3 to bool ; <bool> [#uses=1]
+ %tmp34 = cast bool %tmp3 to int ; <int> [#uses=1]
+ ret int %tmp34
+}
+
diff --git a/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst-2.ll b/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst-2.ll
new file mode 100644
index 0000000..58c847f
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst-2.ll
@@ -0,0 +1,10 @@
+; The optimizer should be able to remove cast operation here.
+; RUN: llvm-upgrade %s -o - | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep sext.*i32
+
+bool %eq_signed_to_small_unsigned(sbyte %SB) {
+ %Y = cast sbyte %SB to uint ; <uint> [#uses=1]
+ %C = seteq uint %Y, 17 ; <bool> [#uses=1]
+ ret bool %C
+ }
+
diff --git a/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst.ll b/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst.ll
new file mode 100644
index 0000000..3ada90d
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst.ll
@@ -0,0 +1,10 @@
+; This test case is reduced from llvmAsmParser.cpp
+; The optimizer should not remove the cast here.
+; RUN: llvm-upgrade %s -o - | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep sext.*i32
+
+bool %test(short %X) {
+ %A = cast short %X to uint
+ %B = setgt uint %A, 1330
+ ret bool %B
+}
diff --git a/test/Transforms/InstCombine/2006-10-20-mask.ll b/test/Transforms/InstCombine/2006-10-20-mask.ll
new file mode 100644
index 0000000..8e829a7
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-10-20-mask.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-upgrade %s -o - | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep and
+ulong %foo(ulong %tmp, ulong %tmp2) {
+ %tmp = cast ulong %tmp to uint
+ %tmp2 = cast ulong %tmp2 to uint
+ %tmp3 = and uint %tmp, %tmp2
+ %tmp4 = cast uint %tmp3 to ulong
+ ret ulong %tmp4
+}
diff --git a/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll b/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll
new file mode 100644
index 0000000..2c53224e
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep mul | wc -l | grep 2
+
+
+<4 x float> %test(<4 x float> %V) {
+ %Y = mul <4 x float> %V, <float 1.0, float 2.0, float 3.0, float 4.0>
+ %Z = mul <4 x float> %Y, <float 1.0, float 200000.0, float -3.0, float 4.0>
+ ret <4 x float> %Z
+}
diff --git a/test/Transforms/InstCombine/2006-11-03-Memmove64.ll b/test/Transforms/InstCombine/2006-11-03-Memmove64.ll
new file mode 100644
index 0000000..550c2eb
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-11-03-Memmove64.ll
@@ -0,0 +1,19 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep memmove.i32
+; Instcombine was trying to turn this into a memmove.i32
+
+target datalayout = "e-p:64:64"
+target endian = little
+target pointersize = 64
+target triple = "alphaev67-unknown-linux-gnu"
+%str10 = internal constant [1 x sbyte] zeroinitializer ; <[1 x sbyte]*> [#uses=1]
+
+implementation ; Functions:
+
+void %do_join(sbyte* %b) {
+entry:
+ call void %llvm.memmove.i64( sbyte* %b, sbyte* getelementptr ([1 x sbyte]* %str10, int 0, ulong 0), ulong 1, uint 1 )
+ ret void
+}
+
+declare void %llvm.memmove.i64(sbyte*, sbyte*, ulong, uint)
diff --git a/test/Transforms/InstCombine/2006-11-10-ashr-miscompile.ll b/test/Transforms/InstCombine/2006-11-10-ashr-miscompile.ll
new file mode 100644
index 0000000..1809d3c
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-11-10-ashr-miscompile.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep lshr
+; Verify this is not turned into -1.
+
+int %test(ubyte %amt) {
+ %B = lshr int -1, ubyte %amt
+ ret int %B
+}
diff --git a/test/Transforms/InstCombine/2006-11-27-XorBug.ll b/test/Transforms/InstCombine/2006-11-27-XorBug.ll
new file mode 100644
index 0000000..7cbd18e
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-11-27-XorBug.ll
@@ -0,0 +1,12 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep and.*32
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep or.*153
+; PR1014
+
+int %test(int %tmp1) {
+ %ovm = and int %tmp1, 32 ; <int> [#uses=1]
+ %ov3 = add int %ovm, 145 ; <int> [#uses=2]
+ %ov110 = xor int %ov3, 153
+ ret int %ov110
+}
+
diff --git a/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll b/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll
new file mode 100644
index 0000000..e2cdf31
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep sub
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep add
+
+<4 x float> %test(<4 x float> %tmp26, <4 x float> %tmp53) {
+ ; (X+Y)-Y != X for fp vectors.
+ %tmp64 = add <4 x float> %tmp26, %tmp53
+ %tmp75 = sub <4 x float> %tmp64, %tmp53
+ ret <4 x float> %tmp75
+}
diff --git a/test/Transforms/InstCombine/2006-12-05-fp-to-int-ext.ll b/test/Transforms/InstCombine/2006-12-05-fp-to-int-ext.ll
new file mode 100644
index 0000000..4661dfe
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-05-fp-to-int-ext.ll
@@ -0,0 +1,13 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep zext
+
+; Never merge these two conversions, even though it's possible: this is
+; significantly more expensive than the two conversions on some targets
+; and it causes libgcc to be compile __fixunsdfdi into a recursive
+; function.
+
+
+long %test(double %D) {
+ %A = fptoui double %D to uint
+ %B = zext uint %A to long
+ ret long %B
+}
diff --git a/test/Transforms/InstCombine/2006-12-08-ICmp-Combining.ll b/test/Transforms/InstCombine/2006-12-08-ICmp-Combining.ll
new file mode 100644
index 0000000..db4b9e2
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-08-ICmp-Combining.ll
@@ -0,0 +1,17 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep {%bothcond =}
+bool %Doit_bb(int %i.0) {
+bb: ; preds = %newFuncRoot
+ %tmp = setgt int %i.0, 0 ; <bool> [#uses=1]
+ %tmp.not = xor bool %tmp, true ; <bool> [#uses=1]
+ %tmp2 = setgt int %i.0, 8 ; <bool> [#uses=1]
+ %bothcond = or bool %tmp.not, %tmp2 ; <bool> [#uses=1]
+ br bool %bothcond, label %exitTrue, label %exitFalse
+
+exitTrue: ; preds = %bb
+ ret bool true
+
+exitFalse: ; preds = %bb
+ ret bool false
+
+}
diff --git a/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll b/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll
new file mode 100644
index 0000000..0965623
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll
@@ -0,0 +1,57 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep {icmp sgt}
+; END.
+
+; ModuleID = 'visible.bc'
+target datalayout = "e-p:32:32"
+target endian = little
+target pointersize = 32
+target triple = "i686-pc-linux-gnu"
+ %struct.point = type { int, int }
+
+implementation ; Functions:
+
+int %visible(int %direction, long %p1.0, long %p2.0, long %p3.0) {
+entry:
+ %p1_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %p2_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %p3_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ "alloca point" = bitcast int 0 to int ; <int> [#uses=0]
+ %tmp = bitcast %struct.point* %p1_addr to { long }* ; <{ long }*> [#uses=1]
+ %tmp = getelementptr { long }* %tmp, uint 0, uint 0 ; <long*> [#uses=1]
+ store long %p1.0, long* %tmp
+ %tmp1 = bitcast %struct.point* %p2_addr to { long }* ; <{ long }*> [#uses=1]
+ %tmp2 = getelementptr { long }* %tmp1, uint 0, uint 0 ; <long*> [#uses=1]
+ store long %p2.0, long* %tmp2
+ %tmp3 = bitcast %struct.point* %p3_addr to { long }* ; <{ long }*> [#uses=1]
+ %tmp4 = getelementptr { long }* %tmp3, uint 0, uint 0 ; <long*> [#uses=1]
+ store long %p3.0, long* %tmp4
+ %tmp = seteq int %direction, 0 ; <bool> [#uses=1]
+ %tmp5 = bitcast %struct.point* %p1_addr to { long }* ; <{ long }*> [#uses=1]
+ %tmp6 = getelementptr { long }* %tmp5, uint 0, uint 0 ; <long*> [#uses=1]
+ %tmp = load long* %tmp6 ; <long> [#uses=1]
+ %tmp7 = bitcast %struct.point* %p2_addr to { long }* ; <{ long }*> [#uses=1]
+ %tmp8 = getelementptr { long }* %tmp7, uint 0, uint 0 ; <long*> [#uses=1]
+ %tmp9 = load long* %tmp8 ; <long> [#uses=1]
+ %tmp10 = bitcast %struct.point* %p3_addr to { long }* ; <{ long }*> [#uses=1]
+ %tmp11 = getelementptr { long }* %tmp10, uint 0, uint 0 ; <long*> [#uses=1]
+ %tmp12 = load long* %tmp11 ; <long> [#uses=1]
+ %tmp13 = call int %determinant( long %tmp, long %tmp9, long %tmp12 ) ; <int> [#uses=2]
+ br bool %tmp, label %cond_true, label %cond_false
+
+cond_true: ; preds = %entry
+ %tmp14 = setlt int %tmp13, 0 ; <bool> [#uses=1]
+ %tmp14 = zext bool %tmp14 to int ; <int> [#uses=1]
+ br label %return
+
+cond_false: ; preds = %entry
+ %tmp26 = setgt int %tmp13, 0 ; <bool> [#uses=1]
+ %tmp26 = zext bool %tmp26 to int ; <int> [#uses=1]
+ br label %return
+
+return: ; preds = %cond_false, %cond_true
+ %retval.0 = phi int [ %tmp14, %cond_true ], [ %tmp26, %cond_false ] ; <int> [#uses=1]
+ ret int %retval.0
+}
+
+declare int %determinant(long, long, long)
diff --git a/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll b/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll
new file mode 100644
index 0000000..466fa60
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll
@@ -0,0 +1,44 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep select
+; END.
+
+target datalayout = "e-p:32:32"
+target endian = little
+target pointersize = 32
+target triple = "i686-pc-linux-gnu"
+ %struct.point = type { int, int }
+
+implementation ; Functions:
+
+int %visible(int %direction, long %p1.0, long %p2.0, long %p3.0) {
+entry:
+ %p1_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %p2_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %p3_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %tmp = bitcast %struct.point* %p1_addr to { long }* ; <{ long }*> [#uses=1]
+ %tmp = getelementptr { long }* %tmp, int 0, uint 0 ; <long*> [#uses=1]
+ store long %p1.0, long* %tmp
+ %tmp1 = bitcast %struct.point* %p2_addr to { long }* ; <{ long }*> [#uses=1]
+ %tmp2 = getelementptr { long }* %tmp1, int 0, uint 0 ; <long*> [#uses=1]
+ store long %p2.0, long* %tmp2
+ %tmp3 = bitcast %struct.point* %p3_addr to { long }* ; <{ long }*> [#uses=1]
+ %tmp4 = getelementptr { long }* %tmp3, int 0, uint 0 ; <long*> [#uses=1]
+ store long %p3.0, long* %tmp4
+ %tmp = seteq int %direction, 0 ; <bool> [#uses=1]
+ %tmp5 = bitcast %struct.point* %p1_addr to { long }* ; <{ long }*> [#uses=1]
+ %tmp6 = getelementptr { long }* %tmp5, int 0, uint 0 ; <long*> [#uses=1]
+ %tmp = load long* %tmp6 ; <long> [#uses=1]
+ %tmp7 = bitcast %struct.point* %p2_addr to { long }* ; <{ long }*> [#uses=1]
+ %tmp8 = getelementptr { long }* %tmp7, int 0, uint 0 ; <long*> [#uses=1]
+ %tmp9 = load long* %tmp8 ; <long> [#uses=1]
+ %tmp10 = bitcast %struct.point* %p3_addr to { long }* ; <{ long }*> [#uses=1]
+ %tmp11 = getelementptr { long }* %tmp10, int 0, uint 0 ; <long*> [#uses=1]
+ %tmp12 = load long* %tmp11 ; <long> [#uses=1]
+ %tmp13 = call int %determinant( long %tmp, long %tmp9, long %tmp12 ) ; <int> [#uses=2]
+ %tmp14 = setlt int %tmp13, 0 ; <bool> [#uses=1]
+ %tmp26 = setgt int %tmp13, 0 ; <bool> [#uses=1]
+ %retval.0.in = select bool %tmp, bool %tmp14, bool %tmp26 ; <bool> [#uses=1]
+ %retval.0 = zext bool %retval.0.in to int ; <int> [#uses=1]
+ ret int %retval.0
+}
+
+declare int %determinant(long, long, long)
diff --git a/test/Transforms/InstCombine/2006-12-10-ICmp-GEP-GEP.ll b/test/Transforms/InstCombine/2006-12-10-ICmp-GEP-GEP.ll
new file mode 100644
index 0000000..1343a4f
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-10-ICmp-GEP-GEP.ll
@@ -0,0 +1,167 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep -v {icmp ult int}
+; END.
+
+; ModuleID = 'good.bc'
+target datalayout = "e-p:32:32"
+target endian = little
+target pointersize = 32
+target triple = "i686-pc-linux-gnu"
+ %struct.edgeBox = type { short, short, short, short, short, short }
+%qsz = external global int ; <int*> [#uses=12]
+%thresh = external global int ; <int*> [#uses=2]
+%mthresh = external global int ; <int*> [#uses=1]
+
+implementation ; Functions:
+
+int %qsorte(sbyte* %base, int %n, int %size) {
+entry:
+ %tmp = setgt int %n, 1 ; <bool> [#uses=1]
+ br bool %tmp, label %cond_next, label %return
+
+cond_next: ; preds = %entry
+ store int %size, int* %qsz
+ %tmp3 = shl int %size, ubyte 2 ; <int> [#uses=1]
+ store int %tmp3, int* %thresh
+ %tmp4 = load int* %qsz ; <int> [#uses=1]
+ %tmp5 = mul int %tmp4, 6 ; <int> [#uses=1]
+ store int %tmp5, int* %mthresh
+ %tmp6 = load int* %qsz ; <int> [#uses=1]
+ %tmp8 = mul int %tmp6, %n ; <int> [#uses=1]
+ %tmp9 = getelementptr sbyte* %base, int %tmp8 ; <sbyte*> [#uses=3]
+ %tmp11 = setgt int %n, 3 ; <bool> [#uses=1]
+ br bool %tmp11, label %cond_true12, label %bb30
+
+cond_true12: ; preds = %cond_next
+ %tmp156 = call int %qste( sbyte* %base, sbyte* %tmp9 ) ; <int> [#uses=0]
+ %tmp16 = load int* %thresh ; <int> [#uses=1]
+ %tmp18 = getelementptr sbyte* %base, int %tmp16 ; <sbyte*> [#uses=2]
+ %tmp3117 = load int* %qsz ; <int> [#uses=1]
+ %tmp3318 = getelementptr sbyte* %base, int %tmp3117 ; <sbyte*> [#uses=2]
+ %tmp3621 = setlt sbyte* %tmp3318, %tmp18 ; <bool> [#uses=1]
+ br bool %tmp3621, label %bb, label %bb37
+
+bb: ; preds = %bb30, %cond_true12
+ %hi.0.0 = phi sbyte* [ %tmp18, %cond_true12 ], [ %hi.0, %bb30 ] ; <sbyte*> [#uses=4]
+ %j.1.0 = phi sbyte* [ %base, %cond_true12 ], [ %j.1, %bb30 ] ; <sbyte*> [#uses=4]
+ %tmp33.0 = phi sbyte* [ %tmp3318, %cond_true12 ], [ %tmp33, %bb30 ] ; <sbyte*> [#uses=6]
+ %tmp3 = bitcast sbyte* %j.1.0 to %struct.edgeBox* ; <%struct.edgeBox*> [#uses=1]
+ %tmp4 = bitcast sbyte* %tmp33.0 to %struct.edgeBox* ; <%struct.edgeBox*> [#uses=1]
+ %tmp255 = call int %comparee( %struct.edgeBox* %tmp3, %struct.edgeBox* %tmp4 ) ; <int> [#uses=1]
+ %tmp26 = setgt int %tmp255, 0 ; <bool> [#uses=1]
+ br bool %tmp26, label %cond_true27, label %bb30
+
+cond_true27: ; preds = %bb
+ br label %bb30
+
+bb30: ; preds = %cond_true27, %bb, %cond_next
+ %hi.0.3 = phi sbyte* [ %hi.0.0, %cond_true27 ], [ %hi.0.0, %bb ], [ undef, %cond_next ] ; <sbyte*> [#uses=0]
+ %j.1.3 = phi sbyte* [ %j.1.0, %cond_true27 ], [ %j.1.0, %bb ], [ undef, %cond_next ] ; <sbyte*> [#uses=0]
+ %tmp33.3 = phi sbyte* [ %tmp33.0, %cond_true27 ], [ %tmp33.0, %bb ], [ undef, %cond_next ] ; <sbyte*> [#uses=0]
+ %hi.0 = phi sbyte* [ %tmp9, %cond_next ], [ %hi.0.0, %bb ], [ %hi.0.0, %cond_true27 ] ; <sbyte*> [#uses=2]
+ %lo.1 = phi sbyte* [ %tmp33.0, %cond_true27 ], [ %tmp33.0, %bb ], [ %base, %cond_next ] ; <sbyte*> [#uses=1]
+ %j.1 = phi sbyte* [ %tmp33.0, %cond_true27 ], [ %j.1.0, %bb ], [ %base, %cond_next ] ; <sbyte*> [#uses=2]
+ %tmp31 = load int* %qsz ; <int> [#uses=1]
+ %tmp33 = getelementptr sbyte* %lo.1, int %tmp31 ; <sbyte*> [#uses=2]
+ %tmp36 = setlt sbyte* %tmp33, %hi.0 ; <bool> [#uses=1]
+ br bool %tmp36, label %bb, label %bb37
+
+bb37: ; preds = %bb30, %cond_true12
+ %j.1.1 = phi sbyte* [ %j.1, %bb30 ], [ %base, %cond_true12 ] ; <sbyte*> [#uses=4]
+ %tmp40 = seteq sbyte* %j.1.1, %base ; <bool> [#uses=1]
+ br bool %tmp40, label %bb115, label %cond_true41
+
+cond_true41: ; preds = %bb37
+ %tmp43 = load int* %qsz ; <int> [#uses=1]
+ %tmp45 = getelementptr sbyte* %base, int %tmp43 ; <sbyte*> [#uses=2]
+ %tmp6030 = setlt sbyte* %base, %tmp45 ; <bool> [#uses=1]
+ br bool %tmp6030, label %bb46, label %bb115
+
+bb46: ; preds = %bb46, %cond_true41
+ %j.2.0 = phi sbyte* [ %j.1.1, %cond_true41 ], [ %tmp52, %bb46 ] ; <sbyte*> [#uses=3]
+ %i.2.0 = phi sbyte* [ %base, %cond_true41 ], [ %tmp56, %bb46 ] ; <sbyte*> [#uses=3]
+ %tmp = load sbyte* %j.2.0 ; <sbyte> [#uses=2]
+ %tmp49 = load sbyte* %i.2.0 ; <sbyte> [#uses=1]
+ store sbyte %tmp49, sbyte* %j.2.0
+ %tmp52 = getelementptr sbyte* %j.2.0, int 1 ; <sbyte*> [#uses=2]
+ store sbyte %tmp, sbyte* %i.2.0
+ %tmp56 = getelementptr sbyte* %i.2.0, int 1 ; <sbyte*> [#uses=3]
+ %tmp60 = setlt sbyte* %tmp56, %tmp45 ; <bool> [#uses=1]
+ br bool %tmp60, label %bb46, label %bb115
+
+bb66: ; preds = %bb115, %bb66
+ %hi.3 = phi sbyte* [ %tmp118, %bb115 ], [ %tmp70, %bb66 ] ; <sbyte*> [#uses=2]
+ %tmp67 = load int* %qsz ; <int> [#uses=2]
+ %tmp68 = sub int 0, %tmp67 ; <int> [#uses=1]
+ %tmp70 = getelementptr sbyte* %hi.3, int %tmp68 ; <sbyte*> [#uses=2]
+ %tmp = bitcast sbyte* %tmp70 to %struct.edgeBox* ; <%struct.edgeBox*> [#uses=1]
+ %tmp1 = bitcast sbyte* %tmp118 to %struct.edgeBox* ; <%struct.edgeBox*> [#uses=1]
+ %tmp732 = call int %comparee( %struct.edgeBox* %tmp, %struct.edgeBox* %tmp1 ) ; <int> [#uses=1]
+ %tmp74 = setgt int %tmp732, 0 ; <bool> [#uses=1]
+ br bool %tmp74, label %bb66, label %bb75
+
+bb75: ; preds = %bb66
+ %tmp76 = load int* %qsz ; <int> [#uses=1]
+ %tmp70.sum = sub int %tmp76, %tmp67 ; <int> [#uses=1]
+ %tmp78 = getelementptr sbyte* %hi.3, int %tmp70.sum ; <sbyte*> [#uses=3]
+ %tmp81 = seteq sbyte* %tmp78, %tmp118 ; <bool> [#uses=1]
+ br bool %tmp81, label %bb115, label %cond_true82
+
+cond_true82: ; preds = %bb75
+ %tmp83 = load int* %qsz ; <int> [#uses=1]
+ %tmp118.sum = add int %tmp116, %tmp83 ; <int> [#uses=1]
+ %tmp85 = getelementptr sbyte* %min.1, int %tmp118.sum ; <sbyte*> [#uses=1]
+ %tmp10937 = getelementptr sbyte* %tmp85, int -1 ; <sbyte*> [#uses=3]
+ %tmp11239 = setlt sbyte* %tmp10937, %tmp118 ; <bool> [#uses=1]
+ br bool %tmp11239, label %bb115, label %bb86
+
+bb86: ; preds = %bb104, %cond_true82
+ %tmp109.0 = phi sbyte* [ %tmp10937, %cond_true82 ], [ %tmp109, %bb104 ] ; <sbyte*> [#uses=5]
+ %i.5.2 = phi sbyte* [ %i.5.3, %cond_true82 ], [ %i.5.1, %bb104 ] ; <sbyte*> [#uses=0]
+ %tmp100.2 = phi sbyte* [ %tmp100.3, %cond_true82 ], [ %tmp100.1, %bb104 ] ; <sbyte*> [#uses=0]
+ %tmp88 = load sbyte* %tmp109.0 ; <sbyte> [#uses=2]
+ %tmp9746 = load int* %qsz ; <int> [#uses=1]
+ %tmp9847 = sub int 0, %tmp9746 ; <int> [#uses=1]
+ %tmp10048 = getelementptr sbyte* %tmp109.0, int %tmp9847 ; <sbyte*> [#uses=3]
+ %tmp10350 = setlt sbyte* %tmp10048, %tmp78 ; <bool> [#uses=1]
+ br bool %tmp10350, label %bb104, label %bb91
+
+bb91: ; preds = %bb91, %bb86
+ %i.5.0 = phi sbyte* [ %tmp109.0, %bb86 ], [ %tmp100.0, %bb91 ] ; <sbyte*> [#uses=1]
+ %tmp100.0 = phi sbyte* [ %tmp10048, %bb86 ], [ %tmp100, %bb91 ] ; <sbyte*> [#uses=4]
+ %tmp93 = load sbyte* %tmp100.0 ; <sbyte> [#uses=1]
+ store sbyte %tmp93, sbyte* %i.5.0
+ %tmp97 = load int* %qsz ; <int> [#uses=1]
+ %tmp98 = sub int 0, %tmp97 ; <int> [#uses=1]
+ %tmp100 = getelementptr sbyte* %tmp100.0, int %tmp98 ; <sbyte*> [#uses=3]
+ %tmp103 = setlt sbyte* %tmp100, %tmp78 ; <bool> [#uses=1]
+ br bool %tmp103, label %bb104, label %bb91
+
+bb104: ; preds = %bb91, %bb86
+ %i.5.1 = phi sbyte* [ %tmp109.0, %bb86 ], [ %tmp100.0, %bb91 ] ; <sbyte*> [#uses=4]
+ %tmp100.1 = phi sbyte* [ %tmp10048, %bb86 ], [ %tmp100, %bb91 ] ; <sbyte*> [#uses=3]
+ store sbyte %tmp88, sbyte* %i.5.1
+ %tmp109 = getelementptr sbyte* %tmp109.0, int -1 ; <sbyte*> [#uses=3]
+ %tmp112 = setlt sbyte* %tmp109, %tmp118 ; <bool> [#uses=1]
+ br bool %tmp112, label %bb115, label %bb86
+
+bb115: ; preds = %bb104, %cond_true82, %bb75, %bb46, %cond_true41, %bb37
+ %tmp109.1 = phi sbyte* [ undef, %bb37 ], [ %tmp109.1, %bb75 ], [ %tmp10937, %cond_true82 ], [ %tmp109, %bb104 ], [ undef, %bb46 ], [ undef, %cond_true41 ] ; <sbyte*> [#uses=1]
+ %i.5.3 = phi sbyte* [ undef, %bb37 ], [ %i.5.3, %bb75 ], [ %i.5.3, %cond_true82 ], [ %i.5.1, %bb104 ], [ undef, %bb46 ], [ undef, %cond_true41 ] ; <sbyte*> [#uses=3]
+ %tmp100.3 = phi sbyte* [ undef, %bb37 ], [ %tmp100.3, %bb75 ], [ %tmp100.3, %cond_true82 ], [ %tmp100.1, %bb104 ], [ undef, %bb46 ], [ undef, %cond_true41 ] ; <sbyte*> [#uses=3]
+ %min.1 = phi sbyte* [ %tmp118, %bb104 ], [ %tmp118, %bb75 ], [ %base, %bb37 ], [ %base, %bb46 ], [ %base, %cond_true41 ], [ %tmp118, %cond_true82 ] ; <sbyte*> [#uses=2]
+ %j.5 = phi sbyte* [ %tmp100.1, %bb104 ], [ %j.5, %bb75 ], [ %tmp52, %bb46 ], [ %j.1.1, %bb37 ], [ %j.1.1, %cond_true41 ], [ %j.5, %cond_true82 ] ; <sbyte*> [#uses=2]
+ %i.4 = phi sbyte* [ %i.5.1, %bb104 ], [ %i.4, %bb75 ], [ %tmp56, %bb46 ], [ undef, %bb37 ], [ %base, %cond_true41 ], [ %i.4, %cond_true82 ] ; <sbyte*> [#uses=2]
+ %c.4 = phi sbyte [ %tmp88, %bb104 ], [ %c.4, %bb75 ], [ %tmp, %bb46 ], [ undef, %bb37 ], [ undef, %cond_true41 ], [ %c.4, %cond_true82 ] ; <sbyte> [#uses=2]
+ %tmp116 = load int* %qsz ; <int> [#uses=2]
+ %tmp118 = getelementptr sbyte* %min.1, int %tmp116 ; <sbyte*> [#uses=9]
+ %tmp122 = setlt sbyte* %tmp118, %tmp9 ; <bool> [#uses=1]
+ br bool %tmp122, label %bb66, label %return
+
+return: ; preds = %bb115, %entry
+ ret int undef
+}
+
+declare int %qste(sbyte*, sbyte*)
+
+declare int %comparee(%struct.edgeBox*, %struct.edgeBox*)
diff --git a/test/Transforms/InstCombine/2006-12-15-Range-Test.ll b/test/Transforms/InstCombine/2006-12-15-Range-Test.ll
new file mode 100644
index 0000000..cf253a9
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-15-Range-Test.ll
@@ -0,0 +1,36 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep icmp | wc -l | grep 1
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep {icmp ugt} | wc -l | grep 1
+; END.
+
+; ModuleID = 'bugpoint-tooptimize.bc'
+target datalayout = "e-p:32:32"
+target endian = little
+target pointersize = 32
+target triple = "i686-pc-linux-gnu"
+%r = external global [17 x int] ; <[17 x int]*> [#uses=1]
+
+implementation ; Functions:
+
+bool %print_pgm_cond_true(int %tmp12.reload, int* %tmp16.out) {
+newFuncRoot:
+ br label %cond_true
+
+bb27.exitStub: ; preds = %cond_true
+ store int %tmp16, int* %tmp16.out
+ ret bool true
+
+cond_next23.exitStub: ; preds = %cond_true
+ store int %tmp16, int* %tmp16.out
+ ret bool false
+
+cond_true: ; preds = %newFuncRoot
+ %tmp15 = getelementptr [17 x int]* %r, int 0, int %tmp12.reload ; <int*> [#uses=1]
+ %tmp16 = load int* %tmp15 ; <int> [#uses=4]
+ %tmp18 = icmp slt int %tmp16, -31 ; <bool> [#uses=1]
+ %tmp21 = icmp sgt int %tmp16, 31 ; <bool> [#uses=1]
+ %bothcond = or bool %tmp18, %tmp21 ; <bool> [#uses=1]
+ br bool %bothcond, label %bb27.exitStub, label %cond_next23.exitStub
+}
+
diff --git a/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll b/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll
new file mode 100644
index 0000000..044b945
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll
@@ -0,0 +1,35 @@
+; For PR1065. This causes an assertion in instcombine if a select with two cmp
+; operands is encountered.
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output
+; END.
+
+; ModuleID = 'PR1065.bc'
+target datalayout = "e-p:32:32"
+target endian = little
+target pointersize = 32
+target triple = "i686-pc-linux-gnu"
+ %struct.internal_state = type { int }
+ %struct.mng_data = type { uint, sbyte*, uint, uint, uint, uint, uint, uint, uint, uint, uint, ubyte, uint, uint, uint, sbyte, uint, uint, uint, uint, ushort, ushort, ushort, sbyte, sbyte, double, double, double, sbyte, sbyte, sbyte, sbyte, uint, uint, uint, uint, int, sbyte, int, int, sbyte*, sbyte* (uint)*, void (sbyte*, uint)*, void (sbyte*, sbyte*, uint)*, sbyte (%struct.mng_data*)*, sbyte (%struct.mng_data*)*, sbyte (%struct.mng_data*, sbyte*, uint, uint*)*, sbyte (%struct.mng_data*, sbyte*, uint, uint*)*, sbyte (%struct.mng_data*, int, sbyte, int, uint, int, int, sbyte*)*, sbyte (%struct.mng_data*, int, int, sbyte*)*, sbyte (%struct.mng_data*, uint, uint)*, sbyte (%struct.mng_data*, ubyte, sbyte*, sbyte*, sbyte*, sbyte*)*, sbyte (%struct.mng_data*)*, sbyte (%struct.mng_data*, sbyte*)*, sbyte (%struct.mng_data*, sbyte*)*, sbyte (%struct.mng_data*, uint, uint)*, sbyte (%struct.mng_data*, int, uint, sbyte*)*, sbyte (%struct.mng_data*, ubyte, ubyte, uint, uint)*, sbyte* (%struct.mng_data*, uint)*, sbyte* (%struct.mng_data*, uint)*, sbyte* (%struct.mng_data*, uint)*, sbyte (%struct.mng_data*, uint, uint, uint, uint)*, uint (%struct.mng_data*)*, sbyte (%struct.mng_data*, uint)*, sbyte (%struct.mng_data*, uint)*, sbyte (%struct.mng_data*, uint, uint, uint, uint, uint, uint, uint, uint)*, sbyte (%struct.mng_data*, ubyte)*, sbyte (%struct.mng_data*, uint, sbyte*)*, sbyte (%struct.mng_data*, uint, sbyte, sbyte*)*, sbyte, int, uint, sbyte*, sbyte*, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, uint, uint, ubyte, ubyte, ubyte, ubyte, ubyte, uint, sbyte, sbyte, sbyte, uint, ubyte*, uint, ubyte*, uint, sbyte, ubyte, sbyte, uint, ubyte*, ubyte*, uint, uint, ubyte*, ubyte*, %struct.mng_pushdata*, %struct.mng_pushdata*, %struct.mng_pushdata*, %struct.mng_pushdata*, sbyte, sbyte, int, uint, ubyte*, sbyte, sbyte, uint, uint, uint, uint, uint, uint, 
sbyte, sbyte, sbyte, sbyte, int, int, sbyte*, uint, uint, uint, sbyte, sbyte, uint, uint, uint, uint, sbyte, sbyte, ubyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, uint, sbyte*, sbyte*, sbyte*, uint, sbyte*, sbyte*, sbyte*, sbyte*, sbyte*, %struct.mng_savedata*, uint, uint, uint, uint, sbyte, int, int, int, int, int, int, int, int, int, int, int, int, uint, uint, uint, uint, ubyte*, ubyte*, ubyte*, sbyte, sbyte, int, int, int, int, int, int, int, int, int, sbyte*, sbyte*, sbyte*, sbyte*, sbyte*, sbyte*, [256 x ubyte], double, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, ushort, sbyte, ubyte, sbyte, ubyte, sbyte, int, int, sbyte, int, int, int, int, ushort, ushort, ushort, ubyte, ushort, ubyte, int, int, uint, uint, ubyte, uint, uint, sbyte, int, int, int, int, ubyte, uint, uint, sbyte, int, int, int, int, uint, sbyte, uint, ubyte, ushort, ushort, ushort, short, uint, [256 x %struct.mng_palette8e], uint, [256 x ubyte], uint, uint, uint, uint, uint, uint, uint, uint, uint, ubyte, uint, sbyte*, ushort, ushort, ushort, sbyte*, ubyte, ubyte, uint, uint, uint, uint, sbyte, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, sbyte*, ubyte, ubyte, ubyte, uint, sbyte*, sbyte*, ushort, ushort, ushort, ushort, int, int, sbyte*, %struct.z_stream, int, int, int, int, int, uint, sbyte, sbyte, [256 x uint], sbyte }
+ %struct.mng_palette8e = type { ubyte, ubyte, ubyte }
+ %struct.mng_pushdata = type { sbyte*, sbyte*, uint, sbyte, ubyte*, uint }
+ %struct.mng_savedata = type { sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, sbyte, ushort, ushort, ushort, ubyte, ushort, ubyte, ubyte, uint, uint, sbyte, int, int, int, int, uint, [256 x %struct.mng_palette8e], uint, [256 x ubyte], uint, uint, uint, uint, uint, uint, uint, uint, uint, ubyte, uint, sbyte*, ushort, ushort, ushort }
+ %struct.z_stream = type { ubyte*, uint, uint, ubyte*, uint, uint, sbyte*, %struct.internal_state*, sbyte* (sbyte*, uint, uint)*, void (sbyte*, sbyte*)*, sbyte*, int, uint, uint }
+
+implementation ; Functions:
+
+void %mng_write_basi() {
+entry:
+ %tmp = load ubyte* null ; <ubyte> [#uses=1]
+ %tmp = icmp ugt ubyte %tmp, 8 ; <bool> [#uses=1]
+ %tmp = load ushort* null ; <ushort> [#uses=2]
+ %tmp3 = icmp eq ushort %tmp, 255 ; <bool> [#uses=1]
+ %tmp7 = icmp eq ushort %tmp, -1 ; <bool> [#uses=1]
+ %bOpaque.0.in = select bool %tmp, bool %tmp7, bool %tmp3 ; <bool> [#uses=1]
+ br bool %bOpaque.0.in, label %cond_next90, label %bb95
+
+cond_next90: ; preds = %entry
+ ret void
+
+bb95: ; preds = %entry
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll b/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll
new file mode 100644
index 0000000..a5ee87b
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep zext
+; PR1107
+
+define i1 @test(i8 %A, i8 %B) {
+ %a = zext i8 %A to i32
+ %b = zext i8 %B to i32
+ %c = icmp sgt i32 %a, %b
+ ret i1 %c
+}
diff --git a/test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll b/test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll
new file mode 100644
index 0000000..073d3a1
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll
@@ -0,0 +1,6 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {fcmp uno.*0.0}
+; PR1111
+define i1 @test(double %X) {
+ %tmp = fcmp une double %X, %X
+ ret i1 %tmp
+}
diff --git a/test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll b/test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll
new file mode 100644
index 0000000..83d05d9
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-as < %s | opt -instcombine -disable-output
+
+define <4 x i32> @test(<4 x i32> %A) {
+ %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
+ %C = and <4 x i32> %B, < i32 -1, i32 -1, i32 -1, i32 -1 >
+ ret <4 x i32> %C
+}
diff --git a/test/Transforms/InstCombine/2007-01-27-AndICmp.ll b/test/Transforms/InstCombine/2007-01-27-AndICmp.ll
new file mode 100644
index 0000000..0e8c5b1
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-01-27-AndICmp.ll
@@ -0,0 +1,8 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {ugt.*, 1}
+
+define i1 @test(i32 %tmp1030) {
+ %tmp1037 = icmp ne i32 %tmp1030, 40 ; <i1> [#uses=1]
+ %tmp1039 = icmp ne i32 %tmp1030, 41 ; <i1> [#uses=1]
+ %tmp1042 = and i1 %tmp1037, %tmp1039 ; <i1> [#uses=1]
+ ret i1 %tmp1042
+}
diff --git a/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll b/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll
new file mode 100644
index 0000000..e559cdd
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll
@@ -0,0 +1,45 @@
+; RUN: llvm-as < %s | opt -instcombine -mem2reg | llvm-dis | grep {%A = alloca}
+; RUN: llvm-as < %s | opt -instcombine -mem2reg | llvm-dis | \
+; RUN: not grep {%B = alloca}
+; END.
+
+; Ensure that instcombine doesn't sink the loads in entry/cond_true into
+; cond_next. Doing so prevents mem2reg from promoting the B alloca.
+
+define i32 @test2(i32 %C) {
+entry:
+ %A = alloca i32
+ %B = alloca i32
+ %tmp = call i32 (...)* @bar( i32* %A ) ; <i32> [#uses=0]
+ %T = load i32* %A ; <i32> [#uses=1]
+ %tmp2 = icmp eq i32 %C, 0 ; <i1> [#uses=1]
+ br i1 %tmp2, label %cond_next, label %cond_true
+
+cond_true: ; preds = %entry
+ store i32 123, i32* %B
+ call i32 @test2( i32 123 ) ; <i32>:0 [#uses=0]
+ %T1 = load i32* %B ; <i32> [#uses=1]
+ br label %cond_next
+
+cond_next: ; preds = %cond_true, %entry
+ %tmp1.0 = phi i32 [ %T1, %cond_true ], [ %T, %entry ] ; <i32> [#uses=1]
+ %tmp7 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp8 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp9 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp10 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp11 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp12 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp13 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp14 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp15 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp16 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp17 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp18 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp19 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp20 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ ret i32 %tmp1.0
+}
+
+declare i32 @bar(...)
+
+declare i32 @baq(...)
diff --git a/test/Transforms/InstCombine/2007-02-07-PointerCast.ll b/test/Transforms/InstCombine/2007-02-07-PointerCast.ll
new file mode 100644
index 0000000..f38b4c1
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-02-07-PointerCast.ll
@@ -0,0 +1,26 @@
+;RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep zext
+
+; Make sure the uint isn't removed. Instcombine in llvm 1.9 was dropping the
+; uint cast which was causing a sign extend. This only affected code with
+; pointers in the high half of memory, so it wasn't noticed much
+; compile a kernel though...
+
+target datalayout = "e-p:32:32"
+target endian = little
+target pointersize = 32
+
+%str = internal constant [6 x sbyte] c"%llx\0A\00"
+
+implementation ; Functions:
+
+declare int %printf(sbyte*, ...)
+
+int %main(int %x, sbyte** %a) {
+entry:
+ %tmp = getelementptr [6 x sbyte]* %str, int 0, uint 0
+ %tmp1 = load sbyte** %a
+ %tmp2 = cast sbyte* %tmp1 to uint ; <uint> [#uses=1]
+ %tmp3 = cast uint %tmp2 to long ; <long> [#uses=1]
+ %tmp = call int (sbyte*, ...)* %printf( sbyte* %tmp, long %tmp3 )
+ ret int 0
+}
diff --git a/test/Transforms/InstCombine/2007-02-23-PhiFoldInfLoop.ll b/test/Transforms/InstCombine/2007-02-23-PhiFoldInfLoop.ll
new file mode 100644
index 0000000..d60da44
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-02-23-PhiFoldInfLoop.ll
@@ -0,0 +1,31 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep ret
+; PR1217
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+ %struct.termbox = type { %struct.termbox*, i32, i32, i32, i32, i32 }
+
+
+define void @ggenorien() {
+entry:
+ %tmp68 = icmp eq %struct.termbox* null, null ; <i1> [#uses=1]
+ br i1 %tmp68, label %cond_next448, label %bb80
+
+bb80: ; preds = %entry
+ ret void
+
+cond_next448: ; preds = %entry
+ br i1 false, label %bb756, label %bb595
+
+bb595: ; preds = %cond_next448
+ br label %bb609
+
+bb609: ; preds = %bb756, %bb595
+ %termnum.6240.0 = phi i32 [ 2, %bb595 ], [ %termnum.6, %bb756 ] ; <i32> [#uses=1]
+ %tmp755 = add i32 %termnum.6240.0, 1 ; <i32> [#uses=1]
+ br label %bb756
+
+bb756: ; preds = %bb609, %cond_next448
+ %termnum.6 = phi i32 [ %tmp755, %bb609 ], [ 2, %cond_next448 ] ; <i32> [#uses=1]
+ br label %bb609
+}
diff --git a/test/Transforms/InstCombine/2007-03-13-CompareMerge.ll b/test/Transforms/InstCombine/2007-03-13-CompareMerge.ll
new file mode 100644
index 0000000..d101050
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-03-13-CompareMerge.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {icmp sle}
+; PR1244
+
+define i1 @test(i32 %c.3.i, i32 %d.292.2.i) {
+ %tmp266.i = icmp slt i32 %c.3.i, %d.292.2.i
+ %tmp276.i = icmp eq i32 %c.3.i, %d.292.2.i
+ %sel_tmp80 = or i1 %tmp266.i, %tmp276.i
+ ret i1 %sel_tmp80
+}
diff --git a/test/Transforms/InstCombine/2007-03-19-BadTruncChangePR1261.ll b/test/Transforms/InstCombine/2007-03-19-BadTruncChangePR1261.ll
new file mode 100644
index 0000000..da58dec
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-03-19-BadTruncChangePR1261.ll
@@ -0,0 +1,10 @@
+; RUN: llvm-as %s -o - | opt -instcombine | llvm-dis | grep zext
+; PR1261.
+
+define i16 @test(i31 %zzz) {
+ %A = sext i31 %zzz to i32
+ %B = add i32 %A, 16384
+ %C = lshr i32 %B, 15
+ %D = trunc i32 %C to i16
+ ret i16 %D
+}
diff --git a/test/Transforms/InstCombine/2007-03-21-SignedRangeTest.ll b/test/Transforms/InstCombine/2007-03-21-SignedRangeTest.ll
new file mode 100644
index 0000000..c8dafd1
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-03-21-SignedRangeTest.ll
@@ -0,0 +1,7 @@
+; For PR1248
+; RUN: llvm-as %s -o - | opt -instcombine | llvm-dis | grep {ugt i32 .*, 11}
+define i1 @test(i32 %tmp6) {
+ %tmp7 = sdiv i32 %tmp6, 12 ; <i32> [#uses=1]
+ icmp ne i32 %tmp7, -6 ; <i1>:1 [#uses=1]
+ ret i1 %1
+}
diff --git a/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll b/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll
new file mode 100644
index 0000000..0b05f7c
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll
@@ -0,0 +1,29 @@
+; PR1271
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: grep {icmp eq i32 .tmp.*, 2146435072}
+%struct..0anon = type { i32, i32 }
+%struct..1anon = type { double }
+
+define i32 @main() {
+entry:
+ %u = alloca %struct..1anon, align 8 ; <%struct..1anon*> [#uses=4]
+ %tmp1 = getelementptr %struct..1anon* %u, i32 0, i32 0 ; <double*> [#uses=1]
+ store double 0x7FF0000000000000, double* %tmp1
+ %tmp3 = getelementptr %struct..1anon* %u, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp34 = bitcast double* %tmp3 to %struct..0anon* ; <%struct..0anon*> [#uses=1]
+ %tmp5 = getelementptr %struct..0anon* %tmp34, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp6 = load i32* %tmp5 ; <i32> [#uses=1]
+ %tmp7 = shl i32 %tmp6, 1 ; <i32> [#uses=1]
+ %tmp8 = lshr i32 %tmp7, 21 ; <i32> [#uses=1]
+ %tmp89 = trunc i32 %tmp8 to i16 ; <i16> [#uses=1]
+ icmp ne i16 %tmp89, 2047 ; <i1>:0 [#uses=1]
+ zext i1 %0 to i8 ; <i8>:1 [#uses=1]
+ icmp ne i8 %1, 0 ; <i1>:2 [#uses=1]
+ br i1 %2, label %cond_true, label %cond_false
+
+cond_true: ; preds = %entry
+ ret i32 0
+
+cond_false: ; preds = %entry
+ ret i32 1
+}
diff --git a/test/Transforms/InstCombine/2007-03-25-DoubleShift.ll b/test/Transforms/InstCombine/2007-03-25-DoubleShift.ll
new file mode 100644
index 0000000..d67e1a1
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-03-25-DoubleShift.ll
@@ -0,0 +1,9 @@
+; PR1271
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep and
+define i1 @test(i32 %tmp13) {
+entry:
+ %tmp14 = shl i32 %tmp13, 12 ; <i32> [#uses=1]
+ %tmp15 = lshr i32 %tmp14, 12 ; <i32> [#uses=1]
+ %res = icmp ne i32 %tmp15, 0 ; <i1>:3 [#uses=1]
+ ret i1 %res
+}
diff --git a/test/Transforms/InstCombine/2007-03-26-BadShiftMask.ll b/test/Transforms/InstCombine/2007-03-26-BadShiftMask.ll
new file mode 100644
index 0000000..4a2e60e
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-03-26-BadShiftMask.ll
@@ -0,0 +1,36 @@
+; PR1271
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: grep {ashr i32 %.mp137, 2}
+; END.
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
+target triple = "i686-pc-linux-gnu"
+
+
+define i1 @test(i32* %tmp141, i32* %tmp145,
+ i32 %b8, i32 %iftmp.430.0, i32* %tmp134.out, i32* %tmp137.out)
+{
+newFuncRoot:
+ %tmp133 = and i32 %b8, 1 ; <i32> [#uses=1]
+ %tmp134 = shl i32 %tmp133, 3 ; <i32> [#uses=3]
+ %tmp136 = ashr i32 %b8, 1 ; <i32> [#uses=1]
+ %tmp137 = shl i32 %tmp136, 3 ; <i32> [#uses=3]
+ %tmp139 = ashr i32 %tmp134, 2 ; <i32> [#uses=1]
+ store i32 %tmp139, i32* %tmp141
+ %tmp143 = ashr i32 %tmp137, 2 ; <i32> [#uses=1]
+ store i32 %tmp143, i32* %tmp145
+ icmp eq i32 %iftmp.430.0, 0 ; <i1>:0 [#uses=1]
+ zext i1 %0 to i8 ; <i8>:1 [#uses=1]
+ icmp ne i8 %1, 0 ; <i1>:2 [#uses=1]
+ br i1 %2, label %cond_true147.exitStub, label %cond_false252.exitStub
+
+cond_true147.exitStub: ; preds = %newFuncRoot
+ store i32 %tmp134, i32* %tmp134.out
+ store i32 %tmp137, i32* %tmp137.out
+ ret i1 true
+
+cond_false252.exitStub: ; preds = %newFuncRoot
+ store i32 %tmp134, i32* %tmp134.out
+ store i32 %tmp137, i32* %tmp137.out
+ ret i1 false
+}
diff --git a/test/Transforms/InstCombine/2007-03-27-PR1280.ll b/test/Transforms/InstCombine/2007-03-27-PR1280.ll
new file mode 100644
index 0000000..6cb9aae
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-03-27-PR1280.ll
@@ -0,0 +1,15 @@
+; PR1280 - we should be able to reduce this function to a trunc/sext but it
+; would involve using a bit width (24) that doesn't match a size that
+; the back end can handle. This test makes sure that such a transform
+; is not done. It should be removed when code gen supports "funny"
+; bit widths.
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {add i49.*-8388608}
+
+define i49 @test5(i49 %x) {
+ ;; If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
+ %X = and i49 %x, 16777215 ; 0x0000000ffffff
+ %tmp.2 = xor i49 %X, 8388608 ; 0x0000000800000
+ %tmp.4 = add i49 %tmp.2, -8388608 ; 0x1FFFFFF800000
+ ret i49 %tmp.4
+}
diff --git a/test/Transforms/InstCombine/2007-03-31-InfiniteLoop.ll b/test/Transforms/InstCombine/2007-03-31-InfiniteLoop.ll
new file mode 100644
index 0000000..640f6d5
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-03-31-InfiniteLoop.ll
@@ -0,0 +1,302 @@
+; RUN: llvm-as < %s | opt -instcombine -disable-output
+; END.
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
+target triple = "i686-apple-darwin8"
+ %struct.ZZIP_FILE = type { %struct.zzip_dir*, i32, i32, i32, i32, i32, i32, i64, i8*, i64, %struct.z_stream, %struct.zzip_plugin_io* }
+ %struct.anon = type { %struct.ZZIP_FILE*, i8* }
+ %struct.internal_state = type { i32 }
+ %struct.z_stream = type { i8*, i32, i32, i8*, i32, i32, i8*, %struct.internal_state*, i8* (i8*, i32, i32)*, void (i8*, i8*)*, i8*, i32, i32, i32 }
+ %struct.zzip_dir = type { i32, i32, i32, %struct.anon, %struct.zzip_dir_hdr*, %struct.zzip_dir_hdr*, %struct.ZZIP_FILE*, %struct.zzip_dirent, i8*, i8*, i8**, %struct.zzip_plugin_io* }
+ %struct.zzip_dir_hdr = type { i32, i32, i32, i32, i16, i16, i8, i16, [1 x i8] }
+ %struct.zzip_dirent = type { i32, i32, i32, i16, i8*, i32, i32 }
+ %struct.zzip_plugin_io = type { i32 (i8*, i32, ...)*, i32 (i32)*, i32 (i32, i8*, i32)*, i64 (i32, i64, i32)*, i64 (i32)*, i32 }
+
+define %struct.ZZIP_FILE* @zzip_open_shared_io(%struct.ZZIP_FILE* %stream, i8* %filename, i32 %o_flags, i32 %o_modes, i8** %ext, %struct.zzip_plugin_io* %io) {
+entry:
+ %basename = alloca [1024 x i8], align 16 ; <[1024 x i8]*> [#uses=5]
+ %e = alloca i32, align 4 ; <i32*> [#uses=4]
+ icmp eq %struct.ZZIP_FILE* %stream, null ; <i1>:0 [#uses=1]
+ br i1 %0, label %cond_next22, label %cond_true
+
+cond_true: ; preds = %entry
+ %tmp3 = getelementptr %struct.ZZIP_FILE* %stream, i32 0, i32 0 ; <%struct.zzip_dir**> [#uses=1]
+ %tmp4 = load %struct.zzip_dir** %tmp3 ; <%struct.zzip_dir*> [#uses=1]
+ icmp eq %struct.zzip_dir* %tmp4, null ; <i1>:1 [#uses=1]
+ br i1 %1, label %cond_next22, label %cond_true5
+
+cond_true5: ; preds = %cond_true
+ icmp eq i8** %ext, null ; <i1>:2 [#uses=1]
+ br i1 %2, label %cond_true7, label %cond_next
+
+cond_true7: ; preds = %cond_true5
+ %tmp9 = getelementptr %struct.ZZIP_FILE* %stream, i32 0, i32 0 ; <%struct.zzip_dir**> [#uses=1]
+ %tmp10 = load %struct.zzip_dir** %tmp9 ; <%struct.zzip_dir*> [#uses=1]
+ %tmp11 = getelementptr %struct.zzip_dir* %tmp10, i32 0, i32 10 ; <i8***> [#uses=1]
+ %tmp12 = load i8*** %tmp11 ; <i8**> [#uses=1]
+ br label %cond_next
+
+cond_next: ; preds = %cond_true7, %cond_true5
+ %ext_addr.0 = phi i8** [ %ext, %cond_true5 ], [ %tmp12, %cond_true7 ] ; <i8**> [#uses=2]
+ icmp eq %struct.zzip_plugin_io* %io, null ; <i1>:3 [#uses=1]
+ br i1 %3, label %cond_true14, label %cond_next22
+
+cond_true14: ; preds = %cond_next
+ %tmp16 = getelementptr %struct.ZZIP_FILE* %stream, i32 0, i32 0 ; <%struct.zzip_dir**> [#uses=1]
+ %tmp17 = load %struct.zzip_dir** %tmp16 ; <%struct.zzip_dir*> [#uses=1]
+ %tmp18 = getelementptr %struct.zzip_dir* %tmp17, i32 0, i32 11 ; <%struct.zzip_plugin_io**> [#uses=1]
+ %tmp19 = load %struct.zzip_plugin_io** %tmp18 ; <%struct.zzip_plugin_io*> [#uses=1]
+ br label %cond_next22
+
+cond_next22: ; preds = %cond_true14, %cond_next, %cond_true, %entry
+ %io_addr.0 = phi %struct.zzip_plugin_io* [ %io, %entry ], [ %io, %cond_true ], [ %io, %cond_next ], [ %tmp19, %cond_true14 ] ; <%struct.zzip_plugin_io*> [#uses=2]
+ %ext_addr.1 = phi i8** [ %ext, %entry ], [ %ext, %cond_true ], [ %ext_addr.0, %cond_next ], [ %ext_addr.0, %cond_true14 ] ; <i8**> [#uses=2]
+ icmp eq %struct.zzip_plugin_io* %io_addr.0, null ; <i1>:4 [#uses=1]
+ br i1 %4, label %cond_true24, label %cond_next26
+
+cond_true24: ; preds = %cond_next22
+ %tmp25 = call %struct.zzip_plugin_io* @zzip_get_default_io( ) ; <%struct.zzip_plugin_io*> [#uses=1]
+ br label %cond_next26
+
+cond_next26: ; preds = %cond_true24, %cond_next22
+ %io_addr.1 = phi %struct.zzip_plugin_io* [ %io_addr.0, %cond_next22 ], [ %tmp25, %cond_true24 ] ; <%struct.zzip_plugin_io*> [#uses=4]
+ %tmp28 = and i32 %o_modes, 81920 ; <i32> [#uses=1]
+ icmp eq i32 %tmp28, 0 ; <i1>:5 [#uses=1]
+ br i1 %5, label %try_real, label %try_zzip
+
+try_real: ; preds = %bb223, %cond_next26
+ %fd160.2 = phi i32 [ undef, %cond_next26 ], [ %fd160.0, %bb223 ] ; <i32> [#uses=1]
+ %len.2 = phi i32 [ undef, %cond_next26 ], [ %len.0, %bb223 ] ; <i32> [#uses=1]
+ %o_flags_addr.1 = phi i32 [ %o_flags, %cond_next26 ], [ %o_flags_addr.0, %bb223 ] ; <i32> [#uses=2]
+ %tmp33348 = and i32 %o_modes, 262144 ; <i32> [#uses=1]
+ icmp eq i32 %tmp33348, 0 ; <i1>:6 [#uses=1]
+ br i1 %6, label %cond_next38, label %cond_true35
+
+cond_true35: ; preds = %try_real
+ %tmp36 = call %struct.zzip_plugin_io* @zzip_get_default_io( ) ; <%struct.zzip_plugin_io*> [#uses=1]
+ br label %cond_next38
+
+cond_next38: ; preds = %cond_true35, %try_real
+ %iftmp.21.0 = phi %struct.zzip_plugin_io* [ %tmp36, %cond_true35 ], [ %io_addr.1, %try_real ] ; <%struct.zzip_plugin_io*> [#uses=3]
+ %tmp41 = getelementptr %struct.zzip_plugin_io* %iftmp.21.0, i32 0, i32 0 ; <i32 (i8*, i32, ...)**> [#uses=1]
+ %tmp42 = load i32 (i8*, i32, ...)** %tmp41 ; <i32 (i8*, i32, ...)*> [#uses=1]
+ %tmp45 = call i32 (i8*, i32, ...)* %tmp42( i8* %filename, i32 %o_flags_addr.1 ) ; <i32> [#uses=3]
+ icmp eq i32 %tmp45, -1 ; <i1>:7 [#uses=1]
+ br i1 %7, label %cond_next67, label %cond_true47
+
+cond_true47: ; preds = %cond_next38
+ %tmp48 = call i8* @cli_calloc( i32 1, i32 108 ) ; <i8*> [#uses=2]
+ %tmp4849 = bitcast i8* %tmp48 to %struct.ZZIP_FILE* ; <%struct.ZZIP_FILE*> [#uses=3]
+ icmp eq i8* %tmp48, null ; <i1>:8 [#uses=1]
+ br i1 %8, label %cond_true51, label %cond_next58
+
+cond_true51: ; preds = %cond_true47
+ %tmp53 = getelementptr %struct.zzip_plugin_io* %iftmp.21.0, i32 0, i32 1 ; <i32 (i32)**> [#uses=1]
+ %tmp54 = load i32 (i32)** %tmp53 ; <i32 (i32)*> [#uses=1]
+ %tmp56 = call i32 %tmp54( i32 %tmp45 ) ; <i32> [#uses=0]
+ ret %struct.ZZIP_FILE* null
+
+cond_next58: ; preds = %cond_true47
+ %tmp60 = getelementptr %struct.ZZIP_FILE* %tmp4849, i32 0, i32 1 ; <i32*> [#uses=1]
+ store i32 %tmp45, i32* %tmp60
+ %tmp63 = getelementptr %struct.ZZIP_FILE* %tmp4849, i32 0, i32 11 ; <%struct.zzip_plugin_io**> [#uses=1]
+ store %struct.zzip_plugin_io* %iftmp.21.0, %struct.zzip_plugin_io** %tmp63
+ ret %struct.ZZIP_FILE* %tmp4849
+
+cond_next67: ; preds = %cond_next38
+ %tmp70716 = and i32 %o_modes, 16384 ; <i32> [#uses=1]
+ icmp eq i32 %tmp70716, 0 ; <i1>:9 [#uses=1]
+ br i1 %9, label %try_zzip, label %return
+
+try_zzip: ; preds = %cond_next67, %cond_next26
+ %fd160.3 = phi i32 [ %fd160.2, %cond_next67 ], [ undef, %cond_next26 ] ; <i32> [#uses=6]
+ %len.3 = phi i32 [ %len.2, %cond_next67 ], [ undef, %cond_next26 ] ; <i32> [#uses=3]
+ %o_flags_addr.3 = phi i32 [ %o_flags_addr.1, %cond_next67 ], [ %o_flags, %cond_next26 ] ; <i32> [#uses=4]
+ %tmp76 = and i32 %o_flags_addr.3, 513 ; <i32> [#uses=1]
+ icmp eq i32 %tmp76, 0 ; <i1>:10 [#uses=1]
+ br i1 %10, label %cond_next80, label %cond_true77
+
+cond_true77: ; preds = %try_zzip
+ %tmp78 = call i32* @__error( ) ; <i32*> [#uses=1]
+ store i32 22, i32* %tmp78
+ ret %struct.ZZIP_FILE* null
+
+cond_next80: ; preds = %try_zzip
+ %tmp83844 = and i32 %o_flags_addr.3, 2 ; <i32> [#uses=1]
+ icmp eq i32 %tmp83844, 0 ; <i1>:11 [#uses=1]
+ %tmp87 = xor i32 %o_flags_addr.3, 2 ; <i32> [#uses=1]
+ %o_flags_addr.0 = select i1 %11, i32 %o_flags_addr.3, i32 %tmp87 ; <i32> [#uses=2]
+ %basename90 = getelementptr [1024 x i8]* %basename, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp92 = call i8* @strcpy( i8* %basename90, i8* %filename ) ; <i8*> [#uses=0]
+ icmp eq %struct.ZZIP_FILE* %stream, null ; <i1>:12 [#uses=1]
+ br i1 %12, label %bb219, label %cond_true94
+
+cond_true94: ; preds = %cond_next80
+ %tmp96 = getelementptr %struct.ZZIP_FILE* %stream, i32 0, i32 0 ; <%struct.zzip_dir**> [#uses=1]
+ %tmp97 = load %struct.zzip_dir** %tmp96 ; <%struct.zzip_dir*> [#uses=1]
+ icmp eq %struct.zzip_dir* %tmp97, null ; <i1>:13 [#uses=1]
+ br i1 %13, label %bb219, label %cond_true98
+
+cond_true98: ; preds = %cond_true94
+ %tmp100 = getelementptr %struct.ZZIP_FILE* %stream, i32 0, i32 0 ; <%struct.zzip_dir**> [#uses=1]
+ %tmp101 = load %struct.zzip_dir** %tmp100 ; <%struct.zzip_dir*> [#uses=1]
+ %tmp102 = getelementptr %struct.zzip_dir* %tmp101, i32 0, i32 9 ; <i8**> [#uses=1]
+ %tmp103 = load i8** %tmp102 ; <i8*> [#uses=1]
+ icmp eq i8* %tmp103, null ; <i1>:14 [#uses=1]
+ br i1 %14, label %bb219, label %cond_true104
+
+cond_true104: ; preds = %cond_true98
+ %tmp106 = getelementptr %struct.ZZIP_FILE* %stream, i32 0, i32 0 ; <%struct.zzip_dir**> [#uses=1]
+ %tmp107 = load %struct.zzip_dir** %tmp106 ; <%struct.zzip_dir*> [#uses=1]
+ %tmp108 = getelementptr %struct.zzip_dir* %tmp107, i32 0, i32 9 ; <i8**> [#uses=1]
+ %tmp109 = load i8** %tmp108 ; <i8*> [#uses=1]
+ %tmp110 = call i32 @strlen( i8* %tmp109 ) ; <i32> [#uses=7]
+ %tmp112 = getelementptr %struct.ZZIP_FILE* %stream, i32 0, i32 0 ; <%struct.zzip_dir**> [#uses=1]
+ %tmp113 = load %struct.zzip_dir** %tmp112 ; <%struct.zzip_dir*> [#uses=1]
+ %tmp114 = getelementptr %struct.zzip_dir* %tmp113, i32 0, i32 9 ; <i8**> [#uses=1]
+ %tmp115 = load i8** %tmp114 ; <i8*> [#uses=1]
+ %tmp118 = call i32 @memcmp( i8* %filename, i8* %tmp115, i32 %tmp110 ) ; <i32> [#uses=1]
+ icmp eq i32 %tmp118, 0 ; <i1>:15 [#uses=1]
+ br i1 %15, label %cond_true119, label %bb219
+
+cond_true119: ; preds = %cond_true104
+ %tmp122 = getelementptr i8* %filename, i32 %tmp110 ; <i8*> [#uses=1]
+ %tmp123 = load i8* %tmp122 ; <i8> [#uses=1]
+ icmp eq i8 %tmp123, 47 ; <i1>:16 [#uses=1]
+ br i1 %16, label %cond_true124, label %bb219
+
+cond_true124: ; preds = %cond_true119
+ %tmp126 = add i32 %tmp110, 1 ; <i32> [#uses=1]
+ %tmp128 = getelementptr i8* %filename, i32 %tmp126 ; <i8*> [#uses=1]
+ %tmp129 = load i8* %tmp128 ; <i8> [#uses=1]
+ icmp eq i8 %tmp129, 0 ; <i1>:17 [#uses=1]
+ br i1 %17, label %bb219, label %cond_true130
+
+cond_true130: ; preds = %cond_true124
+ %tmp134.sum = add i32 %tmp110, 1 ; <i32> [#uses=1]
+ %tmp135 = getelementptr i8* %filename, i32 %tmp134.sum ; <i8*> [#uses=1]
+ %tmp137 = getelementptr %struct.ZZIP_FILE* %stream, i32 0, i32 0 ; <%struct.zzip_dir**> [#uses=1]
+ %tmp138 = load %struct.zzip_dir** %tmp137 ; <%struct.zzip_dir*> [#uses=1]
+ %tmp140 = call %struct.ZZIP_FILE* @zzip_file_open( %struct.zzip_dir* %tmp138, i8* %tmp135, i32 %o_modes, i32 -1 ) ; <%struct.ZZIP_FILE*> [#uses=3]
+ icmp eq %struct.ZZIP_FILE* %tmp140, null ; <i1>:18 [#uses=1]
+ br i1 %18, label %cond_true142, label %return
+
+cond_true142: ; preds = %cond_true130
+ %tmp144 = getelementptr %struct.ZZIP_FILE* %stream, i32 0, i32 0 ; <%struct.zzip_dir**> [#uses=1]
+ %tmp145 = load %struct.zzip_dir** %tmp144 ; <%struct.zzip_dir*> [#uses=1]
+ %tmp146 = getelementptr %struct.zzip_dir* %tmp145, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp147 = load i32* %tmp146 ; <i32> [#uses=1]
+ %tmp148 = call i32 @zzip_errno( i32 %tmp147 ) ; <i32> [#uses=1]
+ %tmp149 = call i32* @__error( ) ; <i32*> [#uses=1]
+ store i32 %tmp148, i32* %tmp149
+ ret %struct.ZZIP_FILE* %tmp140
+
+bb: ; preds = %bb219
+ store i32 0, i32* %e
+ store i8 0, i8* %tmp221
+ %basename162 = getelementptr [1024 x i8]* %basename, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp166 = call i32 @__zzip_try_open( i8* %basename162, i32 %o_flags_addr.0, i8** %ext_addr.1, %struct.zzip_plugin_io* %io_addr.1 ) ; <i32> [#uses=4]
+ icmp eq i32 %tmp166, -1 ; <i1>:19 [#uses=1]
+ br i1 %19, label %bb219, label %cond_next169
+
+cond_next169: ; preds = %bb
+ %tmp173 = call %struct.zzip_dir* @zzip_dir_fdopen_ext_io( i32 %tmp166, i32* %e, i8** %ext_addr.1, %struct.zzip_plugin_io* %io_addr.1 ) ; <%struct.zzip_dir*> [#uses=7]
+ %tmp174 = load i32* %e ; <i32> [#uses=1]
+ icmp eq i32 %tmp174, 0 ; <i1>:20 [#uses=1]
+ br i1 %20, label %cond_next185, label %cond_true175
+
+cond_true175: ; preds = %cond_next169
+ %tmp176 = load i32* %e ; <i32> [#uses=1]
+ %tmp177 = call i32 @zzip_errno( i32 %tmp176 ) ; <i32> [#uses=1]
+ %tmp178 = call i32* @__error( ) ; <i32*> [#uses=1]
+ store i32 %tmp177, i32* %tmp178
+ %tmp180 = getelementptr %struct.zzip_plugin_io* %io_addr.1, i32 0, i32 1 ; <i32 (i32)**> [#uses=1]
+ %tmp181 = load i32 (i32)** %tmp180 ; <i32 (i32)*> [#uses=1]
+ %tmp183 = call i32 %tmp181( i32 %tmp166 ) ; <i32> [#uses=0]
+ ret %struct.ZZIP_FILE* null
+
+cond_next185: ; preds = %cond_next169
+ %tmp186187 = ptrtoint i8* %tmp221 to i32 ; <i32> [#uses=1]
+ %basename188189 = ptrtoint [1024 x i8]* %basename to i32 ; <i32> [#uses=1]
+ %tmp190 = sub i32 %tmp186187, %basename188189 ; <i32> [#uses=1]
+ %tmp192.sum = add i32 %tmp190, 1 ; <i32> [#uses=1]
+ %tmp193 = getelementptr i8* %filename, i32 %tmp192.sum ; <i8*> [#uses=1]
+ %tmp196 = call %struct.ZZIP_FILE* @zzip_file_open( %struct.zzip_dir* %tmp173, i8* %tmp193, i32 %o_modes, i32 -1 ) ; <%struct.ZZIP_FILE*> [#uses=4]
+ icmp eq %struct.ZZIP_FILE* %tmp196, null ; <i1>:21 [#uses=1]
+ br i1 %21, label %cond_true198, label %cond_false204
+
+cond_true198: ; preds = %cond_next185
+ %tmp200 = getelementptr %struct.zzip_dir* %tmp173, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp201 = load i32* %tmp200 ; <i32> [#uses=1]
+ %tmp202 = call i32 @zzip_errno( i32 %tmp201 ) ; <i32> [#uses=1]
+ %tmp203 = call i32* @__error( ) ; <i32*> [#uses=1]
+ store i32 %tmp202, i32* %tmp203
+ %tmp2169 = call i32 @zzip_dir_close( %struct.zzip_dir* %tmp173 ) ; <i32> [#uses=0]
+ ret %struct.ZZIP_FILE* %tmp196
+
+cond_false204: ; preds = %cond_next185
+ %tmp206 = getelementptr %struct.zzip_dir* %tmp173, i32 0, i32 9 ; <i8**> [#uses=1]
+ %tmp207 = load i8** %tmp206 ; <i8*> [#uses=1]
+ icmp eq i8* %tmp207, null ; <i1>:22 [#uses=1]
+ br i1 %22, label %cond_true208, label %cond_next214
+
+cond_true208: ; preds = %cond_false204
+ %basename209 = getelementptr [1024 x i8]* %basename, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp210 = call i8* @strdup( i8* %basename209 ) ; <i8*> [#uses=1]
+ %tmp212 = getelementptr %struct.zzip_dir* %tmp173, i32 0, i32 9 ; <i8**> [#uses=1]
+ store i8* %tmp210, i8** %tmp212
+ %tmp21610 = call i32 @zzip_dir_close( %struct.zzip_dir* %tmp173 ) ; <i32> [#uses=0]
+ ret %struct.ZZIP_FILE* %tmp196
+
+cond_next214: ; preds = %cond_false204
+ %tmp216 = call i32 @zzip_dir_close( %struct.zzip_dir* %tmp173 ) ; <i32> [#uses=0]
+ ret %struct.ZZIP_FILE* %tmp196
+
+bb219: ; preds = %bb, %cond_true124, %cond_true119, %cond_true104, %cond_true98, %cond_true94, %cond_next80
+ %fd160.0 = phi i32 [ %fd160.3, %cond_next80 ], [ %tmp166, %bb ], [ %fd160.3, %cond_true94 ], [ %fd160.3, %cond_true98 ], [ %fd160.3, %cond_true104 ], [ %fd160.3, %cond_true119 ], [ %fd160.3, %cond_true124 ] ; <i32> [#uses=1]
+ %len.0 = phi i32 [ %len.3, %cond_next80 ], [ %len.0, %bb ], [ %len.3, %cond_true94 ], [ %len.3, %cond_true98 ], [ %tmp110, %cond_true104 ], [ %tmp110, %cond_true119 ], [ %tmp110, %cond_true124 ] ; <i32> [#uses=2]
+ %basename220 = getelementptr [1024 x i8]* %basename, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp221 = call i8* @strrchr( i8* %basename220, i32 47 ) ; <i8*> [#uses=3]
+ icmp eq i8* %tmp221, null ; <i1>:23 [#uses=1]
+ br i1 %23, label %bb223, label %bb
+
+bb223: ; preds = %bb219
+ %tmp2262272 = and i32 %o_modes, 16384 ; <i32> [#uses=1]
+ icmp eq i32 %tmp2262272, 0 ; <i1>:24 [#uses=1]
+ br i1 %24, label %cond_next229, label %try_real
+
+cond_next229: ; preds = %bb223
+ %tmp230 = call i32* @__error( ) ; <i32*> [#uses=1]
+ store i32 2, i32* %tmp230
+ ret %struct.ZZIP_FILE* null
+
+return: ; preds = %cond_true130, %cond_next67
+ %retval.0 = phi %struct.ZZIP_FILE* [ null, %cond_next67 ], [ %tmp140, %cond_true130 ] ; <%struct.ZZIP_FILE*> [#uses=1]
+ ret %struct.ZZIP_FILE* %retval.0
+}
+
+declare i32 @zzip_dir_close(%struct.zzip_dir*)
+
+declare i8* @strrchr(i8*, i32)
+
+declare %struct.ZZIP_FILE* @zzip_file_open(%struct.zzip_dir*, i8*, i32, i32)
+
+declare i8* @cli_calloc(i32, i32)
+
+declare i32 @zzip_errno(i32)
+
+declare i32* @__error()
+
+declare %struct.zzip_plugin_io* @zzip_get_default_io()
+
+declare i8* @strcpy(i8*, i8*)
+
+declare i32 @strlen(i8*)
+
+declare i32 @memcmp(i8*, i8*, i32)
+
+declare i32 @__zzip_try_open(i8*, i32, i8**, %struct.zzip_plugin_io*)
+
+declare %struct.zzip_dir* @zzip_dir_fdopen_ext_io(i32, i32*, i8**, %struct.zzip_plugin_io*)
+
+declare i8* @strdup(i8*)
diff --git a/test/Transforms/InstCombine/2007-04-04-BadFoldBitcastIntoMalloc.ll b/test/Transforms/InstCombine/2007-04-04-BadFoldBitcastIntoMalloc.ll
new file mode 100644
index 0000000..e738635
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-04-04-BadFoldBitcastIntoMalloc.ll
@@ -0,0 +1,19 @@
+; In the presence of a negative offset (the -8 below), a fold of a bitcast into
+; a malloc messes up the element count, causing an extra 4GB to be allocated on
+; 64-bit targets.
+;
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep {= add }
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
+target triple = "x86_64-unknown-freebsd6.2"
+
+define i1 @test(i32 %tmp141, double** %tmp145)
+{
+ %tmp133 = add i32 %tmp141, 1
+ %tmp134 = shl i32 %tmp133, 3
+ %tmp135 = add i32 %tmp134, -8
+ %tmp136 = malloc i8, i32 %tmp135
+ %tmp137 = bitcast i8* %tmp136 to double*
+ store double* %tmp137, double** %tmp145
+ ret i1 false
+}
diff --git a/test/Transforms/InstCombine/2007-04-08-SingleEltVectorCrash.ll b/test/Transforms/InstCombine/2007-04-08-SingleEltVectorCrash.ll
new file mode 100644
index 0000000..34322a2
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-04-08-SingleEltVectorCrash.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-as < %s | opt -instcombine -disable-output
+; PR1304
+
+define i64 @bork(<1 x i64> %vec) {
+ %tmp = extractelement <1 x i64> %vec, i32 0
+ ret i64 %tmp
+}
diff --git a/test/Transforms/InstCombine/2007-05-04-Crash.ll b/test/Transforms/InstCombine/2007-05-04-Crash.ll
new file mode 100644
index 0000000..5ad7919
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-05-04-Crash.ll
@@ -0,0 +1,30 @@
+; RUN: llvm-as < %s | opt -instcombine -disable-output
+; PR1384
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
+target triple = "i686-apple-darwin8"
+ %struct.CFRuntimeBase = type { i32, [4 x i8] }
+ %struct.CGColor = type opaque
+ %struct.CGColorSpace = type { %struct.CFRuntimeBase, i8, i8, i8, i32, i32, i32, %struct.CGColor*, float*, %struct.CGMD5Signature, %struct.CGMD5Signature*, [0 x %struct.CGColorSpaceDescriptor] }
+ %struct.CGColorSpaceCalibratedRGBData = type { [3 x float], [3 x float], [3 x float], [9 x float] }
+ %struct.CGColorSpaceDescriptor = type { %struct.CGColorSpaceCalibratedRGBData }
+ %struct.CGColorSpaceLabData = type { [3 x float], [3 x float], [4 x float] }
+ %struct.CGMD5Signature = type { [16 x i8], i8 }
+
+declare fastcc %struct.CGColorSpace* @CGColorSpaceCreate(i32, i32)
+
+declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
+
+define %struct.CGColorSpace* @CGColorSpaceCreateLab(float* %whitePoint, float* %blackPoint, float* %range) {
+entry:
+ %tmp17 = call fastcc %struct.CGColorSpace* @CGColorSpaceCreate( i32 5, i32 3 ) ; <%struct.CGColorSpace*> [#uses=2]
+ %tmp28 = getelementptr %struct.CGColorSpace* %tmp17, i32 0, i32 11 ; <[0 x %struct.CGColorSpaceDescriptor]*> [#uses=1]
+ %tmp29 = getelementptr [0 x %struct.CGColorSpaceDescriptor]* %tmp28, i32 0, i32 0 ; <%struct.CGColorSpaceDescriptor*> [#uses=1]
+ %tmp30 = getelementptr %struct.CGColorSpaceDescriptor* %tmp29, i32 0, i32 0 ; <%struct.CGColorSpaceCalibratedRGBData*> [#uses=1]
+ %tmp3031 = bitcast %struct.CGColorSpaceCalibratedRGBData* %tmp30 to %struct.CGColorSpaceLabData* ; <%struct.CGColorSpaceLabData*> [#uses=1]
+ %tmp45 = getelementptr %struct.CGColorSpaceLabData* %tmp3031, i32 0, i32 2 ; <[4 x float]*> [#uses=1]
+ %tmp46 = getelementptr [4 x float]* %tmp45, i32 0, i32 0 ; <float*> [#uses=1]
+ %tmp4648 = bitcast float* %tmp46 to i8* ; <i8*> [#uses=1]
+ call void @llvm.memcpy.i32( i8* %tmp4648, i8* null, i32 16, i32 4 )
+ ret %struct.CGColorSpace* %tmp17
+}
diff --git a/test/Transforms/InstCombine/2007-05-10-icmp-or.ll b/test/Transforms/InstCombine/2007-05-10-icmp-or.ll
new file mode 100644
index 0000000..8769ded
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-05-10-icmp-or.ll
@@ -0,0 +1,8 @@
+; RUN: llvm-as < %s | opt -instcombine -disable-output
+define i1 @test(i32 %tmp9) {
+ %tmp20 = icmp ugt i32 %tmp9, 255 ; <i1> [#uses=1]
+ %tmp11.not = icmp sgt i32 %tmp9, 255 ; <i1> [#uses=1]
+ %bothcond = or i1 %tmp20, %tmp11.not ; <i1> [#uses=1]
+ ret i1 %bothcond
+}
+
diff --git a/test/Transforms/InstCombine/2007-05-14-Crash.ll b/test/Transforms/InstCombine/2007-05-14-Crash.ll
new file mode 100644
index 0000000..ececd35
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-05-14-Crash.ll
@@ -0,0 +1,18 @@
+; RUN: llvm-as < %s | opt -instcombine -disable-output
+
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
+target triple = "powerpc-apple-darwin8.8.0"
+
+%struct.abc = type { i32, [32 x i8] }
+%struct.def = type { i8**, %struct.abc }
+ %struct.anon = type <{ }>
+
+define i8* @foo(%struct.anon* %deviceRef, %struct.abc* %pCap) {
+entry:
+ %tmp1 = bitcast %struct.anon* %deviceRef to %struct.def*
+ %tmp3 = getelementptr %struct.def* %tmp1, i32 0, i32 1
+ %tmp35 = bitcast %struct.abc* %tmp3 to i8*
+ ret i8* %tmp35
+}
+
+
diff --git a/test/Transforms/InstCombine/2007-05-18-CastFoldBug.ll b/test/Transforms/InstCombine/2007-05-18-CastFoldBug.ll
new file mode 100644
index 0000000..55bfac5
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-05-18-CastFoldBug.ll
@@ -0,0 +1,10 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {call.*sret}
+; Make sure instcombine doesn't drop the sret attribute.
+
+define void @blah(i16* %tmp10) {
+entry:
+ call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend_stret to void (i16* sret )*)( i16* %tmp10 sret )
+ ret void
+}
+
+declare i8* @objc_msgSend_stret(i8*, i8*, ...)
diff --git a/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll b/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll
new file mode 100644
index 0000000..482c608
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll
@@ -0,0 +1,22 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {ashr}
+; PR1499
+
+define void @av_cmp_q_cond_true(i32* %retval, i32* %tmp9, i64* %tmp10) {
+newFuncRoot:
+ br label %cond_true
+
+return.exitStub: ; preds = %cond_true
+ ret void
+
+cond_true: ; preds = %newFuncRoot
+ %tmp30 = load i64* %tmp10 ; <i64> [#uses=1]
+ %.cast = zext i32 63 to i64 ; <i64> [#uses=1]
+ %tmp31 = ashr i64 %tmp30, %.cast ; <i64> [#uses=1]
+ %tmp3132 = trunc i64 %tmp31 to i32 ; <i32> [#uses=1]
+ %tmp33 = or i32 %tmp3132, 1 ; <i32> [#uses=1]
+ store i32 %tmp33, i32* %tmp9
+ %tmp34 = load i32* %tmp9 ; <i32> [#uses=1]
+ store i32 %tmp34, i32* %retval
+ br label %return.exitStub
+}
+
diff --git a/test/Transforms/InstCombine/2007-06-21-DivCompareMiscomp.ll b/test/Transforms/InstCombine/2007-06-21-DivCompareMiscomp.ll
new file mode 100644
index 0000000..ffc4026
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-06-21-DivCompareMiscomp.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {ret i1 true}
+; rdar://5278853
+
+define i1 @test(i32 %tmp468) {
+ %tmp470 = udiv i32 %tmp468, 4 ; <i32> [#uses=2]
+ %tmp475 = icmp ult i32 %tmp470, 1073741824 ; <i1> [#uses=1]
+ ret i1 %tmp475
+}
+
diff --git a/test/Transforms/InstCombine/CPP_min_max.llx b/test/Transforms/InstCombine/CPP_min_max.llx
new file mode 100644
index 0000000..2818fa7
--- /dev/null
+++ b/test/Transforms/InstCombine/CPP_min_max.llx
@@ -0,0 +1,36 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep select | not grep {i32\\*}
+; END.
+
+; This testcase corresponds to PR362, which notices that this horrible code
+; is generated by the C++ front-end and LLVM optimizers, which has lots of
+; loads and other stuff that are unneeded.
+;
+; Instcombine should propagate the load through the select instructions to
+; allow elimination of the extra stuff by the mem2reg pass.
+
+implementation ; Functions:
+
+void %_Z5test1RiS_(int* %x, int* %y) {
+entry:
+ %tmp.1.i = load int* %y ; <int> [#uses=1]
+ %tmp.3.i = load int* %x ; <int> [#uses=1]
+ %tmp.4.i = setlt int %tmp.1.i, %tmp.3.i ; <bool> [#uses=1]
+ %retval.i = select bool %tmp.4.i, int* %y, int* %x ; <int*> [#uses=1]
+ %tmp.4 = load int* %retval.i ; <int> [#uses=1]
+ store int %tmp.4, int* %x
+ ret void
+}
+
+void %_Z5test2RiS_(int* %x, int* %y) {
+entry:
+ %tmp.0 = alloca int ; <int*> [#uses=2]
+ %tmp.2 = load int* %x ; <int> [#uses=2]
+ store int %tmp.2, int* %tmp.0
+ %tmp.3.i = load int* %y ; <int> [#uses=1]
+ %tmp.4.i = setlt int %tmp.2, %tmp.3.i ; <bool> [#uses=1]
+ %retval.i = select bool %tmp.4.i, int* %y, int* %tmp.0 ; <int*> [#uses=1]
+ %tmp.6 = load int* %retval.i ; <int> [#uses=1]
+ store int %tmp.6, int* %y
+ ret void
+}
diff --git a/test/Transforms/InstCombine/GEPIdxCanon.ll b/test/Transforms/InstCombine/GEPIdxCanon.ll
new file mode 100644
index 0000000..bf0ab79
--- /dev/null
+++ b/test/Transforms/InstCombine/GEPIdxCanon.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -gcse -instcombine | \
+; RUN: llvm-dis | not grep getelementptr
+
+bool %test(int* %A) {
+ %B = getelementptr int* %A, int 1
+ %C = getelementptr int* %A, uint 1
+ %V = seteq int* %B, %C
+ ret bool %V
+}
diff --git a/test/Transforms/InstCombine/IntPtrCast.ll b/test/Transforms/InstCombine/IntPtrCast.ll
new file mode 100644
index 0000000..7ff71c8
--- /dev/null
+++ b/test/Transforms/InstCombine/IntPtrCast.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | notcast
+target endian = little
+target pointersize = 32
+
+int *%test(int *%P) {
+ %V = cast int* %P to int
+ %P2 = cast int %V to int*
+ ret int* %P2
+}
diff --git a/test/Transforms/InstCombine/JavaCompare.ll b/test/Transforms/InstCombine/JavaCompare.ll
new file mode 100644
index 0000000..4ed064c
--- /dev/null
+++ b/test/Transforms/InstCombine/JavaCompare.ll
@@ -0,0 +1,15 @@
+; This is the sequence of stuff that the Java front-end expands for a single
+; <= comparison. Check to make sure we turn it into a <= (only)
+
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep -v {icmp sle} | not grep #uses
+
+bool %le(int %A, int %B) {
+ %c1 = setgt int %A, %B;
+ %tmp = select bool %c1, int 1, int 0;
+ %c2 = setlt int %A, %B;
+ %result = select bool %c2, int -1, int %tmp;
+ %c3 = setle int %result, 0;
+ ret bool %c3;
+}
+
diff --git a/test/Transforms/InstCombine/README.txt b/test/Transforms/InstCombine/README.txt
new file mode 100644
index 0000000..de043c7
--- /dev/null
+++ b/test/Transforms/InstCombine/README.txt
@@ -0,0 +1,4 @@
+This directory contains test cases for the instcombine transformation. The
+dated tests are actual bug tests, whereas the named tests are used to test
+for features that this pass should be capable of performing.
+
diff --git a/test/Transforms/InstCombine/add.ll b/test/Transforms/InstCombine/add.ll
new file mode 100644
index 0000000..df99e96
--- /dev/null
+++ b/test/Transforms/InstCombine/add.ll
@@ -0,0 +1,251 @@
+; This test makes sure that add instructions are properly eliminated.
+
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep -v OK | not grep add
+; END.
+
+implementation
+
+int %test1(int %A) {
+ %B = add int %A, 0
+ ret int %B
+}
+
+int %test2(int %A) {
+ %B = add int %A, 5
+ %C = add int %B, -5
+ ret int %C
+}
+
+int %test3(int %A) {
+ %B = add int %A, 5
+ %C = sub int %B, 5 ;; This should get converted to an add
+ ret int %C
+}
+
+int %test4(int %A, int %B) {
+ %C = sub int 0, %A
+ %D = add int %B, %C ; D = B + -A = B - A
+ ret int %D
+}
+
+int %test5(int %A, int %B) {
+ %C = sub int 0, %A
+ %D = add int %C, %B ; D = -A + B = B - A
+ ret int %D
+}
+
+int %test6(int %A) {
+ %B = mul int 7, %A
+ %C = add int %B, %A ; C = 7*A+A == 8*A == A << 3
+ ret int %C
+}
+
+int %test7(int %A) {
+ %B = mul int 7, %A
+ %C = add int %A, %B ; C = A+7*A == 8*A == A << 3
+ ret int %C
+}
+
+; (A & C1)+(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
+int %test8(int %A, int %B) {
+ %A1 = and int %A, 7
+ %B1 = and int %B, 128
+ %C = add int %A1, %B1
+ ret int %C
+}
+
+int %test9(int %A) {
+ %B = shl int %A, ubyte 4
+ %C = add int %B, %B ; === shl int %A, 5
+ ret int %C
+}
+
+bool %test10(ubyte %A, ubyte %b) {
+ %B = add ubyte %A, %b
+ %c = setne ubyte %B, 0 ; === A != -b
+ ret bool %c
+}
+
+bool %test11(ubyte %A) {
+ %B = add ubyte %A, 255
+ %c = setne ubyte %B, 0 ; === A != 1
+ ret bool %c
+}
+
+int %test12(int %A, int %B) {
+ %C_OK = add int %B, %A ; Should be transformed into shl A, 1
+ br label %X
+X:
+ %D = add int %C_OK, %A
+ ret int %D
+}
+
+int %test13(int %A, int %B, int %C) {
+ %D_OK = add int %A, %B
+ %E_OK = add int %D_OK, %C
+ %F = add int %E_OK, %A ;; shl A, 1
+ ret int %F
+}
+
+uint %test14(uint %offset, uint %difference) {
+ %tmp.2 = and uint %difference, 3
+ %tmp.3_OK = add uint %tmp.2, %offset
+ %tmp.5.mask = and uint %difference, 4294967292
+ %tmp.8 = add uint %tmp.3_OK, %tmp.5.mask ; == add %offset, %difference
+ ret uint %tmp.8
+}
+
+ubyte %test15(ubyte %A) {
+	%B = add ubyte %A, 192 ; Does not affect result
+ %C = and ubyte %B, 16 ; Only one bit set
+ ret ubyte %C
+}
+
+ubyte %test16(ubyte %A) {
+ %B = add ubyte %A, 16 ; Turn this into a XOR
+ %C = and ubyte %B, 16 ; Only one bit set
+ ret ubyte %C
+}
+
+int %test17(int %A) {
+ %B = xor int %A, -1
+ %C = add int %B, 1 ; == sub int 0, %A
+ ret int %C
+}
+
+ubyte %test18(ubyte %A) {
+ %B = xor ubyte %A, 255
+ %C = add ubyte %B, 17 ; == sub ubyte 16, %A
+ ret ubyte %C
+}
+
+int %test19(bool %C) {
+ %A = select bool %C, int 1000, int 10
+ %V = add int %A, 123
+ ret int %V
+}
+
+int %test20(int %x) {
+ %tmp.2 = xor int %x, -2147483648
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add int %tmp.2, -2147483648
+ ret int %tmp.4
+}
+
+bool %test21(uint %x) {
+ %t = add uint %x, 4
+ %y = seteq uint %t, 123
+ ret bool %y
+}
+
+int %test22(uint %V) {
+ %V2 = add uint %V, 10
+ switch uint %V2, label %Default [
+ uint 20, label %Lab1
+ uint 30, label %Lab2
+ ]
+Default:
+ ret int 123
+Lab1:
+ ret int 12312
+Lab2:
+ ret int 1231231
+}
+
+int %test23(bool %C, int %a) {
+entry:
+ br bool %C, label %endif, label %else
+
+else:
+ br label %endif
+
+endif:
+ %b.0 = phi int [ 0, %entry ], [ 1, %else ]
+ %tmp.4 = add int %b.0, 1
+ ret int %tmp.4
+}
+
+int %test24(int %A) {
+ %B = add int %A, 1
+ %C = shl int %B, ubyte 1
+ %D = sub int %C, 2
+ ret int %D ;; A << 1
+}
+
+long %test25(long %Y) {
+ %tmp.4 = shl long %Y, ubyte 2
+ %tmp.12 = shl long %Y, ubyte 2
+ %tmp.8 = add long %tmp.4, %tmp.12 ;; Y << 3
+ ret long %tmp.8
+}
+
+int %test26(int %A, int %B) {
+ %C = add int %A, %B
+ %D = sub int %C, %B
+ ret int %D
+}
+
+int %test27(bool %C, int %X, int %Y) {
+ %A = add int %X, %Y
+ %B = add int %Y, 123
+ %C = select bool %C, int %A, int %B ;; Fold add through select.
+ %D = sub int %C, %Y
+ ret int %D
+}
+
+int %test28(int %X) {
+ %Y = add int %X, 1234
+ %Z = sub int 42, %Y
+ ret int %Z
+}
+
+uint %test29(uint %X, uint %x) {
+ %tmp.2 = sub uint %X, %x
+ %tmp.2.mask = and uint %tmp.2, 63 ; <uint> [#uses=1]
+ %tmp.6 = add uint %tmp.2.mask, %x ; <uint> [#uses=1]
+ %tmp.7 = and uint %tmp.6, 63 ; <uint> [#uses=1]
+ %tmp.9 = and uint %tmp.2, 4294967232 ; <uint> [#uses=1]
+ %tmp.10 = or uint %tmp.7, %tmp.9 ; <uint> [#uses=1]
+ ret uint %tmp.10
+}
+
+long %test30(long %x) {
+ %tmp.2 = xor long %x, -9223372036854775808
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add long %tmp.2, -9223372036854775808
+ ret long %tmp.4
+}
+
+int %test31(int %A) {
+ %B = add int %A, 4
+ %C = mul int %B, 5
+ %D = sub int %C, 20
+ ret int %D
+}
+
+int %test32(int %A) {
+ %B = add int %A, 4
+ %C = shl int %B, ubyte 2
+ %D = sub int %C, 16
+ ret int %D
+}
+
+ubyte %test33(ubyte %A) { ;; OR A, 1
+ %B = and ubyte %A, 254
+ %C = add ubyte %B, 1
+ ret ubyte %C
+}
+
+ubyte %test34(ubyte %A) {
+ %B = add ubyte %A, 64 ;; dead
+ %C = and ubyte %B, 12
+ ret ubyte %C
+}
+
+int %test35(int %a) { ;; -> -1
+ %tmpnot = xor int %a, -1
+ %tmp2 = add int %tmpnot, %a
+ ret int %tmp2
+}
+
diff --git a/test/Transforms/InstCombine/add2.ll b/test/Transforms/InstCombine/add2.ll
new file mode 100644
index 0000000..ff89946
--- /dev/null
+++ b/test/Transforms/InstCombine/add2.ll
@@ -0,0 +1,11 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: grep -v OK | not grep add
+
+define i64 @test1(i64 %A, i32 %B) {
+ %tmp12 = zext i32 %B to i64
+ %tmp3 = shl i64 %tmp12, 32
+ %tmp5 = add i64 %tmp3, %A
+ %tmp6 = and i64 %tmp5, 123
+ ret i64 %tmp6
+}
+
diff --git a/test/Transforms/InstCombine/alloca.ll b/test/Transforms/InstCombine/alloca.ll
new file mode 100644
index 0000000..43e4e32
--- /dev/null
+++ b/test/Transforms/InstCombine/alloca.ll
@@ -0,0 +1,29 @@
+; Zero byte allocas should be deleted.
+
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep alloca
+; END.
+
+declare void %use(...)
+void %test() {
+ %X = alloca [0 x int]
+ call void(...)* %use([0 x int] *%X)
+ %Y = alloca int, uint 0
+ call void(...)* %use(int* %Y)
+ %Z = alloca {}
+ call void(...)* %use({}* %Z)
+ ret void
+}
+
+void %test2() {
+ %A = alloca int ;; dead.
+ store int 123, int* %A
+ ret void
+}
+
+void %test3() {
+ %A = alloca {int} ;; dead.
+ %B = getelementptr {int}* %A, int 0, uint 0
+ store int 123, int* %B
+ ret void
+}
diff --git a/test/Transforms/InstCombine/and-compare.ll b/test/Transforms/InstCombine/and-compare.ll
new file mode 100644
index 0000000..5980631
--- /dev/null
+++ b/test/Transforms/InstCombine/and-compare.ll
@@ -0,0 +1,11 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep and | wc -l | grep 1
+
+; Should be optimized to one and.
+bool %test1(uint %a, uint %b) {
+ %tmp1 = and uint %a, 65280
+ %tmp3 = and uint %b, 65280
+ %tmp = setne uint %tmp1, %tmp3
+ ret bool %tmp
+}
+
diff --git a/test/Transforms/InstCombine/and-or-and.ll b/test/Transforms/InstCombine/and-or-and.ll
new file mode 100644
index 0000000..ea7a87f
--- /dev/null
+++ b/test/Transforms/InstCombine/and-or-and.ll
@@ -0,0 +1,56 @@
+; If we have an 'and' of the result of an 'or', and one of the 'or' operands
+; cannot have contributed any of the resultant bits, delete the or. This
+; occurs for very common C/C++ code like this:
+;
+; struct foo { int A : 16; int B : 16; };
+; void test(struct foo *F, int X, int Y) {
+; F->A = X; F->B = Y;
+; }
+;
+; Which corresponds to test1.
+
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep {or }
+; END.
+
+int %test1(int %X, int %Y) {
+ %A = and int %X, 7
+ %B = and int %Y, 8
+ %C = or int %A, %B
+ %D = and int %C, 7 ;; This cannot include any bits from %Y!
+ ret int %D
+}
+
+int %test2(int %X, ubyte %Y) {
+ %B = cast ubyte %Y to int
+ %C = or int %X, %B
+ %D = and int %C, 65536 ;; This cannot include any bits from %Y!
+ ret int %D
+}
+
+int %test3(int %X, int %Y) {
+ %B = shl int %Y, ubyte 1
+ %C = or int %X, %B
+ %D = and int %C, 1 ;; This cannot include any bits from %Y!
+ ret int %D
+}
+
+uint %test4(uint %X, uint %Y) {
+ %B = shr uint %Y, ubyte 31
+ %C = or uint %X, %B
+ %D = and uint %C, 2 ;; This cannot include any bits from %Y!
+ ret uint %D
+}
+
+int %or_test1(int %X, int %Y) {
+ %A = and int %X, 1
+ %B = or int %A, 1 ;; This cannot include any bits from X!
+ ret int %B
+}
+
+ubyte %or_test2(ubyte %X, ubyte %Y) {
+ %A = shl ubyte %X, ubyte 7
+ %B = or ubyte %A, 128 ;; This cannot include any bits from X!
+ ret ubyte %B
+}
+
diff --git a/test/Transforms/InstCombine/and-or-not.ll b/test/Transforms/InstCombine/and-or-not.ll
new file mode 100644
index 0000000..e9c7b12
--- /dev/null
+++ b/test/Transforms/InstCombine/and-or-not.ll
@@ -0,0 +1,46 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep xor | wc -l | grep 4
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep and
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep { or}
+
+; PR1510
+
+; These are all equivalent to A^B
+
+define i32 @test1(i32 %a, i32 %b) {
+entry:
+ %tmp3 = or i32 %b, %a ; <i32> [#uses=1]
+ %tmp3not = xor i32 %tmp3, -1 ; <i32> [#uses=1]
+ %tmp6 = and i32 %b, %a ; <i32> [#uses=1]
+ %tmp7 = or i32 %tmp6, %tmp3not ; <i32> [#uses=1]
+ %tmp7not = xor i32 %tmp7, -1 ; <i32> [#uses=1]
+ ret i32 %tmp7not
+}
+
+define i32 @test2(i32 %a, i32 %b) {
+entry:
+ %tmp3 = or i32 %b, %a ; <i32> [#uses=1]
+ %tmp6 = and i32 %b, %a ; <i32> [#uses=1]
+ %tmp6not = xor i32 %tmp6, -1 ; <i32> [#uses=1]
+ %tmp7 = and i32 %tmp3, %tmp6not ; <i32> [#uses=1]
+ ret i32 %tmp7
+}
+
+define <4 x i32> @test3(<4 x i32> %a, <4 x i32> %b) {
+entry:
+ %tmp3 = or <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
+ %tmp3not = xor <4 x i32> %tmp3, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
+ %tmp6 = and <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
+ %tmp7 = or <4 x i32> %tmp6, %tmp3not ; <<4 x i32>> [#uses=1]
+ %tmp7not = xor <4 x i32> %tmp7, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
+ ret <4 x i32> %tmp7not
+}
+
+define <4 x i32> @test4(<4 x i32> %a, <4 x i32> %b) {
+entry:
+ %tmp3 = or <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
+ %tmp6 = and <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
+ %tmp6not = xor <4 x i32> %tmp6, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
+ %tmp7 = and <4 x i32> %tmp3, %tmp6not ; <<4 x i32>> [#uses=1]
+ ret <4 x i32> %tmp7
+}
+
diff --git a/test/Transforms/InstCombine/and-xor-merge.ll b/test/Transforms/InstCombine/and-xor-merge.ll
new file mode 100644
index 0000000..c53ebc6
--- /dev/null
+++ b/test/Transforms/InstCombine/and-xor-merge.ll
@@ -0,0 +1,19 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep and | wc -l | grep 1
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep xor | wc -l | grep 2
+
+; (x&z) ^ (y&z) -> (x^y)&z
+define i32 @test1(i32 %x, i32 %y, i32 %z) {
+ %tmp3 = and i32 %z, %x
+ %tmp6 = and i32 %z, %y
+ %tmp7 = xor i32 %tmp3, %tmp6
+ ret i32 %tmp7
+}
+
+; (x & y) ^ (x|y) -> x^y
+define i32 @test2(i32 %x, i32 %y, i32 %z) {
+ %tmp3 = and i32 %y, %x
+ %tmp6 = or i32 %y, %x
+ %tmp7 = xor i32 %tmp3, %tmp6
+ ret i32 %tmp7
+}
+
diff --git a/test/Transforms/InstCombine/and.ll b/test/Transforms/InstCombine/and.ll
new file mode 100644
index 0000000..7b08975
--- /dev/null
+++ b/test/Transforms/InstCombine/and.ll
@@ -0,0 +1,229 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep and
+; END.
+
+implementation
+
+int %test1(int %A) {
+ %B = and int %A, 0 ; zero result
+ ret int %B
+}
+
+int %test2(int %A) {
+ %B = and int %A, -1 ; noop
+ ret int %B
+}
+
+bool %test3(bool %A) {
+ %B = and bool %A, false ; always = false
+ ret bool %B
+}
+
+bool %test4(bool %A) {
+ %B = and bool %A, true ; noop
+ ret bool %B
+}
+
+int %test5(int %A) {
+ %B = and int %A, %A
+ ret int %B
+}
+
+bool %test6(bool %A) {
+ %B = and bool %A, %A
+ ret bool %B
+}
+
+int %test7(int %A) { ; A & ~A == 0
+ %NotA = xor int %A, -1
+ %B = and int %A, %NotA
+ ret int %B
+}
+
+ubyte %test8(ubyte %A) { ; AND associates
+ %B = and ubyte %A, 3
+ %C = and ubyte %B, 4
+ ret ubyte %C
+}
+
+bool %test9(int %A) {
+ %B = and int %A, -2147483648 ; Test of sign bit, convert to setle %A, 0
+ %C = setne int %B, 0
+ ret bool %C
+}
+
+bool %test9(uint %A) {
+ %B = and uint %A, 2147483648 ; Test of sign bit, convert to setle %A, 0
+ %C = setne uint %B, 0
+ ret bool %C
+}
+
+uint %test10(uint %A) {
+ %B = and uint %A, 12
+ %C = xor uint %B, 15
+ %D = and uint %C, 1 ; (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
+ ret uint %D
+}
+
+uint %test11(uint %A, uint* %P) {
+ %B = or uint %A, 3
+ %C = xor uint %B, 12
+ store uint %C, uint* %P ; additional use of C
+ %D = and uint %C, 3 ; %C = and uint %B, 3 --> 3
+ ret uint %D
+}
+
+bool %test12(uint %A, uint %B) {
+ %C1 = setlt uint %A, %B
+ %C2 = setle uint %A, %B
+ %D = and bool %C1, %C2 ; (A < B) & (A <= B) === (A < B)
+ ret bool %D
+}
+
+bool %test13(uint %A, uint %B) {
+ %C1 = setlt uint %A, %B
+ %C2 = setgt uint %A, %B
+ %D = and bool %C1, %C2 ; (A < B) & (A > B) === false
+ ret bool %D
+}
+
+bool %test14(ubyte %A) {
+ %B = and ubyte %A, 128
+ %C = setne ubyte %B, 0
+ ret bool %C
+}
+
+ubyte %test15(ubyte %A) {
+ %B = shr ubyte %A, ubyte 7
+ %C = and ubyte %B, 2 ; Always equals zero
+ ret ubyte %C
+}
+
+ubyte %test16(ubyte %A) {
+ %B = shl ubyte %A, ubyte 2
+ %C = and ubyte %B, 3
+ ret ubyte %C
+}
+
+sbyte %test17(sbyte %X, sbyte %Y) { ;; ~(~X & Y) --> (X | ~Y)
+ %B = xor sbyte %X, -1
+ %C = and sbyte %B, %Y
+ %D = xor sbyte %C, -1
+ ret sbyte %D
+}
+
+bool %test18(int %A) {
+ %B = and int %A, -128
+ %C = setne int %B, 0 ;; C >= 128
+ ret bool %C
+}
+
+bool %test18a(ubyte %A) {
+ %B = and ubyte %A, 254
+ %C = seteq ubyte %B, 0
+ ret bool %C
+}
+
+int %test19(int %A) {
+ %B = shl int %A, ubyte 3
+ %C = and int %B, -2 ;; Clearing a zero bit
+ ret int %C
+}
+
+ubyte %test20(ubyte %A) {
+ %C = shr ubyte %A, ubyte 7
+ %D = and ubyte %C, 1 ;; Unneeded
+ ret ubyte %D
+}
+
+bool %test22(int %A) {
+ %B = seteq int %A, 1
+ %C = setge int %A, 3
+ %D = and bool %B, %C ;; False
+ ret bool %D
+}
+
+bool %test23(int %A) {
+ %B = setgt int %A, 1
+ %C = setle int %A, 2
+ %D = and bool %B, %C ;; A == 2
+ ret bool %D
+}
+
+bool %test24(int %A) {
+ %B = setgt int %A, 1
+ %C = setne int %A, 2
+ %D = and bool %B, %C ;; A > 2
+ ret bool %D
+}
+
+bool %test25(int %A) {
+ %B = setge int %A, 50
+ %C = setlt int %A, 100
+ %D = and bool %B, %C ;; (A-50) <u 50
+ ret bool %D
+}
+
+bool %test26(int %A) {
+ %B = setne int %A, 50
+ %C = setne int %A, 51
+ %D = and bool %B, %C ;; (A-50) > 1
+ ret bool %D
+}
+
+ubyte %test27(ubyte %A) {
+ %B = and ubyte %A, 4
+ %C = sub ubyte %B, 16
+ %D = and ubyte %C, 240 ;; 0xF0
+ %E = add ubyte %D, 16
+ ret ubyte %E
+}
+
+int %test28(int %X) { ;; This is just a zero extending shr.
+ %Y = shr int %X, ubyte 24 ;; Sign extend
+ %Z = and int %Y, 255 ;; Mask out sign bits
+ ret int %Z
+}
+
+int %test29(ubyte %X) {
+ %Y = cast ubyte %X to int
+ %Z = and int %Y, 255 ;; Zero extend makes this unneeded.
+ ret int %Z
+}
+
+int %test30(bool %X) {
+ %Y = cast bool %X to int
+ %Z = and int %Y, 1
+ ret int %Z
+}
+
+uint %test31(bool %X) {
+ %Y = cast bool %X to uint
+ %Z = shl uint %Y, ubyte 4
+ %A = and uint %Z, 16
+ ret uint %A
+}
+
+uint %test32(uint %In) {
+ %Y = and uint %In, 16
+ %Z = shr uint %Y, ubyte 2
+ %A = and uint %Z, 1
+ ret uint %A
+}
+
+uint %test33(uint %b) { ;; Code corresponding to one-bit bitfield ^1.
+ %tmp.4.mask = and uint %b, 1
+ %tmp.10 = xor uint %tmp.4.mask, 1
+ %tmp.12 = and uint %b, 4294967294
+ %tmp.13 = or uint %tmp.12, %tmp.10
+ ret uint %tmp.13
+}
+
+int %test34(int %A, int %B) {
+ %tmp.2 = or int %B, %A
+ %tmp.4 = and int %tmp.2, %B
+ ret int %tmp.4
+}
+
diff --git a/test/Transforms/InstCombine/apint-add1.ll b/test/Transforms/InstCombine/apint-add1.ll
new file mode 100644
index 0000000..74280ee
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-add1.ll
@@ -0,0 +1,34 @@
+; This test makes sure that add instructions are properly eliminated.
+; This test is for Integer BitWidth <= 64 && BitWidth % 8 != 0.
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: grep -v OK | not grep add
+
+
+define i1 @test1(i1 %x) {
+ %tmp.2 = xor i1 %x, 1
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i1 %tmp.2, 1
+ ret i1 %tmp.4
+}
+
+define i47 @test2(i47 %x) {
+ %tmp.2 = xor i47 %x, 70368744177664
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i47 %tmp.2, 70368744177664
+ ret i47 %tmp.4
+}
+
+define i15 @test3(i15 %x) {
+ %tmp.2 = xor i15 %x, 16384
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i15 %tmp.2, 16384
+ ret i15 %tmp.4
+}
+
+define i49 @test6(i49 %x) {
+ ;; (x & 254)+1 -> (x & 254)|1
+ %tmp.2 = and i49 %x, 562949953421310
+ %tmp.4 = add i49 %tmp.2, 1
+ ret i49 %tmp.4
+}
diff --git a/test/Transforms/InstCombine/apint-add2.ll b/test/Transforms/InstCombine/apint-add2.ll
new file mode 100644
index 0000000..0ddfcc0
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-add2.ll
@@ -0,0 +1,46 @@
+; This test makes sure that add instructions are properly eliminated.
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: grep -v OK | not grep add
+; END.
+
+define i111 @test1(i111 %x) {
+ %tmp.2 = shl i111 1, 110
+ %tmp.4 = xor i111 %x, %tmp.2
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.6 = add i111 %tmp.4, %tmp.2
+ ret i111 %tmp.6
+}
+
+define i65 @test2(i65 %x) {
+ %tmp.0 = shl i65 1, 64
+ %tmp.2 = xor i65 %x, %tmp.0
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i65 %tmp.2, %tmp.0
+ ret i65 %tmp.4
+}
+
+define i1024 @test3(i1024 %x) {
+ %tmp.0 = shl i1024 1, 1023
+ %tmp.2 = xor i1024 %x, %tmp.0
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i1024 %tmp.2, %tmp.0
+ ret i1024 %tmp.4
+}
+
+define i128 @test4(i128 %x) {
+ ;; If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
+ %tmp.5 = shl i128 1, 127
+ %tmp.1 = ashr i128 %tmp.5, 120
+ %tmp.2 = xor i128 %x, %tmp.1
+ %tmp.4 = add i128 %tmp.2, %tmp.5
+ ret i128 %tmp.4
+}
+
+define i77 @test6(i77 %x) {
+ ;; (x & 254)+1 -> (x & 254)|1
+ %tmp.2 = and i77 %x, 562949953421310
+ %tmp.4 = add i77 %tmp.2, 1
+ ret i77 %tmp.4
+}
diff --git a/test/Transforms/InstCombine/apint-and-compare.ll b/test/Transforms/InstCombine/apint-and-compare.ll
new file mode 100644
index 0000000..4d250a0
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-and-compare.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep and | wc -l | grep 2
+
+; Should be optimized to one and.
+define i1 @test1(i33 %a, i33 %b) {
+ %tmp1 = and i33 %a, 65280
+ %tmp3 = and i33 %b, 65280
+ %tmp = icmp ne i33 %tmp1, %tmp3
+ ret i1 %tmp
+}
+
+define i1 @test2(i999 %a, i999 %b) {
+ %tmp1 = and i999 %a, 65280
+ %tmp3 = and i999 %b, 65280
+ %tmp = icmp ne i999 %tmp1, %tmp3
+ ret i1 %tmp
+}
diff --git a/test/Transforms/InstCombine/apint-and-or-and.ll b/test/Transforms/InstCombine/apint-and-or-and.ll
new file mode 100644
index 0000000..4630f28
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-and-or-and.ll
@@ -0,0 +1,50 @@
+; If we have an 'and' of the result of an 'or', and one of the 'or' operands
+; cannot have contributed any of the resultant bits, delete the or. This
+; occurs for very common C/C++ code like this:
+;
+; struct foo { int A : 16; int B : 16; };
+; void test(struct foo *F, int X, int Y) {
+; F->A = X; F->B = Y;
+; }
+;
+; Which corresponds to test1.
+;
+; This tests arbitrary precision integers.
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep {or }
+; END.
+
+define i17 @test1(i17 %X, i17 %Y) {
+ %A = and i17 %X, 7
+ %B = and i17 %Y, 8
+ %C = or i17 %A, %B
+ %D = and i17 %C, 7 ;; This cannot include any bits from %Y!
+ ret i17 %D
+}
+
+define i49 @test3(i49 %X, i49 %Y) {
+ %B = shl i49 %Y, 1
+ %C = or i49 %X, %B
+ %D = and i49 %C, 1 ;; This cannot include any bits from %Y!
+ ret i49 %D
+}
+
+define i67 @test4(i67 %X, i67 %Y) {
+ %B = lshr i67 %Y, 66
+ %C = or i67 %X, %B
+ %D = and i67 %C, 2 ;; This cannot include any bits from %Y!
+ ret i67 %D
+}
+
+define i231 @or_test1(i231 %X, i231 %Y) {
+ %A = and i231 %X, 1
+ %B = or i231 %A, 1 ;; This cannot include any bits from X!
+ ret i231 %B
+}
+
+define i7 @or_test2(i7 %X, i7 %Y) {
+ %A = shl i7 %X, 6
+ %B = or i7 %A, 64 ;; This cannot include any bits from X!
+ ret i7 %B
+}
+
diff --git a/test/Transforms/InstCombine/apint-and-xor-merge.ll b/test/Transforms/InstCombine/apint-and-xor-merge.ll
new file mode 100644
index 0000000..665a643
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-and-xor-merge.ll
@@ -0,0 +1,22 @@
+; This test case checks that the merge of and/xor can work on arbitrary
+; precision integers.
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep and | wc -l | grep 1
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep xor | wc -l | grep 2
+
+; (x &z ) ^ (y & z) -> (x ^ y) & z
+define i57 @test1(i57 %x, i57 %y, i57 %z) {
+ %tmp3 = and i57 %z, %x
+ %tmp6 = and i57 %z, %y
+ %tmp7 = xor i57 %tmp3, %tmp6
+ ret i57 %tmp7
+}
+
+; (x & y) ^ (x | y) -> x ^ y
+define i23 @test2(i23 %x, i23 %y, i23 %z) {
+ %tmp3 = and i23 %y, %x
+ %tmp6 = or i23 %y, %x
+ %tmp7 = xor i23 %tmp3, %tmp6
+ ret i23 %tmp7
+}
+
diff --git a/test/Transforms/InstCombine/apint-and1.ll b/test/Transforms/InstCombine/apint-and1.ll
new file mode 100644
index 0000000..eb3b1a6
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-and1.ll
@@ -0,0 +1,57 @@
+; This test makes sure that and instructions are properly eliminated.
+; This test is for Integer BitWidth <= 64 && BitWidth % 8 != 0.
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep {and }
+; END.
+
+define i39 @test0(i39 %A) {
+ %B = and i39 %A, 0 ; zero result
+ ret i39 %B
+}
+
+define i47 @test1(i47 %A, i47 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i47 %A, -1
+ %NotB = xor i47 %B, -1
+ %C1 = and i47 %NotA, %NotB
+ ret i47 %C1
+}
+
+define i15 @test2(i15 %x) {
+ %tmp.2 = and i15 %x, -1 ; noop
+ ret i15 %tmp.2
+}
+
+define i23 @test3(i23 %x) {
+ %tmp.0 = and i23 %x, 127
+ %tmp.2 = and i23 %tmp.0, 128
+ ret i23 %tmp.2
+}
+
+define i1 @test4(i37 %x) {
+ %A = and i37 %x, -2147483648
+ %B = icmp ne i37 %A, 0
+ ret i1 %B
+}
+
+define i7 @test5(i7 %A, i7* %P) {
+ %B = or i7 %A, 3
+ %C = xor i7 %B, 12
+ store i7 %C, i7* %P
+ %r = and i7 %C, 3
+ ret i7 %r
+}
+
+define i7 @test6(i7 %A, i7 %B) {
+ ;; ~(~X & Y) --> (X | ~Y)
+ %t0 = xor i7 %A, -1
+ %t1 = and i7 %t0, %B
+ %r = xor i7 %t1, -1
+ ret i7 %r
+}
+
+define i47 @test7(i47 %A) {
+ %X = ashr i47 %A, 39 ;; sign extend
+ %C1 = and i47 %X, 255
+ ret i47 %C1
+}
diff --git a/test/Transforms/InstCombine/apint-and2.ll b/test/Transforms/InstCombine/apint-and2.ll
new file mode 100644
index 0000000..f7b3934
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-and2.ll
@@ -0,0 +1,82 @@
+; This test makes sure that and instructions are properly eliminated.
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep {and }
+; END.
+
+
+define i999 @test0(i999 %A) {
+ %B = and i999 %A, 0 ; zero result
+ ret i999 %B
+}
+
+define i477 @test1(i477 %A, i477 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i477 %A, -1
+ %NotB = xor i477 %B, -1
+ %C1 = and i477 %NotA, %NotB
+ ret i477 %C1
+}
+
+define i129 @tst(i129 %A, i129 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i129 %A, -1
+ %NotB = xor i129 %B, -1
+ %C1 = and i129 %NotA, %NotB
+ ret i129 %C1
+}
+
+define i65 @test(i65 %A, i65 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i65 %A, -1
+ %NotB = xor i65 -1, %B
+ %C1 = and i65 %NotA, %NotB
+ ret i65 %C1
+}
+
+define i66 @tes(i66 %A, i66 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i66 %A, -1
+ %NotB = xor i66 %B, -1
+ %C1 = and i66 %NotA, %NotB
+ ret i66 %C1
+}
+
+define i1005 @test2(i1005 %x) {
+ %tmp.2 = and i1005 %x, -1 ; noop
+ ret i1005 %tmp.2
+}
+
+define i123 @test3(i123 %x) {
+ %tmp.0 = and i123 %x, 127
+ %tmp.2 = and i123 %tmp.0, 128
+ ret i123 %tmp.2
+}
+
+define i1 @test4(i737 %x) {
+ %A = and i737 %x, -2147483648
+ %B = icmp ne i737 %A, 0
+ ret i1 %B
+}
+
+define i117 @test5(i117 %A, i117* %P) {
+ %B = or i117 %A, 3
+ %C = xor i117 %B, 12
+ store i117 %C, i117* %P
+ %r = and i117 %C, 3
+ ret i117 %r
+}
+
+define i117 @test6(i117 %A, i117 %B) {
+ ;; ~(~X & Y) --> (X | ~Y)
+ %t0 = xor i117 %A, -1
+ %t1 = and i117 %t0, %B
+ %r = xor i117 %t1, -1
+ ret i117 %r
+}
+
+define i1024 @test7(i1024 %A) {
+ %X = ashr i1024 %A, 1016 ;; sign extend
+ %C1 = and i1024 %X, 255
+ ret i1024 %C1
+}
diff --git a/test/Transforms/InstCombine/apint-call-cast-target.ll b/test/Transforms/InstCombine/apint-call-cast-target.ll
new file mode 100644
index 0000000..0f87a53
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-call-cast-target.ll
@@ -0,0 +1,13 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep call | not grep bitcast
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+
+
+define i32 @main() {
+entry:
+ %tmp = call i32 bitcast (i7* (i999*)* @ctime to i32 (i99*)*)( i99* null )
+ ret i32 %tmp
+}
+
+declare i7* @ctime(i999*)
diff --git a/test/Transforms/InstCombine/apint-cast-and-cast.ll b/test/Transforms/InstCombine/apint-cast-and-cast.ll
new file mode 100644
index 0000000..337fd7c
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-cast-and-cast.ll
@@ -0,0 +1,15 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep bitcast
+
+define i19 @test1(i43 %val) {
+ %t1 = bitcast i43 %val to i43
+ %t2 = and i43 %t1, 1
+ %t3 = trunc i43 %t2 to i19
+ ret i19 %t3
+}
+
+define i73 @test2(i677 %val) {
+ %t1 = bitcast i677 %val to i677
+ %t2 = and i677 %t1, 1
+ %t3 = trunc i677 %t2 to i73
+ ret i73 %t3
+}
diff --git a/test/Transforms/InstCombine/apint-cast-cast-to-and.ll b/test/Transforms/InstCombine/apint-cast-cast-to-and.ll
new file mode 100644
index 0000000..29a8869
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-cast-cast-to-and.ll
@@ -0,0 +1,8 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep i41
+
+define i61 @test1(i61 %X) {
+	%Y = trunc i61 %X to i41 ;; Turn into an AND
+ %Z = zext i41 %Y to i61
+ ret i61 %Z
+}
+
diff --git a/test/Transforms/InstCombine/apint-cast.ll b/test/Transforms/InstCombine/apint-cast.ll
new file mode 100644
index 0000000..dd00146
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-cast.ll
@@ -0,0 +1,20 @@
+; Tests to make sure elimination of casts is working correctly
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | notcast
+
+define i17 @test1(i17 %a) {
+ %tmp = zext i17 %a to i37 ; <i37> [#uses=2]
+ %tmp21 = lshr i37 %tmp, 8 ; <i37> [#uses=1]
+ %tmp5 = shl i37 %tmp, 8 ; <i37> [#uses=1]
+ %tmp.upgrd.32 = or i37 %tmp21, %tmp5 ; <i37> [#uses=1]
+ %tmp.upgrd.3 = trunc i37 %tmp.upgrd.32 to i17 ; <i17> [#uses=1]
+ ret i17 %tmp.upgrd.3
+}
+
+define i167 @test2(i167 %a) {
+ %tmp = zext i167 %a to i577 ; <i577> [#uses=2]
+ %tmp21 = lshr i577 %tmp, 9 ; <i577> [#uses=1]
+ %tmp5 = shl i577 %tmp, 8 ; <i577> [#uses=1]
+ %tmp.upgrd.32 = or i577 %tmp21, %tmp5 ; <i577> [#uses=1]
+ %tmp.upgrd.3 = trunc i577 %tmp.upgrd.32 to i167 ; <i167> [#uses=1]
+ ret i167 %tmp.upgrd.3
+}
diff --git a/test/Transforms/InstCombine/apint-div1.ll b/test/Transforms/InstCombine/apint-div1.ll
new file mode 100644
index 0000000..e9aa579
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-div1.ll
@@ -0,0 +1,22 @@
+; This test makes sure that div instructions are properly eliminated.
+; This test is for Integer BitWidth < 64 && BitWidth % 2 != 0.
+;
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep div
+
+
+define i33 @test1(i33 %X) {
+ %Y = udiv i33 %X, 4096
+ ret i33 %Y
+}
+
+define i49 @test2(i49 %X) {
+ %tmp.0 = shl i49 4096, 17
+ %Y = udiv i49 %X, %tmp.0
+ ret i49 %Y
+}
+
+define i59 @test3(i59 %X, i1 %C) {
+ %V = select i1 %C, i59 1024, i59 4096
+ %R = udiv i59 %X, %V
+ ret i59 %R
+}
diff --git a/test/Transforms/InstCombine/apint-div2.ll b/test/Transforms/InstCombine/apint-div2.ll
new file mode 100644
index 0000000..2aa2c3a
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-div2.ll
@@ -0,0 +1,22 @@
+; This test makes sure that div instructions are properly eliminated.
+; This test is for Integer BitWidth >= 64 && BitWidth <= 1024.
+;
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep div
+
+
+define i333 @test1(i333 %X) {
+ %Y = udiv i333 %X, 70368744177664
+ ret i333 %Y
+}
+
+define i499 @test2(i499 %X) {
+ %tmp.0 = shl i499 4096, 197
+ %Y = udiv i499 %X, %tmp.0
+ ret i499 %Y
+}
+
+define i599 @test3(i599 %X, i1 %C) {
+ %V = select i1 %C, i599 70368744177664, i599 4096
+ %R = udiv i599 %X, %V
+ ret i599 %R
+}
diff --git a/test/Transforms/InstCombine/apint-elim-logicalops.ll b/test/Transforms/InstCombine/apint-elim-logicalops.ll
new file mode 100644
index 0000000..13d032c
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-elim-logicalops.ll
@@ -0,0 +1,39 @@
+; Test that elimination of logical operators works with
+; arbitrary precision integers.
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: not grep {(and\|xor\|add\|shl\|shr)}
+; END.
+
+define i33 @test1(i33 %x) {
+ %tmp.1 = and i33 %x, 65535 ; <i33> [#uses=1]
+ %tmp.2 = xor i33 %tmp.1, -32768 ; <i33> [#uses=1]
+ %tmp.3 = add i33 %tmp.2, 32768 ; <i33> [#uses=1]
+ ret i33 %tmp.3
+}
+
+define i33 @test2(i33 %x) {
+ %tmp.1 = and i33 %x, 65535 ; <i33> [#uses=1]
+ %tmp.2 = xor i33 %tmp.1, 32768 ; <i33> [#uses=1]
+ %tmp.3 = add i33 %tmp.2, -32768 ; <i33> [#uses=1]
+ ret i33 %tmp.3
+}
+
+define i33 @test3(i16 %P) {
+ %tmp.1 = zext i16 %P to i33 ; <i33> [#uses=1]
+ %tmp.4 = xor i33 %tmp.1, 32768 ; <i33> [#uses=1]
+ %tmp.5 = add i33 %tmp.4, -32768 ; <i33> [#uses=1]
+ ret i33 %tmp.5
+}
+
+define i33 @test5(i33 %x) {
+ %tmp.1 = and i33 %x, 254
+ %tmp.2 = xor i33 %tmp.1, 128
+ %tmp.3 = add i33 %tmp.2, -128
+ ret i33 %tmp.3
+}
+
+define i33 @test6(i33 %x) {
+ %tmp.2 = shl i33 %x, 16 ; <i33> [#uses=1]
+ %tmp.4 = lshr i33 %tmp.2, 16 ; <i33> [#uses=1]
+ ret i33 %tmp.4
+}
diff --git a/test/Transforms/InstCombine/apint-mul1.ll b/test/Transforms/InstCombine/apint-mul1.ll
new file mode 100644
index 0000000..36b1102
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-mul1.ll
@@ -0,0 +1,11 @@
+; This test makes sure that mul instructions are properly eliminated.
+; This test is for Integer BitWidth < 64 && BitWidth % 2 != 0.
+;
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep mul
+
+
+define i17 @test1(i17 %X) {
+ %Y = mul i17 %X, 1024
+ ret i17 %Y
+}
diff --git a/test/Transforms/InstCombine/apint-mul2.ll b/test/Transforms/InstCombine/apint-mul2.ll
new file mode 100644
index 0000000..72fd97a
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-mul2.ll
@@ -0,0 +1,12 @@
+; This test makes sure that mul instructions are properly eliminated.
+; This test is for Integer BitWidth >= 64 && BitWidth <= 1024.
+;
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep mul
+
+
+define i177 @test1(i177 %X) {
+ %C = shl i177 1, 155
+ %Y = mul i177 %X, %C
+ ret i177 %Y
+}
diff --git a/test/Transforms/InstCombine/apint-not.ll b/test/Transforms/InstCombine/apint-not.ll
new file mode 100644
index 0000000..f557fa8
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-not.ll
@@ -0,0 +1,42 @@
+; This test makes sure that the xor instructions are properly eliminated
+; when arbitrary precision integers are used.
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep xor
+
+define i33 @test1(i33 %A) {
+ %B = xor i33 %A, -1
+ %C = xor i33 %B, -1
+ ret i33 %C
+}
+
+define i1 @test2(i52 %A, i52 %B) {
+	%cond = icmp ule i52 %A, %B     ; Can change into ugt
+ %Ret = xor i1 %cond, true
+ ret i1 %Ret
+}
+
+; Test that demorgans law can be instcombined
+define i47 @test3(i47 %A, i47 %B) {
+ %a = xor i47 %A, -1
+ %b = xor i47 %B, -1
+ %c = and i47 %a, %b
+ %d = xor i47 %c, -1
+ ret i47 %d
+}
+
+; Test that demorgans law can work with constants
+define i61 @test4(i61 %A, i61 %B) {
+ %a = xor i61 %A, -1
+ %c = and i61 %a, 5 ; 5 = ~c2
+ %d = xor i61 %c, -1
+ ret i61 %d
+}
+
+; test the mirror of demorgans law...
+define i71 @test5(i71 %A, i71 %B) {
+ %a = xor i71 %A, -1
+ %b = xor i71 %B, -1
+ %c = or i71 %a, %b
+ %d = xor i71 %c, -1
+ ret i71 %d
+}
diff --git a/test/Transforms/InstCombine/apint-or1.ll b/test/Transforms/InstCombine/apint-or1.ll
new file mode 100644
index 0000000..51b87fe
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-or1.ll
@@ -0,0 +1,36 @@
+; This test makes sure that or instructions are properly eliminated.
+; This test is for Integer BitWidth <= 64 && BitWidth % 2 != 0.
+;
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep or
+
+
+define i7 @test0(i7 %X) {
+ %Y = or i7 %X, 0
+ ret i7 %Y
+}
+
+define i17 @test1(i17 %X) {
+ %Y = or i17 %X, -1
+ ret i17 %Y
+}
+
+define i23 @test2(i23 %A) {
+ ;; A | ~A == -1
+ %NotA = xor i23 -1, %A
+ %B = or i23 %A, %NotA
+ ret i23 %B
+}
+
+define i39 @test3(i39 %V, i39 %M) {
+ ;; If we have: ((V + N) & C1) | (V & C2)
+ ;; .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
+ ;; replace with V+N.
+ %C1 = xor i39 274877906943, -1 ;; C2 = 274877906943
+ %N = and i39 %M, 274877906944
+ %A = add i39 %V, %N
+ %B = and i39 %A, %C1
+ %D = and i39 %V, 274877906943
+ %R = or i39 %B, %D
+ ret i39 %R
+}
diff --git a/test/Transforms/InstCombine/apint-or2.ll b/test/Transforms/InstCombine/apint-or2.ll
new file mode 100644
index 0000000..21dc565
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-or2.ll
@@ -0,0 +1,35 @@
+; This test makes sure that or instructions are properly eliminated.
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+;
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep or
+
+
+define i777 @test0(i777 %X) {
+ %Y = or i777 %X, 0
+ ret i777 %Y
+}
+
+define i117 @test1(i117 %X) {
+ %Y = or i117 %X, -1
+ ret i117 %Y
+}
+
+define i1023 @test2(i1023 %A) {
+ ;; A | ~A == -1
+ %NotA = xor i1023 -1, %A
+ %B = or i1023 %A, %NotA
+ ret i1023 %B
+}
+
+define i399 @test3(i399 %V, i399 %M) {
+ ;; If we have: ((V + N) & C1) | (V & C2)
+ ;; .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
+ ;; replace with V+N.
+ %C1 = xor i399 274877906943, -1 ;; C2 = 274877906943
+ %N = and i399 %M, 18446742974197923840
+ %A = add i399 %V, %N
+ %B = and i399 %A, %C1
+ %D = and i399 %V, 274877906943
+ %R = or i399 %B, %D
+ ret i399 %R
+}
diff --git a/test/Transforms/InstCombine/apint-rem1.ll b/test/Transforms/InstCombine/apint-rem1.ll
new file mode 100644
index 0000000..2ec8c74
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-rem1.ll
@@ -0,0 +1,22 @@
+; This test makes sure that these instructions are properly eliminated.
+; This test is for Integer BitWidth < 64 && BitWidth % 2 != 0.
+;
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep rem
+
+
+define i33 @test1(i33 %A) {
+ %B = urem i33 %A, 4096
+ ret i33 %B
+}
+
+define i49 @test2(i49 %A) {
+ %B = shl i49 4096, 11
+ %Y = urem i49 %A, %B
+ ret i49 %Y
+}
+
+define i59 @test3(i59 %X, i1 %C) {
+ %V = select i1 %C, i59 70368744177664, i59 4096
+ %R = urem i59 %X, %V
+ ret i59 %R
+}
diff --git a/test/Transforms/InstCombine/apint-rem2.ll b/test/Transforms/InstCombine/apint-rem2.ll
new file mode 100644
index 0000000..4d22c22
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-rem2.ll
@@ -0,0 +1,22 @@
+; This test makes sure that these instructions are properly eliminated.
+; This test is for Integer BitWidth >= 64 && BitWidth <= 1024.
+;
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep rem
+
+
+define i333 @test1(i333 %A) {
+ %B = urem i333 %A, 70368744177664
+ ret i333 %B
+}
+
+define i499 @test2(i499 %A) {
+ %B = shl i499 4096, 111
+ %Y = urem i499 %A, %B
+ ret i499 %Y
+}
+
+define i599 @test3(i599 %X, i1 %C) {
+ %V = select i1 %C, i599 70368744177664, i599 4096
+ %R = urem i599 %X, %V
+ ret i599 %R
+}
diff --git a/test/Transforms/InstCombine/apint-select.ll b/test/Transforms/InstCombine/apint-select.ll
new file mode 100644
index 0000000..c2399fb
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-select.ll
@@ -0,0 +1,44 @@
+; This test makes sure that these instructions are properly eliminated.
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep select
+
+
+define i41 @test1(i1 %C) {
+ %V = select i1 %C, i41 1, i41 0 ; V = C
+ ret i41 %V
+}
+
+define i999 @test2(i1 %C) {
+ %V = select i1 %C, i999 0, i999 1 ; V = C
+ ret i999 %V
+}
+
+define i41 @test3(i41 %X) {
+	;; (x <s 0) ? -1 : 0 -> ashr x, 40
+ %t = icmp slt i41 %X, 0
+ %V = select i1 %t, i41 -1, i41 0
+ ret i41 %V
+}
+
+define i1023 @test4(i1023 %X) {
+	;; (x <s 0) ? -1 : 0 -> ashr x, 1022
+ %t = icmp slt i1023 %X, 0
+ %V = select i1 %t, i1023 -1, i1023 0
+ ret i1023 %V
+}
+
+define i41 @test5(i41 %X) {
+	;; ((X & 32) ? 32 : 0)
+ %Y = and i41 %X, 32
+ %t = icmp ne i41 %Y, 0
+ %V = select i1 %t, i41 32, i41 0
+ ret i41 %V
+}
+
+define i1023 @test6(i1023 %X) {
+	;; ((X & 64) ? 64 : 0)
+ %Y = and i1023 %X, 64
+ %t = icmp ne i1023 %Y, 0
+ %V = select i1 %t, i1023 64, i1023 0
+ ret i1023 %V
+}
diff --git a/test/Transforms/InstCombine/apint-shift-simplify.ll b/test/Transforms/InstCombine/apint-shift-simplify.ll
new file mode 100644
index 0000000..4c352cf
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-shift-simplify.ll
@@ -0,0 +1,23 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: egrep {shl|lshr|ashr} | wc -l | grep 3
+
+define i41 @test0(i41 %A, i41 %B, i41 %C) {
+ %X = shl i41 %A, %C
+ %Y = shl i41 %B, %C
+ %Z = and i41 %X, %Y
+ ret i41 %Z
+}
+
+define i57 @test1(i57 %A, i57 %B, i57 %C) {
+ %X = lshr i57 %A, %C
+ %Y = lshr i57 %B, %C
+ %Z = or i57 %X, %Y
+ ret i57 %Z
+}
+
+define i49 @test2(i49 %A, i49 %B, i49 %C) {
+ %X = ashr i49 %A, %C
+ %Y = ashr i49 %B, %C
+ %Z = xor i49 %X, %Y
+ ret i49 %Z
+}
diff --git a/test/Transforms/InstCombine/apint-shift.ll b/test/Transforms/InstCombine/apint-shift.ll
new file mode 100644
index 0000000..afc5360
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-shift.ll
@@ -0,0 +1,191 @@
+; This test makes sure that shift instructions are properly eliminated
+; even with arbitrary precision integers.
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep sh
+; END.
+
+define i47 @test1(i47 %A) {
+ %B = shl i47 %A, 0 ; <i47> [#uses=1]
+ ret i47 %B
+}
+
+define i41 @test2(i7 %X) {
+ %A = zext i7 %X to i41 ; <i41> [#uses=1]
+ %B = shl i41 0, %A ; <i41> [#uses=1]
+ ret i41 %B
+}
+
+define i41 @test3(i41 %A) {
+ %B = ashr i41 %A, 0 ; <i41> [#uses=1]
+ ret i41 %B
+}
+
+define i39 @test4(i7 %X) {
+ %A = zext i7 %X to i39 ; <i39> [#uses=1]
+ %B = ashr i39 0, %A ; <i39> [#uses=1]
+ ret i39 %B
+}
+
+define i55 @test5(i55 %A) {
+ %B = lshr i55 %A, 55 ; <i55> [#uses=1]
+ ret i55 %B
+}
+
+define i32 @test5a(i32 %A) {
+ %B = shl i32 %A, 32 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i55 @test6(i55 %A) {
+ %B = shl i55 %A, 1 ; <i55> [#uses=1]
+ %C = mul i55 %B, 3 ; <i55> [#uses=1]
+ ret i55 %C
+}
+
+define i29 @test7(i8 %X) {
+ %A = zext i8 %X to i29 ; <i29> [#uses=1]
+ %B = ashr i29 -1, %A ; <i29> [#uses=1]
+ ret i29 %B
+}
+
+define i7 @test8(i7 %A) {
+ %B = shl i7 %A, 4 ; <i7> [#uses=1]
+ %C = shl i7 %B, 3 ; <i7> [#uses=1]
+ ret i7 %C
+}
+
+define i17 @test9(i17 %A) {
+ %B = shl i17 %A, 16 ; <i17> [#uses=1]
+ %C = lshr i17 %B, 16 ; <i17> [#uses=1]
+ ret i17 %C
+}
+
+define i19 @test10(i19 %A) {
+ %B = lshr i19 %A, 18 ; <i19> [#uses=1]
+ %C = shl i19 %B, 18 ; <i19> [#uses=1]
+ ret i19 %C
+}
+
+define i23 @test11(i23 %A) {
+ %a = mul i23 %A, 3 ; <i23> [#uses=1]
+ %B = lshr i23 %a, 11 ; <i23> [#uses=1]
+ %C = shl i23 %B, 12 ; <i23> [#uses=1]
+ ret i23 %C
+}
+
+define i47 @test12(i47 %A) {
+ %B = ashr i47 %A, 8 ; <i47> [#uses=1]
+ %C = shl i47 %B, 8 ; <i47> [#uses=1]
+ ret i47 %C
+}
+
+define i18 @test13(i18 %A) {
+ %a = mul i18 %A, 3 ; <i18> [#uses=1]
+ %B = ashr i18 %a, 8 ; <i18> [#uses=1]
+ %C = shl i18 %B, 9 ; <i18> [#uses=1]
+ ret i18 %C
+}
+
+define i35 @test14(i35 %A) {
+ %B = lshr i35 %A, 4 ; <i35> [#uses=1]
+ %C = or i35 %B, 1234 ; <i35> [#uses=1]
+ %D = shl i35 %C, 4 ; <i35> [#uses=1]
+ ret i35 %D
+}
+
+define i79 @test14a(i79 %A) {
+ %B = shl i79 %A, 4 ; <i79> [#uses=1]
+ %C = and i79 %B, 1234 ; <i79> [#uses=1]
+ %D = lshr i79 %C, 4 ; <i79> [#uses=1]
+ ret i79 %D
+}
+
+define i45 @test15(i1 %C) {
+ %A = select i1 %C, i45 3, i45 1 ; <i45> [#uses=1]
+ %V = shl i45 %A, 2 ; <i45> [#uses=1]
+ ret i45 %V
+}
+
+define i53 @test15a(i1 %X) {
+ %A = select i1 %X, i8 3, i8 1 ; <i8> [#uses=1]
+ %B = zext i8 %A to i53 ; <i53> [#uses=1]
+ %V = shl i53 64, %B ; <i53> [#uses=1]
+ ret i53 %V
+}
+
+define i1 @test16(i84 %X) {
+ %tmp.3 = ashr i84 %X, 4 ; <i84> [#uses=1]
+ %tmp.6 = and i84 %tmp.3, 1 ; <i84> [#uses=1]
+ %tmp.7 = icmp ne i84 %tmp.6, 0 ; <i1> [#uses=1]
+ ret i1 %tmp.7
+}
+
+define i1 @test17(i106 %A) {
+ %B = lshr i106 %A, 3 ; <i106> [#uses=1]
+ %C = icmp eq i106 %B, 1234 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test18(i11 %A) {
+ %B = lshr i11 %A, 10 ; <i11> [#uses=1]
+ %C = icmp eq i11 %B, 123 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test19(i37 %A) {
+ %B = ashr i37 %A, 2 ; <i37> [#uses=1]
+ %C = icmp eq i37 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test19a(i39 %A) {
+ %B = ashr i39 %A, 2 ; <i39> [#uses=1]
+ %C = icmp eq i39 %B, -1 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test20(i13 %A) {
+ %B = ashr i13 %A, 12 ; <i13> [#uses=1]
+ %C = icmp eq i13 %B, 123 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test21(i12 %A) {
+ %B = shl i12 %A, 6 ; <i12> [#uses=1]
+ %C = icmp eq i12 %B, -128 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test22(i14 %A) {
+ %B = shl i14 %A, 7 ; <i14> [#uses=1]
+ %C = icmp eq i14 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i11 @test23(i44 %A) {
+ %B = shl i44 %A, 33 ; <i44> [#uses=1]
+ %C = ashr i44 %B, 33 ; <i44> [#uses=1]
+	%D = trunc i44 %C to i11	; <i11> [#uses=1]
+ ret i11 %D
+}
+
+define i17 @test24(i17 %X) {
+ %Y = and i17 %X, -5 ; <i17> [#uses=1]
+ %Z = shl i17 %Y, 9 ; <i17> [#uses=1]
+ %Q = ashr i17 %Z, 9 ; <i17> [#uses=1]
+ ret i17 %Q
+}
+
+define i37 @test25(i37 %tmp.2, i37 %AA) {
+ %x = lshr i37 %AA, 17 ; <i37> [#uses=1]
+ %tmp.3 = lshr i37 %tmp.2, 17 ; <i37> [#uses=1]
+ %tmp.5 = add i37 %tmp.3, %x ; <i37> [#uses=1]
+ %tmp.6 = shl i37 %tmp.5, 17 ; <i37> [#uses=1]
+ ret i37 %tmp.6
+}
+
+define i40 @test26(i40 %A) {
+ %B = lshr i40 %A, 1 ; <i40> [#uses=1]
+ %C = bitcast i40 %B to i40 ; <i40> [#uses=1]
+ %D = shl i40 %C, 1 ; <i40> [#uses=1]
+ ret i40 %D
+}
diff --git a/test/Transforms/InstCombine/apint-shl-trunc.ll b/test/Transforms/InstCombine/apint-shl-trunc.ll
new file mode 100644
index 0000000..a9cffde
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-shl-trunc.ll
@@ -0,0 +1,14 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep shl
+; END.
+
+define i1 @test0(i39 %X, i39 %A) {
+ %B = lshr i39 %X, %A
+ %D = trunc i39 %B to i1
+ ret i1 %D
+}
+
+define i1 @test1(i799 %X, i799 %A) {
+ %B = lshr i799 %X, %A
+ %D = trunc i799 %B to i1
+ ret i1 %D
+}
diff --git a/test/Transforms/InstCombine/apint-sub.ll b/test/Transforms/InstCombine/apint-sub.ll
new file mode 100644
index 0000000..12f366d
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-sub.ll
@@ -0,0 +1,139 @@
+; This test makes sure that sub instructions are properly eliminated
+; even with arbitrary precision integers.
+;
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: grep -v {sub i19 %Cok, %Bok} | not grep sub
+; END.
+
+define i23 @test1(i23 %A) {
+ %B = sub i23 %A, %A ; <i23> [#uses=1]
+ ret i23 %B
+}
+
+define i47 @test2(i47 %A) {
+ %B = sub i47 %A, 0 ; <i47> [#uses=1]
+ ret i47 %B
+}
+
+define i97 @test3(i97 %A) {
+ %B = sub i97 0, %A ; <i97> [#uses=1]
+ %C = sub i97 0, %B ; <i97> [#uses=1]
+ ret i97 %C
+}
+
+define i108 @test4(i108 %A, i108 %x) {
+ %B = sub i108 0, %A ; <i108> [#uses=1]
+ %C = sub i108 %x, %B ; <i108> [#uses=1]
+ ret i108 %C
+}
+
+define i19 @test5(i19 %A, i19 %Bok, i19 %Cok) {
+ %D = sub i19 %Bok, %Cok ; <i19> [#uses=1]
+ %E = sub i19 %A, %D ; <i19> [#uses=1]
+ ret i19 %E
+}
+
+define i57 @test6(i57 %A, i57 %B) {
+ %C = and i57 %A, %B ; <i57> [#uses=1]
+ %D = sub i57 %A, %C ; <i57> [#uses=1]
+ ret i57 %D
+}
+
+define i77 @test7(i77 %A) {
+ %B = sub i77 -1, %A ; <i77> [#uses=1]
+ ret i77 %B
+}
+
+define i27 @test8(i27 %A) {
+ %B = mul i27 9, %A ; <i27> [#uses=1]
+ %C = sub i27 %B, %A ; <i27> [#uses=1]
+ ret i27 %C
+}
+
+define i42 @test9(i42 %A) {
+ %B = mul i42 3, %A ; <i42> [#uses=1]
+ %C = sub i42 %A, %B ; <i42> [#uses=1]
+ ret i42 %C
+}
+
+define i124 @test10(i124 %A, i124 %B) {
+ %C = sub i124 0, %A ; <i124> [#uses=1]
+ %D = sub i124 0, %B ; <i124> [#uses=1]
+ %E = mul i124 %C, %D ; <i124> [#uses=1]
+ ret i124 %E
+}
+
+define i55 @test10a(i55 %A) {
+ %C = sub i55 0, %A ; <i55> [#uses=1]
+ %E = mul i55 %C, 7 ; <i55> [#uses=1]
+ ret i55 %E
+}
+
+define i1 @test11(i9 %A, i9 %B) {
+ %C = sub i9 %A, %B ; <i9> [#uses=1]
+ %cD = icmp ne i9 %C, 0 ; <i1> [#uses=1]
+ ret i1 %cD
+}
+
+define i43 @test12(i43 %A) {
+ %B = ashr i43 %A, 42 ; <i43> [#uses=1]
+ %C = sub i43 0, %B ; <i43> [#uses=1]
+ ret i43 %C
+}
+
+define i79 @test13(i79 %A) {
+ %B = lshr i79 %A, 78 ; <i79> [#uses=1]
+ %C = sub i79 0, %B ; <i79> [#uses=1]
+ ret i79 %C
+}
+
+define i1024 @test14(i1024 %A) {
+ %B = lshr i1024 %A, 1023 ; <i1024> [#uses=1]
+ %C = bitcast i1024 %B to i1024 ; <i1024> [#uses=1]
+ %D = sub i1024 0, %C ; <i1024> [#uses=1]
+ ret i1024 %D
+}
+
+define i14 @test15(i14 %A, i14 %B) {
+ %C = sub i14 0, %A ; <i14> [#uses=1]
+ %D = srem i14 %B, %C ; <i14> [#uses=1]
+ ret i14 %D
+}
+
+define i51 @test16(i51 %A) {
+ %X = sdiv i51 %A, 1123 ; <i51> [#uses=1]
+ %Y = sub i51 0, %X ; <i51> [#uses=1]
+ ret i51 %Y
+}
+
+define i25 @test17(i25 %A) {
+ %B = sub i25 0, %A ; <i25> [#uses=1]
+ %C = sdiv i25 %B, 1234 ; <i25> [#uses=1]
+ ret i25 %C
+}
+
+define i128 @test18(i128 %Y) {
+ %tmp.4 = shl i128 %Y, 2 ; <i128> [#uses=1]
+ %tmp.12 = shl i128 %Y, 2 ; <i128> [#uses=1]
+ %tmp.8 = sub i128 %tmp.4, %tmp.12 ; <i128> [#uses=1]
+ ret i128 %tmp.8
+}
+
+define i39 @test19(i39 %X, i39 %Y) {
+ %Z = sub i39 %X, %Y ; <i39> [#uses=1]
+ %Q = add i39 %Z, %Y ; <i39> [#uses=1]
+ ret i39 %Q
+}
+
+define i1 @test20(i33 %g, i33 %h) {
+ %tmp.2 = sub i33 %g, %h ; <i33> [#uses=1]
+ %tmp.4 = icmp ne i33 %tmp.2, %g ; <i1> [#uses=1]
+ ret i1 %tmp.4
+}
+
+define i1 @test21(i256 %g, i256 %h) {
+ %tmp.2 = sub i256 %g, %h ; <i256> [#uses=1]
+ %tmp.4 = icmp ne i256 %tmp.2, %g; <i1> [#uses=1]
+ ret i1 %tmp.4
+}
diff --git a/test/Transforms/InstCombine/apint-xor1.ll b/test/Transforms/InstCombine/apint-xor1.ll
new file mode 100644
index 0000000..5ddf5cf
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-xor1.ll
@@ -0,0 +1,50 @@
+; This test makes sure that xor instructions are properly eliminated.
+; This test is for Integer BitWidth <= 64 && BitWidth % 8 != 0.
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep {xor }
+
+
+define i47 @test1(i47 %A, i47 %B) {
+ ;; (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
+ %A1 = and i47 %A, 70368744177664
+ %B1 = and i47 %B, 70368744177661
+ %C1 = xor i47 %A1, %B1
+ ret i47 %C1
+}
+
+define i15 @test2(i15 %x) {
+ %tmp.2 = xor i15 %x, 0
+ ret i15 %tmp.2
+}
+
+define i23 @test3(i23 %x) {
+ %tmp.2 = xor i23 %x, %x
+ ret i23 %tmp.2
+}
+
+define i37 @test4(i37 %x) {
+ ; x ^ ~x == -1
+ %NotX = xor i37 -1, %x
+ %B = xor i37 %x, %NotX
+ ret i37 %B
+}
+
+define i7 @test5(i7 %A) {
+ ;; (A|B)^B == A & (~B)
+ %t1 = or i7 %A, 23
+ %r = xor i7 %t1, 23
+ ret i7 %r
+}
+
+define i7 @test6(i7 %A) {
+ %t1 = xor i7 %A, 23
+ %r = xor i7 %t1, 23
+ ret i7 %r
+}
+
+define i47 @test7(i47 %A) {
+ ;; (A | C1) ^ C2 -> (A | C1) & ~C2 iff (C1&C2) == C2
+ %B1 = or i47 %A, 70368744177663
+ %C1 = xor i47 %B1, 703687463
+ ret i47 %C1
+}
diff --git a/test/Transforms/InstCombine/apint-xor2.ll b/test/Transforms/InstCombine/apint-xor2.ll
new file mode 100644
index 0000000..4d2d415
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-xor2.ll
@@ -0,0 +1,51 @@
+; This test makes sure that xor instructions are properly eliminated.
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep {xor }
+; END.
+
+
+define i447 @test1(i447 %A, i447 %B) {
+ ;; (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
+ %A1 = and i447 %A, 70368744177664
+ %B1 = and i447 %B, 70368744177663
+ %C1 = xor i447 %A1, %B1
+ ret i447 %C1
+}
+
+define i1005 @test2(i1005 %x) {
+ %tmp.2 = xor i1005 %x, 0
+ ret i1005 %tmp.2
+}
+
+define i123 @test3(i123 %x) {
+ %tmp.2 = xor i123 %x, %x
+ ret i123 %tmp.2
+}
+
+define i737 @test4(i737 %x) {
+ ; x ^ ~x == -1
+ %NotX = xor i737 -1, %x
+ %B = xor i737 %x, %NotX
+ ret i737 %B
+}
+
+define i700 @test5(i700 %A) {
+ ;; (A|B)^B == A & (~B)
+ %t1 = or i700 %A, 288230376151711743
+ %r = xor i700 %t1, 288230376151711743
+ ret i700 %r
+}
+
+define i77 @test6(i77 %A) {
+ %t1 = xor i77 %A, 23
+ %r = xor i77 %t1, 23
+ ret i77 %r
+}
+
+define i1023 @test7(i1023 %A) {
+ ;; (A | C1) ^ C2 -> (A | C1) & ~C2 iff (C1&C2) == C2
+ %B1 = or i1023 %A, 70368744177663
+ %C1 = xor i1023 %B1, 703687463
+ ret i1023 %C1
+}
diff --git a/test/Transforms/InstCombine/apint-zext1.ll b/test/Transforms/InstCombine/apint-zext1.ll
new file mode 100644
index 0000000..03330c7
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-zext1.ll
@@ -0,0 +1,9 @@
+; Tests to make sure elimination of casts is working correctly
+; This test is for Integer BitWidth <= 64 && BitWidth % 2 != 0.
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | notcast {} {%c1.*}
+
+define i47 @test_sext_zext(i11 %A) {
+ %c1 = zext i11 %A to i39
+ %c2 = sext i39 %c1 to i47
+ ret i47 %c2
+}
diff --git a/test/Transforms/InstCombine/apint-zext2.ll b/test/Transforms/InstCombine/apint-zext2.ll
new file mode 100644
index 0000000..8350d10
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-zext2.ll
@@ -0,0 +1,9 @@
+; Tests to make sure elimination of casts is working correctly
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | notcast {} {%c1.*}
+
+define i1024 @test_sext_zext(i77 %A) {
+ %c1 = zext i77 %A to i533
+ %c2 = sext i533 %c1 to i1024
+ ret i1024 %c2
+}
diff --git a/test/Transforms/InstCombine/binop-cast.ll b/test/Transforms/InstCombine/binop-cast.ll
new file mode 100644
index 0000000..ea5299b
--- /dev/null
+++ b/test/Transforms/InstCombine/binop-cast.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | notcast
+
+define i32 @testAdd(i32 %X, i32 %Y) {
+ %tmp = add i32 %X, %Y
+ %tmp.l = bitcast i32 %tmp to i32
+ ret i32 %tmp.l
+}
diff --git a/test/Transforms/InstCombine/bit-tracking.ll b/test/Transforms/InstCombine/bit-tracking.ll
new file mode 100644
index 0000000..2575ae5
--- /dev/null
+++ b/test/Transforms/InstCombine/bit-tracking.ll
@@ -0,0 +1,26 @@
+; This file contains various testcases that require tracking whether bits are
+; set or cleared by various instructions.
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -instcombine | llvm-dis |\
+; RUN: not grep %ELIM
+
+; Reduce down to a single XOR
+int %test3(int %B) {
+ %ELIMinc = and int %B, 1
+ %tmp.5 = xor int %ELIMinc, 1
+ %ELIM7 = and int %B, -2
+ %tmp.8 = or int %tmp.5, %ELIM7
+ ret int %tmp.8
+}
+
+; Finally, a bigger case where we chain things together. This corresponds to
+; incrementing a single-bit bitfield, which should become just an xor.
+int %test4(int %B) {
+ %ELIM3 = shl int %B, ubyte 31
+ %ELIM4 = shr int %ELIM3, ubyte 31
+ %inc = add int %ELIM4, 1
+ %ELIM5 = and int %inc, 1
+ %ELIM7 = and int %B, -2
+ %tmp.8 = or int %ELIM5, %ELIM7
+ ret int %tmp.8
+}
+
diff --git a/test/Transforms/InstCombine/bitcast-gep.ll b/test/Transforms/InstCombine/bitcast-gep.ll
new file mode 100644
index 0000000..5a514ab
--- /dev/null
+++ b/test/Transforms/InstCombine/bitcast-gep.ll
@@ -0,0 +1,19 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep bitcast
+; RUN: llvm-as < %s | opt -instcombine -scalarrepl | llvm-dis | grep {ret i8. %v}
+; PR1345
+
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
+target triple = "powerpc-apple-darwin8.8.0"
+
+define i8* @test(i8* %v) {
+ %A = alloca [4 x i8*], align 16 ; <[4 x i8*]*> [#uses=3]
+ %B = getelementptr [4 x i8*]* %A, i32 0, i32 0 ; <i8**> [#uses=1]
+ store i8* null, i8** %B
+ %C = bitcast [4 x i8*]* %A to { [16 x i8] }* ; <{ [16 x i8] }*> [#uses=1]
+ %D = getelementptr { [16 x i8] }* %C, i32 0, i32 0, i32 8 ; <i8*> [#uses=1]
+ %E = bitcast i8* %D to i8** ; <i8**> [#uses=1]
+ store i8* %v, i8** %E
+ %F = getelementptr [4 x i8*]* %A, i32 0, i32 2 ; <i8**> [#uses=1]
+ %G = load i8** %F ; <i8*> [#uses=1]
+ ret i8* %G
+}
diff --git a/test/Transforms/InstCombine/bitcount.ll b/test/Transforms/InstCombine/bitcount.ll
new file mode 100644
index 0000000..da53920
--- /dev/null
+++ b/test/Transforms/InstCombine/bitcount.ll
@@ -0,0 +1,17 @@
+; Tests to make sure bit counts of constants are folded
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {ret i32 19}
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: grep -v declare | not grep llvm.ct
+
+declare i32 @llvm.ctpop.i31(i31 %val)
+declare i32 @llvm.cttz.i32(i32 %val)
+declare i32 @llvm.ctlz.i33(i33 %val)
+
+define i32 @test(i32 %A) {
+ %c1 = call i32 @llvm.ctpop.i31(i31 12415124)
+ %c2 = call i32 @llvm.cttz.i32(i32 87359874)
+ %c3 = call i32 @llvm.ctlz.i33(i33 87359874)
+ %r1 = add i32 %c1, %c2
+ %r2 = add i32 %r1, %c3
+ ret i32 %r2
+}
diff --git a/test/Transforms/InstCombine/bittest.ll b/test/Transforms/InstCombine/bittest.ll
new file mode 100644
index 0000000..1ba4289
--- /dev/null
+++ b/test/Transforms/InstCombine/bittest.ll
@@ -0,0 +1,29 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -simplifycfg | llvm-dis |\
+; RUN: not grep {call void %abort}
+
+%b_rec.0 = external global int
+
+void %_Z12h000007_testv(uint *%P) {
+entry:
+ %tmp.2 = load int* %b_rec.0 ; <int> [#uses=1]
+ %tmp.9 = or int %tmp.2, -989855744 ; <int> [#uses=2]
+ %tmp.16 = and int %tmp.9, -805306369 ; <int> [#uses=2]
+ %tmp.17 = and int %tmp.9, -973078529 ; <int> [#uses=1]
+ store int %tmp.17, int* %b_rec.0
+ %tmp.17.shrunk = cast int %tmp.16 to uint ; <uint> [#uses=1]
+ %tmp.22 = and uint %tmp.17.shrunk, 3221225472 ; <uint> [#uses=1]
+ %tmp.23 = seteq uint %tmp.22, 3221225472 ; <bool> [#uses=1]
+ br bool %tmp.23, label %endif.0, label %then.0
+
+then.0: ; preds = %entry
+ tail call void %abort( )
+ unreachable
+
+endif.0: ; preds = %entry
+ %tmp.17.shrunk2 = cast int %tmp.16 to uint ; <uint> [#uses=1]
+ %tmp.27.mask = and uint %tmp.17.shrunk2, 100663295 ; <uint> [#uses=1]
+ store uint %tmp.27.mask, uint* %P
+ ret void
+}
+
+declare void %abort()
diff --git a/test/Transforms/InstCombine/bswap-fold.ll b/test/Transforms/InstCombine/bswap-fold.ll
new file mode 100644
index 0000000..1552386
--- /dev/null
+++ b/test/Transforms/InstCombine/bswap-fold.ll
@@ -0,0 +1,28 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep ret | wc -l | grep 3
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep call.*bswap
+
+bool %test1(ushort %tmp2) {
+ %tmp10 = call ushort %llvm.bswap.i16( ushort %tmp2 )
+ %tmp = seteq ushort %tmp10, 1
+ ret bool %tmp
+}
+
+bool %test2(uint %tmp) {
+ %tmp34 = tail call uint %llvm.bswap.i32( uint %tmp )
+ %tmp = seteq uint %tmp34, 1
+ ret bool %tmp
+}
+
+declare uint %llvm.bswap.i32(uint)
+
+bool %test3(ulong %tmp) {
+ %tmp34 = tail call ulong %llvm.bswap.i64( ulong %tmp )
+ %tmp = seteq ulong %tmp34, 1
+ ret bool %tmp
+}
+
+declare ulong %llvm.bswap.i64(ulong)
+
+declare ushort %llvm.bswap.i16(ushort)
diff --git a/test/Transforms/InstCombine/bswap.ll b/test/Transforms/InstCombine/bswap.ll
new file mode 100644
index 0000000..fde5ff0
--- /dev/null
+++ b/test/Transforms/InstCombine/bswap.ll
@@ -0,0 +1,62 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep {call.*llvm.bswap} | wc -l | grep 5
+; END.
+
+uint %test1(uint %i) {
+ %tmp1 = shr uint %i, ubyte 24 ; <uint> [#uses=1]
+ %tmp3 = shr uint %i, ubyte 8 ; <uint> [#uses=1]
+ %tmp4 = and uint %tmp3, 65280 ; <uint> [#uses=1]
+ %tmp5 = or uint %tmp1, %tmp4 ; <uint> [#uses=1]
+ %tmp7 = shl uint %i, ubyte 8 ; <uint> [#uses=1]
+ %tmp8 = and uint %tmp7, 16711680 ; <uint> [#uses=1]
+ %tmp9 = or uint %tmp5, %tmp8 ; <uint> [#uses=1]
+ %tmp11 = shl uint %i, ubyte 24 ; <uint> [#uses=1]
+ %tmp12 = or uint %tmp9, %tmp11 ; <uint> [#uses=1]
+ ret uint %tmp12
+}
+
+uint %test2(uint %arg) {
+ %tmp2 = shl uint %arg, ubyte 24 ; <uint> [#uses=1]
+ %tmp4 = shl uint %arg, ubyte 8 ; <uint> [#uses=1]
+ %tmp5 = and uint %tmp4, 16711680 ; <uint> [#uses=1]
+ %tmp6 = or uint %tmp2, %tmp5 ; <uint> [#uses=1]
+ %tmp8 = shr uint %arg, ubyte 8 ; <uint> [#uses=1]
+ %tmp9 = and uint %tmp8, 65280 ; <uint> [#uses=1]
+ %tmp10 = or uint %tmp6, %tmp9 ; <uint> [#uses=1]
+ %tmp12 = shr uint %arg, ubyte 24 ; <uint> [#uses=1]
+ %tmp14 = or uint %tmp10, %tmp12 ; <uint> [#uses=1]
+ ret uint %tmp14
+}
+
+ushort %test3(ushort %s) {
+ %tmp2 = shr ushort %s, ubyte 8
+ %tmp4 = shl ushort %s, ubyte 8
+ %tmp5 = or ushort %tmp2, %tmp4
+ ret ushort %tmp5
+}
+
+ushort %test4(ushort %s) {
+ %tmp2 = shr ushort %s, ubyte 8
+ %tmp4 = shl ushort %s, ubyte 8
+ %tmp5 = or ushort %tmp4, %tmp2
+ ret ushort %tmp5
+}
+
+; unsigned short test5(unsigned short a) {
+; return ((a & 0xff00) >> 8 | (a & 0x00ff) << 8);
+;}
+ushort %test5(ushort %a) {
+ %tmp = zext ushort %a to int
+ %tmp1 = and int %tmp, 65280
+ %tmp2 = ashr int %tmp1, ubyte 8
+ %tmp2 = trunc int %tmp2 to short
+ %tmp4 = and int %tmp, 255
+ %tmp5 = shl int %tmp4, ubyte 8
+ %tmp5 = trunc int %tmp5 to short
+ %tmp = or short %tmp2, %tmp5
+ %tmp6 = bitcast short %tmp to ushort
+ %tmp6 = zext ushort %tmp6 to int
+ %retval = trunc int %tmp6 to ushort
+ ret ushort %retval
+}
+
diff --git a/test/Transforms/InstCombine/call-cast-target.ll b/test/Transforms/InstCombine/call-cast-target.ll
new file mode 100644
index 0000000..6d96490
--- /dev/null
+++ b/test/Transforms/InstCombine/call-cast-target.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep call | not grep bitcast
+
+target endian = little
+target pointersize = 32
+target triple = "i686-pc-linux-gnu"
+
+implementation ; Functions:
+
+int %main() {
+entry:
+ %tmp = call int cast (sbyte* (int*)* %ctime to int (int*)*)( int* null )
+ ret int %tmp
+}
+
+declare sbyte* %ctime(int*)
diff --git a/test/Transforms/InstCombine/call-intrinsics.ll b/test/Transforms/InstCombine/call-intrinsics.ll
new file mode 100644
index 0000000..e854998
--- /dev/null
+++ b/test/Transforms/InstCombine/call-intrinsics.ll
@@ -0,0 +1,17 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis
+
+declare void %llvm.memmove.i32(sbyte*, sbyte*, uint, uint)
+declare void %llvm.memcpy.i32(sbyte*, sbyte*, uint, uint)
+declare void %llvm.memset.i32(sbyte*, ubyte, uint, uint)
+
+%X = global sbyte 0
+%Y = global sbyte 12
+
+void %zero_byte_test() {
+ ; These process zero bytes, so they are a noop.
+ call void %llvm.memmove.i32(sbyte* %X, sbyte* %Y, uint 0, uint 100)
+ call void %llvm.memcpy.i32(sbyte* %X, sbyte* %Y, uint 0, uint 100)
+ call void %llvm.memset.i32(sbyte* %X, ubyte 123, uint 0, uint 100)
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/call.ll b/test/Transforms/InstCombine/call.ll
new file mode 100644
index 0000000..1570165
--- /dev/null
+++ b/test/Transforms/InstCombine/call.ll
@@ -0,0 +1,58 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep call | notcast
+; END.
+
+implementation
+
+; Simple case, argument translatable without changing the value
+declare void %test1a(sbyte *%A)
+void %test1(int *%A) {
+ call void(int*)* cast (void(sbyte*)* %test1a to void(int*)*)(int* %A)
+ ret void
+}
+
+; More complex case, translate argument because of resolution. This is safe
+; because we have the body of the function
+void %test2a(sbyte %A) { ret void }
+int %test2(int %A) {
+ call void(int)* cast (void(sbyte)* %test2a to void(int)*)(int %A)
+ ret int %A
+}
+
+; Resolving this should insert a cast from sbyte to int, following the C
+; promotion rules.
+declare void %test3a(sbyte %A, ...)
+void %test3(sbyte %A, sbyte %B) {
+ call void(sbyte, sbyte)* cast (void(sbyte,...)* %test3a to void(sbyte,sbyte)*)(sbyte %A, sbyte %B)
+ ret void
+}
+
+; test conversion of return value...
+sbyte %test4a() { ret sbyte 0 }
+int %test4() {
+ %X = call int()* cast (sbyte()* %test4a to int()*)()
+ ret int %X
+}
+
+; test conversion of return value... no value conversion occurs so we can do
+; this with just a prototype...
+declare uint %test5a()
+int %test5() {
+ %X = call int()* cast (uint()* %test5a to int()*)()
+ ret int %X
+}
+
+; test addition of new arguments...
+declare int %test6a(int %X)
+int %test6() {
+ %X = call int()* cast (int(int)* %test6a to int()*)()
+ ret int %X
+}
+
+; test removal of arguments, only can happen with a function body
+void %test7a() { ret void }
+void %test7() {
+ call void(int)* cast (void()* %test7a to void(int)*)(int 5)
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/call2.ll b/test/Transforms/InstCombine/call2.ll
new file mode 100644
index 0000000..4ba840f
--- /dev/null
+++ b/test/Transforms/InstCombine/call2.ll
@@ -0,0 +1,27 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis
+
+; This used to crash trying to do a double-to-pointer conversion
+define i32 @bar() {
+entry:
+ %retval = alloca i32, align 4 ; <i32*> [#uses=1]
+ "alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp = call i32 (...)* bitcast (i32 (i8*)* @f to i32 (...)*)( double 3.000000e+00 ) ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load i32* %retval ; <i32> [#uses=1]
+ ret i32 %retval1
+}
+
+define i32 @f(i8* %p) {
+entry:
+ %p_addr = alloca i8* ; <i8**> [#uses=1]
+ %retval = alloca i32, align 4 ; <i32*> [#uses=1]
+ "alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store i8* %p, i8** %p_addr
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load i32* %retval ; <i32> [#uses=1]
+ ret i32 %retval1
+}
diff --git a/test/Transforms/InstCombine/canonicalize_branch.ll b/test/Transforms/InstCombine/canonicalize_branch.ll
new file mode 100644
index 0000000..032e293
--- /dev/null
+++ b/test/Transforms/InstCombine/canonicalize_branch.ll
@@ -0,0 +1,28 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep {setne\|setle\|setge}
+
+int %test1(uint %X, uint %Y) {
+ %C = setne uint %X, %Y
+ br bool %C, label %T, label %F
+T:
+ ret int 12
+F:
+ ret int 123
+}
+
+int %test2(uint %X, uint %Y) {
+ %C = setle uint %X, %Y
+ br bool %C, label %T, label %F
+T:
+ ret int 12
+F:
+ ret int 123
+}
+int %test3(uint %X, uint %Y) {
+ %C = setge uint %X, %Y
+ br bool %C, label %T, label %F
+T:
+ ret int 12
+F:
+ ret int 123
+}
diff --git a/test/Transforms/InstCombine/cast-and-cast.ll b/test/Transforms/InstCombine/cast-and-cast.ll
new file mode 100644
index 0000000..8da9d33
--- /dev/null
+++ b/test/Transforms/InstCombine/cast-and-cast.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep bitcast
+
+bool %test1(uint %val) {
+ %t1 = bitcast uint %val to int
+ %t2 = and int %t1, 1
+ %t3 = trunc int %t2 to bool
+ ret bool %t3
+}
+
+short %test2(uint %val) {
+  %t1 = bitcast uint %val to int
+  %t2 = and int %t1, 1
+  %t3 = trunc int %t2 to short
+  ret short %t3
+}
diff --git a/test/Transforms/InstCombine/cast-cast-to-and.ll b/test/Transforms/InstCombine/cast-cast-to-and.ll
new file mode 100644
index 0000000..97386bb
--- /dev/null
+++ b/test/Transforms/InstCombine/cast-cast-to-and.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep ubyte
+
+int %test1(uint %X) {
+ %Y = cast uint %X to ubyte ;; Turn into an AND
+ %Z = cast ubyte %Y to int
+ ret int %Z
+}
+
diff --git a/test/Transforms/InstCombine/cast-load-gep.ll b/test/Transforms/InstCombine/cast-load-gep.ll
new file mode 100644
index 0000000..4fa63dd
--- /dev/null
+++ b/test/Transforms/InstCombine/cast-load-gep.ll
@@ -0,0 +1,23 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -globaldce | llvm-dis | \
+; RUN: not grep Array
+
+; Pulling the cast out of the load allows us to eliminate the load, and then
+; the whole array.
+
+%unop = type {int }
+%op = type {float}
+
+%Array = internal constant [1 x %op* (%op*)*] [ %op* (%op*)* %foo ]
+
+implementation
+
+%op* %foo(%op* %X) {
+ ret %op* %X
+}
+
+%unop* %caller(%op* %O) {
+ %tmp = load %unop* (%op*)** cast ([1 x %op* (%op*)*]* %Array to %unop* (%op*)**)
+ %tmp.2 = call %unop* (%op*)* %tmp(%op* %O)
+ ret %unop* %tmp.2
+}
+
diff --git a/test/Transforms/InstCombine/cast-malloc.ll b/test/Transforms/InstCombine/cast-malloc.ll
new file mode 100644
index 0000000..565787c
--- /dev/null
+++ b/test/Transforms/InstCombine/cast-malloc.ll
@@ -0,0 +1,13 @@
+; test that casted mallocs get converted to malloc of the right type
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep bitcast
+
+; The target datalayout is important for this test case. We have to tell
+; instcombine that the ABI alignment for a long is 4-bytes, not 8, otherwise
+; it won't do the transform.
+target datalayout = "e-i64:32:64"
+int* %test(uint %size) {
+ %X = malloc long, uint %size
+ %ret = bitcast long* %X to int*
+ ret int* %ret
+}
diff --git a/test/Transforms/InstCombine/cast-propagate.ll b/test/Transforms/InstCombine/cast-propagate.ll
new file mode 100644
index 0000000..d556473
--- /dev/null
+++ b/test/Transforms/InstCombine/cast-propagate.ll
@@ -0,0 +1,10 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -mem2reg | llvm-dis | \
+; RUN: not grep load
+
+int %test1(uint* %P) {
+ %A = alloca uint
+ store uint 123, uint* %A
+ %Q = cast uint* %A to int* ; Cast the result of the load not the source
+ %V = load int* %Q
+ ret int %V
+}
diff --git a/test/Transforms/InstCombine/cast-set.ll b/test/Transforms/InstCombine/cast-set.ll
new file mode 100644
index 0000000..801aa1b
--- /dev/null
+++ b/test/Transforms/InstCombine/cast-set.ll
@@ -0,0 +1,49 @@
+; This tests for various complex cast elimination cases instcombine should
+; handle.
+
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | notcast
+
+bool %test1(int %X) {
+ %A = cast int %X to uint
+ %c = setne uint %A, 12 ; Convert to setne int %X, 12
+ ret bool %c
+}
+
+bool %test2(int %X, int %Y) {
+ %A = cast int %X to uint
+ %B = cast int %Y to uint
+ %c = setne uint %A, %B ; Convert to setne int %X, %Y
+ ret bool %c
+}
+
+int %test4(int %A) {
+ %B = cast int %A to uint
+ %C = shl uint %B, ubyte 2
+ %D = cast uint %C to int
+ ret int %D
+}
+
+short %test5(short %A) {
+ %B = cast short %A to uint
+ %C = and uint %B, 15
+ %D = cast uint %C to short
+ ret short %D
+}
+
+bool %test6(bool %A) {
+ %B = cast bool %A to int
+ %C = setne int %B, 0
+ ret bool %C
+}
+
+bool %test6a(bool %A) {
+ %B = cast bool %A to int
+ %C = setne int %B, -1 ; Always true!
+ ret bool %C
+}
+
+bool %test7(sbyte* %A) {
+ %B = cast sbyte* %A to int*
+ %C = seteq int* %B, null
+ ret bool %C
+}
diff --git a/test/Transforms/InstCombine/cast.ll b/test/Transforms/InstCombine/cast.ll
new file mode 100644
index 0000000..156886f
--- /dev/null
+++ b/test/Transforms/InstCombine/cast.ll
@@ -0,0 +1,230 @@
+; Tests to make sure elimination of casts is working correctly
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep %c | notcast
+; END.
+
+%inbuf = external global [32832 x ubyte]
+
+implementation
+
+int %test1(int %A) {
+ %c1 = cast int %A to uint
+ %c2 = cast uint %c1 to int
+ ret int %c2
+}
+
+ulong %test2(ubyte %A) {
+ %c1 = cast ubyte %A to ushort
+ %c2 = cast ushort %c1 to uint
+ %Ret = cast uint %c2 to ulong
+ ret ulong %Ret
+}
+
+ulong %test3(ulong %A) { ; This function should just use bitwise AND
+ %c1 = cast ulong %A to ubyte
+ %c2 = cast ubyte %c1 to ulong
+ ret ulong %c2
+}
+
+uint %test4(int %A, int %B) {
+ %COND = setlt int %A, %B
+ %c = cast bool %COND to ubyte ; Booleans are unsigned integrals
+ %result = cast ubyte %c to uint ; for the cast elim purpose
+ ret uint %result
+}
+
+int %test5(bool %B) {
+ %c = cast bool %B to ubyte ; This cast should get folded into
+ %result = cast ubyte %c to int ; this cast
+ ret int %result
+}
+
+int %test6(ulong %A) {
+ %c1 = cast ulong %A to uint
+ %res = cast uint %c1 to int
+ ret int %res
+}
+
+long %test7(bool %A) {
+ %c1 = cast bool %A to int
+ %res = cast int %c1 to long
+ ret long %res
+}
+
+long %test8(sbyte %A) {
+ %c1 = cast sbyte %A to ulong
+ %res = cast ulong %c1 to long
+ ret long %res
+}
+
+short %test9(short %A) {
+ %c1 = cast short %A to int
+ %c2 = cast int %c1 to short
+ ret short %c2
+}
+
+short %test10(short %A) {
+ %c1 = cast short %A to uint
+ %c2 = cast uint %c1 to short
+ ret short %c2
+}
+
+declare void %varargs(int, ...)
+
+void %test11(int* %P) {
+ %c = cast int* %P to short*
+ call void(int, ...)* %varargs(int 5, short* %c)
+ ret void
+}
+
+int* %test12() {
+ %p = malloc [4 x sbyte]
+ %c = cast [4 x sbyte]* %p to int*
+ ret int* %c
+}
+
+ubyte *%test13(long %A) {
+ %c = getelementptr [0 x ubyte]* cast ([32832 x ubyte]* %inbuf to [0 x ubyte]*), long 0, long %A
+ ret ubyte* %c
+}
+
+bool %test14(sbyte %A) {
+ %c = cast sbyte %A to ubyte
+ %X = setlt ubyte %c, 128 ; setge %A, 0
+ ret bool %X
+}
+
+; This just won't occur when there's no difference between ubyte and sbyte
+;bool %test15(ubyte %A) {
+; %c = cast ubyte %A to sbyte
+; %X = setlt sbyte %c, 0 ; setgt %A, 127
+; ret bool %X
+;}
+
+bool %test16(int* %P) {
+ %c = cast int* %P to bool ;; setne P, null
+ ret bool %c
+}
+
+short %test17(bool %tmp3) {
+ %c = cast bool %tmp3 to int
+ %t86 = cast int %c to short
+ ret short %t86
+}
+
+short %test18(sbyte %tmp3) {
+ %c = cast sbyte %tmp3 to int
+ %t86 = cast int %c to short
+ ret short %t86
+}
+
+bool %test19(int %X) {
+ %c = cast int %X to long
+ %Z = setlt long %c, 12345
+ ret bool %Z
+}
+
+bool %test20(bool %B) {
+ %c = cast bool %B to int
+ %D = setlt int %c, -1
+ ret bool %D ;; false
+}
+
+uint %test21(uint %X) {
+ %c1 = cast uint %X to sbyte
+ %c2 = cast sbyte %c1 to uint ;; sext -> zext -> and -> nop
+ %RV = and uint %c2, 255
+ ret uint %RV
+}
+
+uint %test22(uint %X) {
+ %c1 = cast uint %X to sbyte
+ %c2 = cast sbyte %c1 to uint ;; sext -> zext -> and -> nop
+ %RV = shl uint %c2, ubyte 24
+ ret uint %RV
+}
+
+int %test23(int %X) {
+ %c1 = cast int %X to ushort ;; Turn into an AND even though X
+ %c2 = cast ushort %c1 to int ;; and Z are signed.
+ ret int %c2
+}
+
+bool %test24(bool %C) {
+ %X = select bool %C, uint 14, uint 1234
+ %c = cast uint %X to bool ;; Fold cast into select
+ ret bool %c
+}
+
+void %test25(int** %P) {
+ %c = cast int** %P to float**
+ store float* null, float** %c ;; Fold cast into null
+ ret void
+}
+
+int %test26(float %F) {
+ %c = cast float %F to double ;; no need to cast from float->double.
+ %D = cast double %c to int
+ ret int %D
+}
+
+[4 x float]* %test27([9 x [4 x float]]* %A) {
+ %c = cast [9 x [4 x float]]* %A to [4 x float]*
+ ret [4 x float]* %c
+}
+
+float* %test28([4 x float]* %A) {
+ %c = cast [4 x float]* %A to float*
+ ret float* %c
+}
+
+uint %test29(uint %c1, uint %c2) {
+ %tmp1 = cast uint %c1 to ubyte
+ %tmp4.mask = cast uint %c2 to ubyte
+ %tmp = or ubyte %tmp4.mask, %tmp1
+ %tmp10 = cast ubyte %tmp to uint
+ ret uint %tmp10
+}
+
+uint %test30(uint %c1) {
+ %c2 = cast uint %c1 to ubyte
+ %c3 = xor ubyte %c2, 1
+ %c4 = cast ubyte %c3 to uint
+ ret uint %c4
+}
+
+bool %test31(ulong %A) {
+ %B = cast ulong %A to int
+ %C = and int %B, 42
+ %D = seteq int %C, 10
+ ret bool %D
+}
+
+
+void %test32(double** %tmp) {
+ %tmp8 = malloc [16 x sbyte]
+ %tmp8 = cast [16 x sbyte]* %tmp8 to double*
+ store double* %tmp8, double** %tmp
+ ret void
+}
+
+uint %test33(uint %c1) {
+ %x = bitcast uint %c1 to float
+ %y = bitcast float %x to uint
+ ret uint %y
+}
+
+ushort %test34(ushort %a) {
+ %c1 = zext ushort %a to int
+ %tmp21 = lshr int %c1, ubyte 8
+ %c2 = trunc int %tmp21 to ushort
+ ret ushort %c2
+}
+
+ushort %test35(ushort %a) {
+ %c1 = bitcast ushort %a to short
+ %tmp2 = lshr short %c1, ubyte 8
+ %c2 = bitcast short %tmp2 to ushort
+ ret ushort %c2
+}
+
diff --git a/test/Transforms/InstCombine/cast2.ll b/test/Transforms/InstCombine/cast2.ll
new file mode 100644
index 0000000..5cc9087
--- /dev/null
+++ b/test/Transforms/InstCombine/cast2.ll
@@ -0,0 +1,29 @@
+; Tests to make sure elimination of casts is working correctly
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | notcast
+
+define i16 @test1(i16 %a) {
+ %tmp = zext i16 %a to i32 ; <i32> [#uses=2]
+ %tmp21 = lshr i32 %tmp, 8 ; <i32> [#uses=1]
+ %tmp5 = shl i32 %tmp, 8 ; <i32> [#uses=1]
+ %tmp.upgrd.32 = or i32 %tmp21, %tmp5 ; <i32> [#uses=1]
+ %tmp.upgrd.3 = trunc i32 %tmp.upgrd.32 to i16 ; <i16> [#uses=1]
+ ret i16 %tmp.upgrd.3
+}
+
+define i16 @test2(i16 %a) {
+ %tmp = zext i16 %a to i32 ; <i32> [#uses=2]
+ %tmp21 = lshr i32 %tmp, 9 ; <i32> [#uses=1]
+ %tmp5 = shl i32 %tmp, 8 ; <i32> [#uses=1]
+ %tmp.upgrd.32 = or i32 %tmp21, %tmp5 ; <i32> [#uses=1]
+ %tmp.upgrd.3 = trunc i32 %tmp.upgrd.32 to i16 ; <i16> [#uses=1]
+ ret i16 %tmp.upgrd.3
+}
+
+; PR1263
+define i32* @test3(i32* %tmp1) {
+ %tmp64 = bitcast i32* %tmp1 to { i32 }* ; <{ i32 }*> [#uses=1]
+ %tmp65 = getelementptr { i32 }* %tmp64, i32 0, i32 0 ; <i32*> [#uses=1]
+ ret i32* %tmp65
+}
+
+
diff --git a/test/Transforms/InstCombine/cast_ptr.ll b/test/Transforms/InstCombine/cast_ptr.ll
new file mode 100644
index 0000000..c067988
--- /dev/null
+++ b/test/Transforms/InstCombine/cast_ptr.ll
@@ -0,0 +1,20 @@
+; Tests to make sure elimination of casts is working correctly
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | notcast
+
+target pointersize = 32
+
+implementation
+
+sbyte* %test1(sbyte* %t) {
+ %tmpc = cast sbyte* %t to uint
+ %tmpa = add uint %tmpc, 32
+ %tv = cast uint %tmpa to sbyte*
+ ret sbyte* %tv
+}
+
+bool %test2(sbyte* %a, sbyte* %b) {
+%tmpa = cast sbyte* %a to uint
+%tmpb = cast sbyte* %b to uint
+%r = seteq uint %tmpa, %tmpb
+ret bool %r
+}
diff --git a/test/Transforms/InstCombine/deadcode.ll b/test/Transforms/InstCombine/deadcode.ll
new file mode 100644
index 0000000..370390a
--- /dev/null
+++ b/test/Transforms/InstCombine/deadcode.ll
@@ -0,0 +1,13 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep {ret i32 %A}
+
+int %test(int %A) {
+ %X = or bool false, false
+ br bool %X, label %T, label %C
+T:
+ %B = add int %A, 1
+ br label %C
+C:
+ %C = phi int [%B, %T], [%A, %0]
+ ret int %C
+}
diff --git a/test/Transforms/InstCombine/dg.exp b/test/Transforms/InstCombine/dg.exp
new file mode 100644
index 0000000..879685c
--- /dev/null
+++ b/test/Transforms/InstCombine/dg.exp
@@ -0,0 +1,3 @@
+load_lib llvm.exp
+
+RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,llx,c,cpp,tr}]]
diff --git a/test/Transforms/InstCombine/div.ll b/test/Transforms/InstCombine/div.ll
new file mode 100644
index 0000000..97f331c
--- /dev/null
+++ b/test/Transforms/InstCombine/div.ll
@@ -0,0 +1,69 @@
+; This test makes sure that div instructions are properly eliminated.
+;
+
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep div
+; END.
+
+implementation
+
+int %test1(int %A) {
+ %B = div int %A, 1
+ ret int %B
+}
+
+uint %test2(uint %A) {
+ %B = div uint %A, 8 ; => Shift
+ ret uint %B
+}
+
+int %test3(int %A) {
+ %B = div int 0, %A ; => 0, don't need to keep traps
+ ret int %B
+}
+
+int %test4(int %A) {
+ %B = div int %A, -1 ; 0-A
+ ret int %B
+}
+
+uint %test5(uint %A) {
+ %B = div uint %A, 4294967280
+ %C = div uint %B, 4294967292
+ ret uint %C
+}
+
+bool %test6(uint %A) {
+ %B = div uint %A, 123
+ %C = seteq uint %B, 0 ; A < 123
+ ret bool %C
+}
+
+bool %test7(uint %A) {
+ %B = div uint %A, 10
+ %C = seteq uint %B, 2 ; A >= 20 && A < 30
+ ret bool %C
+}
+
+bool %test8(ubyte %A) {
+ %B = div ubyte %A, 123
+ %C = seteq ubyte %B, 2 ; A >= 246
+ ret bool %C
+}
+
+bool %test9(ubyte %A) {
+ %B = div ubyte %A, 123
+ %C = setne ubyte %B, 2 ; A < 246
+ ret bool %C
+}
+
+uint %test10(uint %X, bool %C) {
+ %V = select bool %C, uint 64, uint 8
+ %R = udiv uint %X, %V
+ ret uint %R
+}
+
+int %test11(int %X, bool %C) {
+ %A = select bool %C, int 1024, int 32
+ %B = udiv int %X, %A
+ ret int %B
+}
diff --git a/test/Transforms/InstCombine/fpcast.ll b/test/Transforms/InstCombine/fpcast.ll
new file mode 100644
index 0000000..0c38767
--- /dev/null
+++ b/test/Transforms/InstCombine/fpcast.ll
@@ -0,0 +1,14 @@
+; Test some floating point casting cases
+; RUN: llvm-upgrade %s -o - | llvm-as | opt -instcombine | llvm-dis | notcast
+; RUN: llvm-upgrade %s -o - | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: egrep {ret i8 \(-1\)\|\(255\)}
+
+sbyte %test1() {
+ %x = fptoui float 255.0 to sbyte
+ ret sbyte %x
+}
+
+ubyte %test2() {
+ %x = fptosi float -1.0 to ubyte
+ ret ubyte %x
+}
diff --git a/test/Transforms/InstCombine/getelementptr-setcc.ll b/test/Transforms/InstCombine/getelementptr-setcc.ll
new file mode 100644
index 0000000..55102f4
--- /dev/null
+++ b/test/Transforms/InstCombine/getelementptr-setcc.ll
@@ -0,0 +1,34 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep getelementptr
+; END.
+
+bool %test1(short* %P, int %I, int %J) {
+ %X = getelementptr short* %P, int %I
+ %Y = getelementptr short* %P, int %J
+
+ %C = setlt short* %X, %Y
+ ret bool %C
+}
+
+bool %test2(short* %P, int %I) {
+ %X = getelementptr short* %P, int %I
+
+ %C = setlt short* %X, %P
+ ret bool %C
+}
+
+int %test3(int* %P, int %A, int %B) {
+ %tmp.4 = getelementptr int* %P, int %A ; <int*> [#uses=1]
+ %tmp.9 = getelementptr int* %P, int %B ; <int*> [#uses=1]
+ %tmp.10 = seteq int* %tmp.4, %tmp.9 ; <bool> [#uses=1]
+ %tmp.11 = cast bool %tmp.10 to int ; <int> [#uses=1]
+ ret int %tmp.11
+}
+
+int %test4(int* %P, int %A, int %B) {
+ %tmp.4 = getelementptr int* %P, int %A ; <int*> [#uses=1]
+ %tmp.6 = seteq int* %tmp.4, %P ; <bool> [#uses=1]
+ %tmp.7 = cast bool %tmp.6 to int ; <int> [#uses=1]
+ ret int %tmp.7
+}
+
diff --git a/test/Transforms/InstCombine/getelementptr.ll b/test/Transforms/InstCombine/getelementptr.ll
new file mode 100644
index 0000000..7e88092
--- /dev/null
+++ b/test/Transforms/InstCombine/getelementptr.ll
@@ -0,0 +1,76 @@
+; The %A getelementptr instruction should be eliminated here
+
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep -v %B | not grep getelementptr
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep foo1
+; END.
+
+%Global = constant [10 x sbyte] c"helloworld"
+
+implementation
+
+int *%foo1(int* %I) { ; Test noop elimination
+ %A = getelementptr int* %I, long 0
+ ret int * %A
+}
+
+int* %foo2(int* %I) { ; Test noop elimination
+ %A = getelementptr int* %I
+ ret int* %A
+}
+int* %foo3(int * %I) { ; Test that two array indexing geps fold
+ %A = getelementptr int* %I, long 17
+ %B = getelementptr int* %A, long 4
+ ret int* %B
+}
+
+int* %foo4({int} *%I) { ; Test that two getelementptr insts fold
+ %A = getelementptr {int}* %I, long 1
+ %B = getelementptr {int}* %A, long 0, uint 0
+ ret int* %B
+}
+
+void %foo5(sbyte %B) {
+ ; This should be turned into a constexpr instead of being an instruction
+ %A = getelementptr [10 x sbyte]* %Global, long 0, long 4
+ store sbyte %B, sbyte* %A
+ ret void
+}
+
+int* %foo6() {
+ %M = malloc [4 x int]
+ %A = getelementptr [4 x int]* %M, long 0, long 0
+ %B = getelementptr int* %A, long 2
+ ret int* %B
+}
+
+int* %foo7(int* %I, long %C, long %D) {
+ %A = getelementptr int* %I, long %C
+ %B = getelementptr int* %A, long %D
+ ret int* %B
+}
+
+sbyte* %foo8([10 x int]* %X) {
+ %A = getelementptr [10 x int]* %X, long 0, long 0 ;; Fold into the cast.
+ %B = cast int* %A to sbyte*
+ ret sbyte * %B
+}
+
+int %test9() {
+ %A = getelementptr {int, double}* null, int 0, uint 1
+ %B = cast double* %A to int
+ ret int %B
+}
+
+bool %test10({int, int} * %x, {int, int} * %y) {
+ %tmp.1 = getelementptr {int,int}* %x, int 0, uint 1
+ %tmp.3 = getelementptr {int,int}* %y, int 0, uint 1
+ %tmp.4 = seteq int* %tmp.1, %tmp.3 ;; seteq x, y
+ ret bool %tmp.4
+}
+
+bool %test11({int,int} *%X) {
+ %P = getelementptr {int,int}* %X, int 0, uint 0
+ %Q = seteq int* %P, null
+ ret bool %Q
+}
diff --git a/test/Transforms/InstCombine/getelementptr_cast.ll b/test/Transforms/InstCombine/getelementptr_cast.ll
new file mode 100644
index 0000000..5a0d132
--- /dev/null
+++ b/test/Transforms/InstCombine/getelementptr_cast.ll
@@ -0,0 +1,11 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: notcast {} {getelementptr.*}
+
+%G = external global [3 x sbyte]
+
+implementation
+
+ubyte *%foo(uint %Idx) {
+ %tmp = getelementptr ubyte* cast ([3 x sbyte]* %G to ubyte*), uint %Idx
+ ret ubyte* %tmp
+}
diff --git a/test/Transforms/InstCombine/getelementptr_const.ll b/test/Transforms/InstCombine/getelementptr_const.ll
new file mode 100644
index 0000000..e4265f4
--- /dev/null
+++ b/test/Transforms/InstCombine/getelementptr_const.ll
@@ -0,0 +1,14 @@
+; Test folding of constantexpr geps into normal geps.
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -gcse -instcombine | \
+; RUN: llvm-dis | not grep getelementptr
+
+%Array = external global [40 x int]
+
+int %test(long %X) {
+ %A = getelementptr int* getelementptr ([40 x int]* %Array, long 0, long 0), long %X
+ %B = getelementptr [40 x int]* %Array, long 0, long %X
+ %a = cast int* %A to int
+ %b = cast int* %B to int
+ %c = sub int %a, %b
+ ret int %c
+}
diff --git a/test/Transforms/InstCombine/getelementptr_index.ll b/test/Transforms/InstCombine/getelementptr_index.ll
new file mode 100644
index 0000000..c4a7317
--- /dev/null
+++ b/test/Transforms/InstCombine/getelementptr_index.ll
@@ -0,0 +1,10 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep trunc
+
+target endian = little
+target pointersize = 32
+
+int* %test(int* %X, long %Idx) {
+ ; Should insert a cast to int on this target
+ %R = getelementptr int* %X, long %Idx
+ ret int* %R
+}
diff --git a/test/Transforms/InstCombine/hoist_instr.ll b/test/Transforms/InstCombine/hoist_instr.ll
new file mode 100644
index 0000000..cfe704d
--- /dev/null
+++ b/test/Transforms/InstCombine/hoist_instr.ll
@@ -0,0 +1,17 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: %prcontext div 1 | grep then:
+
+;; This tests that the div is hoisted into the then block.
+
+int %foo(bool %C, int %A, int %B) {
+entry:
+ br bool %C, label %then, label %endif
+
+then:
+ br label %endif
+
+endif:
+ %X = phi int [%A, %then], [15, %entry]
+ %Y = div int %X, 42
+ ret int %Y
+}
diff --git a/test/Transforms/InstCombine/icmp.ll b/test/Transforms/InstCombine/icmp.ll
new file mode 100644
index 0000000..bb69ba3
--- /dev/null
+++ b/test/Transforms/InstCombine/icmp.ll
@@ -0,0 +1,31 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep icmp
+; END.
+
+define i32 @test1(i32 %X) {
+entry:
+ icmp slt i32 %X, 0 ; <i1>:0 [#uses=1]
+ zext i1 %0 to i32 ; <i32>:1 [#uses=1]
+ ret i32 %1
+}
+
+define i32 @test2(i32 %X) {
+entry:
+ icmp ult i32 %X, -2147483648 ; <i1>:0 [#uses=1]
+ zext i1 %0 to i32 ; <i32>:1 [#uses=1]
+ ret i32 %1
+}
+
+define i32 @test3(i32 %X) {
+entry:
+ icmp slt i32 %X, 0 ; <i1>:0 [#uses=1]
+ sext i1 %0 to i32 ; <i32>:1 [#uses=1]
+ ret i32 %1
+}
+
+define i32 @test4(i32 %X) {
+entry:
+ icmp ult i32 %X, -2147483648 ; <i1>:0 [#uses=1]
+ sext i1 %0 to i32 ; <i32>:1 [#uses=1]
+ ret i32 %1
+}
+
diff --git a/test/Transforms/InstCombine/load.ll b/test/Transforms/InstCombine/load.ll
new file mode 100644
index 0000000..d1b8eda
--- /dev/null
+++ b/test/Transforms/InstCombine/load.ll
@@ -0,0 +1,74 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep load
+; END.
+
+%X = constant int 42
+%X2 = constant int 47
+%Y = constant [2 x { int, float }] [ { int, float } { int 12, float 1.0 },
+ { int, float } { int 37, float 1.2312 } ]
+%Z = constant [2 x { int, float }] zeroinitializer
+
+int %test1() {
+ %B = load int* %X
+ ret int %B
+}
+
+float %test2() {
+ %A = getelementptr [2 x { int, float}]* %Y, long 0, long 1, uint 1
+ %B = load float* %A
+ ret float %B
+}
+
+
+int %test3() {
+ %A = getelementptr [2 x { int, float}]* %Y, long 0, long 0, uint 0
+ %B = load int* %A
+ ret int %B
+}
+
+int %test4() {
+ %A = getelementptr [2 x { int, float}]* %Z, long 0, long 1, uint 0
+ %B = load int* %A
+ ret int %B
+}
+
+; load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2)
+int %test5(bool %C) {
+ %Y = select bool %C, int* %X, int* %X2
+ %Z = load int* %Y
+ ret int %Z
+}
+
+int %test7(int %X) {
+ %V = getelementptr int* null, int %X
+ %R = load int* %V
+ ret int %R
+}
+
+int %test8(int* %P) {
+ store int 1, int* %P
+ %X = load int* %P ;; Trivial store->load forwarding
+ ret int %X
+}
+
+int %test9(int* %P) {
+ %X = load int* %P ;; Trivial load cse
+ %Y = load int* %P
+ %Z = sub int %X, %Y
+ ret int %Z
+}
+
+int %test10(bool %C, int* %P, int* %Q) {
+ br bool %C, label %T, label %F
+T:
+ store int 1, int* %Q
+ store int 0, int* %P
+ br label %C
+F:
+ store int 0, int* %P
+ br label %C
+C:
+ %V = load int* %P ;; always 0
+ ret int %V
+}
diff --git a/test/Transforms/InstCombine/malloc-free-delete.ll b/test/Transforms/InstCombine/malloc-free-delete.ll
new file mode 100644
index 0000000..24f793f
--- /dev/null
+++ b/test/Transforms/InstCombine/malloc-free-delete.ll
@@ -0,0 +1,11 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {ret i32 0}
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep malloc
+; PR1201
+define i32 @main(i32 %argc, i8** %argv) {
+ %c_19 = alloca i8* ; <i8**> [#uses=2]
+ %malloc_206 = malloc i8, i32 10 ; <i8*> [#uses=1]
+ store i8* %malloc_206, i8** %c_19
+ %tmp_207 = load i8** %c_19 ; <i8*> [#uses=1]
+ free i8* %tmp_207
+ ret i32 0
+}
diff --git a/test/Transforms/InstCombine/malloc.ll b/test/Transforms/InstCombine/malloc.ll
new file mode 100644
index 0000000..6a4601a
--- /dev/null
+++ b/test/Transforms/InstCombine/malloc.ll
@@ -0,0 +1,7 @@
+; test that mallocs with a constant argument are promoted to array allocations
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep getelementptr
+
+define i32* @test() {
+ %X = malloc i32, i32 4
+ ret i32* %X
+}
diff --git a/test/Transforms/InstCombine/malloc2.ll b/test/Transforms/InstCombine/malloc2.ll
new file mode 100644
index 0000000..eb7c9ab
--- /dev/null
+++ b/test/Transforms/InstCombine/malloc2.ll
@@ -0,0 +1,19 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {ret i32 0}
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep malloc
+; PR1313
+
+define i32 @test1(i32 %argc, i8* %argv, i8* %envp) {
+ %tmp15.i.i.i23 = malloc [2564 x i32] ; <[2564 x i32]*> [#uses=1]
+ %c = icmp eq [2564 x i32]* %tmp15.i.i.i23, null ; <i1>:0 [#uses=1]
+ %retval = zext i1 %c to i32 ; <i32> [#uses=1]
+ ret i32 %retval
+}
+
+define i32 @test2(i32 %argc, i8* %argv, i8* %envp) {
+ %tmp15.i.i.i23 = malloc [2564 x i32] ; <[2564 x i32]*> [#uses=1]
+ %X = bitcast [2564 x i32]* %tmp15.i.i.i23 to i32*
+ %c = icmp ne i32* %X, null
+ %retval = zext i1 %c to i32 ; <i32> [#uses=1]
+ ret i32 %retval
+}
+
diff --git a/test/Transforms/InstCombine/memmove.ll b/test/Transforms/InstCombine/memmove.ll
new file mode 100644
index 0000000..52ed592
--- /dev/null
+++ b/test/Transforms/InstCombine/memmove.ll
@@ -0,0 +1,23 @@
+; This test makes sure that memmove instructions are properly eliminated.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep {call void @llvm.memmove}
+
+%S = internal constant [33 x sbyte] c"panic: restorelist inconsistency\00"
+
+implementation
+
+declare void %llvm.memmove.i32(sbyte*, sbyte*, uint, uint)
+
+void %test1(sbyte* %A, sbyte* %B, uint %N) {
+ ;; 0 bytes -> noop.
+ call void %llvm.memmove.i32(sbyte* %A, sbyte* %B, uint 0, uint 1)
+ ret void
+}
+
+void %test2(sbyte *%A, uint %N) {
+ ;; dest can't alias source since we can't write to source!
+ call void %llvm.memmove.i32(sbyte* %A, sbyte* getelementptr ([33 x sbyte]* %S, int 0, int 0),
+ uint %N, uint 1)
+ ret void
+}
diff --git a/test/Transforms/InstCombine/mul.ll b/test/Transforms/InstCombine/mul.ll
new file mode 100644
index 0000000..1d5c286
--- /dev/null
+++ b/test/Transforms/InstCombine/mul.ll
@@ -0,0 +1,74 @@
+; This test makes sure that mul instructions are properly eliminated.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep mul
+; END.
+
+implementation
+
+int %test1(int %A) {
+ %B = mul int %A, 1
+ ret int %B
+}
+
+int %test2(int %A) {
+ %B = mul int %A, 2 ; Should convert to an add instruction
+ ret int %B
+}
+
+int %test3(int %A) {
+ %B = mul int %A, 0 ; This should disappear entirely
+ ret int %B
+}
+
+double %test4(double %A) {
+ %B = mul double 1.0, %A ; This is safe for FP
+ ret double %B
+}
+
+int %test5(int %A) {
+ %B = mul int %A, 8
+ ret int %B
+}
+
+ubyte %test6(ubyte %A) {
+ %B = mul ubyte %A, 8
+ %C = mul ubyte %B, 8
+ ret ubyte %C
+}
+
+int %test7(int %i) {
+ %tmp = mul int %i, -1 ; %tmp = sub 0, %i
+ ret int %tmp
+}
+
+ulong %test8(ulong %i) {
+ %j = mul ulong %i, 18446744073709551615 ; tmp = sub 0, %i
+ ret ulong %j
+}
+
+uint %test9(uint %i) {
+ %j = mul uint %i, 4294967295 ; %j = sub 0, %i
+ ret uint %j
+}
+
+uint %test10(int %a, uint %b) {
+ %c = setlt int %a, 0
+ %d = cast bool %c to uint
+ %e = mul uint %d, %b ; e = b & (a >> 31)
+ ret uint %e
+}
+
+uint %test11(int %a, uint %b) {
+ %c = setle int %a, -1
+ %d = cast bool %c to uint
+ %e = mul uint %d, %b ; e = b & (a >> 31)
+ ret uint %e
+}
+
+uint %test12(ubyte %a, uint %b) {
+ %c = setgt ubyte %a, 127
+ %d = cast bool %c to uint
+ %e = mul uint %d, %b ; e = b & (a >> 31)
+ ret uint %e
+}
+
diff --git a/test/Transforms/InstCombine/narrow.ll b/test/Transforms/InstCombine/narrow.ll
new file mode 100644
index 0000000..02d7e31
--- /dev/null
+++ b/test/Transforms/InstCombine/narrow.ll
@@ -0,0 +1,17 @@
+; This file contains various testcases that check to see that instcombine
+; is narrowing computations when possible.
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep {ret i1 false}
+
+; test1 - Eliminating the casts in this testcase (by narrowing the AND
+; operation) allows instcombine to realize the function always returns false.
+;
+bool %test1(int %A, int %B) {
+ %C1 = setlt int %A, %B
+ %ELIM1 = zext bool %C1 to uint
+ %C2 = setgt int %A, %B
+ %ELIM2 = zext bool %C2 to uint
+ %C3 = and uint %ELIM1, %ELIM2
+ %ELIM3 = trunc uint %C3 to bool
+ ret bool %ELIM3
+}
diff --git a/test/Transforms/InstCombine/not.ll b/test/Transforms/InstCombine/not.ll
new file mode 100644
index 0000000..3e85692
--- /dev/null
+++ b/test/Transforms/InstCombine/not.ll
@@ -0,0 +1,45 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep xor
+
+implementation
+
+int %test1(int %A) {
+ %B = xor int %A, -1
+ %C = xor int %B, -1
+ ret int %C
+}
+
+bool %test2(int %A, int %B) {
+ %cond = setle int %A, %B ; Can change into setge
+ %Ret = xor bool %cond, true
+ ret bool %Ret
+}
+
+
+; Test that demorgans law can be instcombined
+int %test3(int %A, int %B) {
+ %a = xor int %A, -1
+ %b = xor int %B, -1
+ %c = and int %a, %b
+ %d = xor int %c, -1
+ ret int %d
+}
+
+; Test that demorgens law can work with constants
+int %test4(int %A, int %B) {
+ %a = xor int %A, -1
+ %c = and int %a, 5 ; 5 = ~c2
+ %d = xor int %c, -1
+ ret int %d
+}
+
+; test the mirror of demorgans law...
+int %test5(int %A, int %B) {
+ %a = xor int %A, -1
+ %b = xor int %B, -1
+ %c = or int %a, %b
+ %d = xor int %c, -1
+ ret int %d
+}
diff --git a/test/Transforms/InstCombine/or.ll b/test/Transforms/InstCombine/or.ll
new file mode 100644
index 0000000..3fc225a
--- /dev/null
+++ b/test/Transforms/InstCombine/or.ll
@@ -0,0 +1,158 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep -v xor | not grep {or }
+; END.
+
+implementation
+
+int %test1(int %A) {
+ %B = or int %A, 0
+ ret int %B
+}
+
+int %test2(int %A) {
+ %B = or int %A, -1
+ ret int %B
+}
+
+ubyte %test2a(ubyte %A) {
+ %B = or ubyte %A, 255
+ ret ubyte %B
+}
+
+bool %test3(bool %A) {
+ %B = or bool %A, false
+ ret bool %B
+}
+
+bool %test4(bool %A) {
+ %B = or bool %A, true
+ ret bool %B
+}
+
+bool %test5(bool %A) {
+ %B = or bool %A, %A
+ ret bool %B
+}
+
+int %test6(int %A) {
+ %B = or int %A, %A
+ ret int %B
+}
+
+int %test7(int %A) { ; A | ~A == -1
+ %NotA = xor int -1, %A
+ %B = or int %A, %NotA
+ ret int %B
+}
+
+ubyte %test8(ubyte %A) {
+ %B = or ubyte %A, 254
+ %C = or ubyte %B, 1
+ ret ubyte %C
+}
+
+ubyte %test9(ubyte %A, ubyte %B) { ; Test that (A|c1)|(B|c2) == (A|B)|(c1|c2)
+ %C = or ubyte %A, 1
+ %D = or ubyte %B, 254
+ %E = or ubyte %C, %D
+ ret ubyte %E
+}
+
+ubyte %test10(ubyte %A) {
+ %B = or ubyte %A, 1
+ %C = and ubyte %B, 254
+ %D = or ubyte %C, 254 ; (X & C1) | C2 --> (X | C2) & (C1|C2)
+ ret ubyte %D
+}
+
+ubyte %test11(ubyte %A) {
+ %B = or ubyte %A, 254
+ %C = xor ubyte %B, 13
+ %D = or ubyte %C, 1 ; (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
+ %E = xor ubyte %D, 12
+ ret ubyte %E
+}
+
+uint %test12(uint %A) {
+ %B = or uint %A, 4 ; Should be eliminated
+ %C = and uint %B, 8
+ ret uint %C
+}
+
+uint %test13(uint %A) {
+ %B = or uint %A, 12
+ %C = and uint %B, 8 ; Always equal to 8
+ ret uint %C
+}
+
+bool %test14(uint %A, uint %B) {
+ %C1 = setlt uint %A, %B
+ %C2 = setgt uint %A, %B
+ %D = or bool %C1, %C2 ; (A < B) | (A > B) === A != B
+ ret bool %D
+}
+
+bool %test15(uint %A, uint %B) {
+ %C1 = setlt uint %A, %B
+ %C2 = seteq uint %A, %B
+ %D = or bool %C1, %C2 ; (A < B) | (A == B) === A <= B
+ ret bool %D
+}
+
+int %test16(int %A) {
+ %B = and int %A, 1
+ %C = and int %A, -2 ; -2 = ~1
+ %D = or int %B, %C ; %D = and int %B, -1 == %B
+ ret int %D
+}
+
+int %test17(int %A) {
+ %B = and int %A, 1
+ %C = and int %A, 4
+ %D = or int %B, %C ; %D = and int %B, 5
+ ret int %D
+}
+
+bool %test18(int %A) {
+ %B = setge int %A, 100
+ %C = setlt int %A, 50
+ %D = or bool %B, %C ;; (A-50) >u 50
+ ret bool %D
+}
+
+bool %test19(int %A) {
+ %B = seteq int %A, 50
+ %C = seteq int %A, 51
+ %D = or bool %B, %C ;; (A-50) < 2
+ ret bool %D
+}
+
+int %test20(int %x) {
+ %y = and int %x, 123
+ %z = or int %y, %x
+ ret int %z
+}
+
+uint %test21(uint %tmp.1) {
+ %tmp.1.mask1 = add uint %tmp.1, 2
+ %tmp.3 = and uint %tmp.1.mask1, 4294967294
+ %tmp.5 = and uint %tmp.1, 1
+ %tmp.6 = or uint %tmp.5, %tmp.3 ;; add tmp.1, 2
+ ret uint %tmp.6
+}
+
+int %test22(int %B) {
+ %ELIM41 = and int %B, 1 ; <int> [#uses=1]
+ %ELIM7 = and int %B, -2 ; <int> [#uses=1]
+ %ELIM5 = or int %ELIM41, %ELIM7 ; <int> [#uses=1]
+ ret int %ELIM5
+}
+
+ushort %test23(ushort %A) {
+ %B = shr ushort %A, ubyte 1
+ %C = or ushort %B, 32768 ;; fold or into xor
+ %D = xor ushort %C, 8193
+ ret ushort %D
+}
diff --git a/test/Transforms/InstCombine/or2.ll b/test/Transforms/InstCombine/or2.ll
new file mode 100644
index 0000000..c632a5d
--- /dev/null
+++ b/test/Transforms/InstCombine/or2.ll
@@ -0,0 +1,10 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep { or}
+
+define i32 @test1(i32 %b, i32 %c, i32 %d) {
+ %tmp3 = and i32 %c, %b
+ %tmp4not = xor i32 %b, -1
+ %tmp6 = and i32 %d, %tmp4not
+ %tmp7 = or i32 %tmp6, %tmp3
+ ret i32 %tmp7
+}
+
diff --git a/test/Transforms/InstCombine/phi.ll b/test/Transforms/InstCombine/phi.ll
new file mode 100644
index 0000000..a51e90e
--- /dev/null
+++ b/test/Transforms/InstCombine/phi.ll
@@ -0,0 +1,78 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep phi
+; END.
+
+implementation
+
+int %test1(int %A, bool %b) {
+BB0: br bool %b, label %BB1, label %BB2
+BB1:
+ %B = phi int [%A, %BB0] ; Combine away one argument PHI nodes
+ ret int %B
+BB2:
+ ret int %A
+}
+
+int %test2(int %A, bool %b) {
+BB0: br bool %b, label %BB1, label %BB2
+BB1:
+ br label %BB2
+BB2:
+ %B = phi int [%A, %BB0], [%A, %BB1] ; Combine away PHI nodes with same values
+ ret int %B
+}
+
+int %test3(int %A, bool %b) {
+BB0: br label %Loop
+
+Loop:
+ %B = phi int [%A, %BB0], [%B, %Loop] ; PHI has same value always.
+ br bool %b, label %Loop, label %Exit
+Exit:
+ ret int %B
+}
+
+int %test4(bool %b) {
+BB0: ret int 7 ; Loop is unreachable
+
+Loop:
+ %B = phi int [%B, %L2], [%B, %Loop] ; PHI has same value always.
+ br bool %b, label %L2, label %Loop
+L2:
+ br label %Loop
+}
+
+int %test5(int %A, bool %b) {
+BB0: br label %Loop
+
+Loop:
+ %B = phi int [%A, %BB0], [undef, %Loop] ; PHI has same value always.
+ br bool %b, label %Loop, label %Exit
+Exit:
+ ret int %B
+}
+
+uint %test6(int %A, bool %b) {
+BB0:
+ %X = cast int %A to uint
+ br bool %b, label %BB1, label %BB2
+BB1:
+ %Y = cast int %A to uint
+ br label %BB2
+BB2:
+ %B = phi uint [%X, %BB0], [%Y, %BB1] ;; Suck casts into phi
+ ret uint %B
+}
+
+int %test7(int %A, bool %b) {
+BB0: br label %Loop
+
+Loop:
+ %B = phi int [%A, %BB0], [%C, %Loop] ; PHI is dead.
+ %C = add int %B, 123
+ br bool %b, label %Loop, label %Exit
+Exit:
+ ret int 0
+}
+
diff --git a/test/Transforms/InstCombine/rem.ll b/test/Transforms/InstCombine/rem.ll
new file mode 100644
index 0000000..987d3c3
--- /dev/null
+++ b/test/Transforms/InstCombine/rem.ll
@@ -0,0 +1,79 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep rem
+; END.
+
+implementation
+
+int %test1(int %A) {
+ %B = rem int %A, 1 ; ISA constant 0
+ ret int %B
+}
+
+int %test2(int %A) { ; 0 % X = 0, we don't need to preserve traps
+ %B = rem int 0, %A
+ ret int %B
+}
+
+uint %test3(uint %A) {
+ %B = rem uint %A, 8 ; & 7
+ ret uint %B
+}
+
+bool %test3a(int %A) {
+ %B = rem int %A, -8 ; & 7
+ %C = setne int %B, 0
+ ret bool %C
+}
+
+uint %test4(uint %X, bool %C) {
+ %V = select bool %C, uint 1, uint 8
+ %R = rem uint %X, %V
+ ret uint %R
+}
+
+uint %test5(uint %X, ubyte %B) {
+ %Amt = shl uint 32, ubyte %B
+ %V = rem uint %X, %Amt
+ ret uint %V
+}
+
+int %test6(int %A) {
+ %B = rem int %A, 0 ;; undef
+ ret int %B
+}
+
+int %test7(int %A) {
+ %B = mul int %A, 26
+ %C = rem int %B, 13
+ ret int %C
+}
+
+int %test8(int %A) {
+ %B = shl int %A, ubyte 4
+ %C = rem int %B, 8
+ ret int %C
+}
+
+uint %test9(uint %A) {
+ %B = mul uint %A, 124
+ %C = rem uint %B, 62
+ ret uint %C
+}
+
+int %test10(ubyte %c) {
+ %tmp.1 = cast ubyte %c to int
+ %tmp.2 = mul int %tmp.1, 3
+ %tmp.3 = cast int %tmp.2 to ulong
+ %tmp.5 = rem ulong %tmp.3, 3
+ %tmp.6 = cast ulong %tmp.5 to int
+ ret int %tmp.6
+}
+
+int %test11(int %i) {
+ %tmp.1 = and int %i, -2
+ %tmp.3 = mul int %tmp.1, 3
+ %tmp.5 = rem int %tmp.3, 6
+ ret int %tmp.5
+}
+
diff --git a/test/Transforms/InstCombine/select.ll b/test/Transforms/InstCombine/select.ll
new file mode 100644
index 0000000..ccc63c2
--- /dev/null
+++ b/test/Transforms/InstCombine/select.ll
@@ -0,0 +1,182 @@
+; This test makes sure that these instructions are properly eliminated.
+
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep select
+; END.
+
+implementation
+
+int %test1(int %A, int %B) {
+ %C = select bool false, int %A, int %B
+ ret int %C
+}
+
+int %test2(int %A, int %B) {
+ %C = select bool true, int %A, int %B
+ ret int %C
+}
+
+int %test3(bool %C, int %I) {
+ %V = select bool %C, int %I, int %I ; V = I
+ ret int %V
+}
+
+bool %test4(bool %C) {
+ %V = select bool %C, bool true, bool false ; V = C
+ ret bool %V
+}
+
+bool %test5(bool %C) {
+ %V = select bool %C, bool false, bool true ; V = !C
+ ret bool %V
+}
+
+int %test6(bool %C) {
+ %V = select bool %C, int 1, int 0 ; V = cast C to int
+ ret int %V
+}
+
+bool %test7(bool %C, bool %X) {
+ %R = select bool %C, bool true, bool %X ; R = or C, X
+ ret bool %R
+}
+
+bool %test8(bool %C, bool %X) {
+ %R = select bool %C, bool %X, bool false ; R = and C, X
+ ret bool %R
+}
+
+bool %test9(bool %C, bool %X) {
+ %R = select bool %C, bool false, bool %X ; R = and !C, X
+ ret bool %R
+}
+
+bool %test10(bool %C, bool %X) {
+ %R = select bool %C, bool %X, bool true ; R = or !C, X
+ ret bool %R
+}
+
+int %test11(int %a) {
+ %C = seteq int %a, 0
+ %R = select bool %C, int 0, int 1
+ ret int %R
+}
+
+int %test12(bool %cond, int %a) {
+ %b = or int %a, 1
+ %c = select bool %cond, int %b, int %a
+ ret int %c
+}
+
+int %test12a(bool %cond, int %a) {
+ %b = shr int %a, ubyte 1
+ %c = select bool %cond, int %b, int %a
+ ret int %c
+}
+
+int %test12b(bool %cond, int %a) {
+ %b = shr int %a, ubyte 1
+ %c = select bool %cond, int %a, int %b
+ ret int %c
+}
+
+int %test13(int %a, int %b) {
+ %C = seteq int %a, %b
+ %V = select bool %C, int %a, int %b
+ ret int %V
+}
+
+int %test13a(int %a, int %b) {
+ %C = setne int %a, %b
+ %V = select bool %C, int %a, int %b
+ ret int %V
+}
+
+int %test13b(int %a, int %b) {
+ %C = seteq int %a, %b
+ %V = select bool %C, int %b, int %a
+ ret int %V
+}
+
+bool %test14a(bool %C, int %X) {
+ %V = select bool %C, int %X, int 0
+ %R = setlt int %V, 1 ; (X < 1) | !C
+ ret bool %R
+}
+
+bool %test14b(bool %C, int %X) {
+ %V = select bool %C, int 0, int %X
+ %R = setlt int %V, 1 ; (X < 1) | C
+ ret bool %R
+}
+
+int %test15a(int %X) { ;; Code sequence for (X & 16) ? 16 : 0
+ %t1 = and int %X, 16
+ %t2 = seteq int %t1, 0
+ %t3 = select bool %t2, int 0, int 16 ;; X & 16
+ ret int %t3
+}
+
+int %test15b(int %X) { ;; Code sequence for (X & 32) ? 0 : 24
+ %t1 = and int %X, 32
+ %t2 = seteq int %t1, 0
+ %t3 = select bool %t2, int 32, int 0 ;; ~X & 32
+ ret int %t3
+}
+
+int %test15c(int %X) { ;; Alternate code sequence for (X & 16) ? 16 : 0
+ %t1 = and int %X, 16
+ %t2 = seteq int %t1, 16
+ %t3 = select bool %t2, int 16, int 0 ;; X & 16
+ ret int %t3
+}
+
+int %test15d(int %X) { ;; Alternate code sequence for (X & 16) ? 16 : 0
+ %t1 = and int %X, 16
+ %t2 = setne int %t1, 0
+ %t3 = select bool %t2, int 16, int 0 ;; X & 16
+ ret int %t3
+}
+
+int %test16(bool %C, int* %P) {
+ %P2 = select bool %C, int* %P, int* null
+ %V = load int* %P2
+ ret int %V
+}
+
+bool %test17(int* %X, bool %C) {
+ %R = select bool %C, int* %X, int* null
+ %RV = seteq int* %R, null
+ ret bool %RV
+}
+
+int %test18(int %X, int %Y, bool %C) {
+ %R = select bool %C, int %X, int 0
+ %V = div int %Y, %R ; div Y,X
+ ret int %V
+}
+
+int %test19(uint %x) {
+ %tmp = setgt uint %x, 2147483647
+ %retval = select bool %tmp, int -1, int 0
+ ret int %retval
+}
+
+int %test20(int %x) {
+ %tmp = setlt int %x, 0
+ %retval = select bool %tmp, int -1, int 0
+ ret int %retval
+}
+
+long %test21(int %x) {
+ %tmp = setlt int %x, 0
+ %retval = select bool %tmp, long -1, long 0
+ ret long %retval
+}
+
+short %test22(int %x) {
+ %tmp = setlt int %x, 0
+ %retval = select bool %tmp, short -1, short 0
+ ret short %retval
+}
+
diff --git a/test/Transforms/InstCombine/set.ll b/test/Transforms/InstCombine/set.ll
new file mode 100644
index 0000000..51cffbe
--- /dev/null
+++ b/test/Transforms/InstCombine/set.ll
@@ -0,0 +1,152 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep icmp
+; END.
+
+%X = uninitialized global int
+
+bool %test1(int %A) {
+ %B = seteq int %A, %A
+ %C = seteq int* %X, null ; Never true
+ %D = and bool %B, %C
+ ret bool %D
+}
+
+bool %test2(int %A) {
+ %B = setne int %A, %A
+ %C = setne int* %X, null ; Never false
+ %D = or bool %B, %C
+ ret bool %D
+}
+
+bool %test3(int %A) {
+ %B = setlt int %A, %A
+ ret bool %B
+}
+
+bool %test4(int %A) {
+ %B = setgt int %A, %A
+ ret bool %B
+}
+
+bool %test5(int %A) {
+ %B = setle int %A, %A
+ ret bool %B
+}
+
+bool %test6(int %A) {
+ %B = setge int %A, %A
+ ret bool %B
+}
+
+bool %test7(uint %A) {
+ %B = setge uint %A, 0 ; true
+ ret bool %B
+}
+
+bool %test8(uint %A) {
+ %B = setlt uint %A, 0 ; false
+ ret bool %B
+}
+
+;; test operations on boolean values these should all be eliminated$a
+bool %test9(bool %A) {
+ %B = setlt bool %A, false ; false
+ ret bool %B
+}
+bool %test10(bool %A) {
+ %B = setgt bool %A, true ; false
+ ret bool %B
+}
+bool %test11(bool %A) {
+ %B = setle bool %A, true ; true
+ ret bool %B
+}
+bool %test12(bool %A) {
+ %B = setge bool %A, false ; true
+ ret bool %B
+}
+bool %test13(bool %A, bool %B) {
+ %C = setge bool %A, %B ; A | ~B
+ ret bool %C
+}
+bool %test14(bool %A, bool %B) {
+ %C = seteq bool %A, %B ; ~(A ^ B)
+ ret bool %C
+}
+
+bool %test16(uint %A) {
+ %B = and uint %A, 5
+ %C = seteq uint %B, 8 ; Is never true
+ ret bool %C
+}
+
+bool %test17(ubyte %A) {
+ %B = or ubyte %A, 1
+ %C = seteq ubyte %B, 2 ; Always false
+ ret bool %C
+}
+
+bool %test18(bool %C, int %a) {
+entry:
+ br bool %C, label %endif, label %else
+
+else:
+ br label %endif
+
+endif:
+ %b.0 = phi int [ 0, %entry ], [ 1, %else ]
+ %tmp.4 = setlt int %b.0, 123
+ ret bool %tmp.4
+}
+
+bool %test19(bool %A, bool %B) {
+ %a = cast bool %A to int
+ %b = cast bool %B to int
+ %C = seteq int %a, %b
+ ret bool %C
+}
+
+uint %test20(uint %A) {
+ %B = and uint %A, 1
+ %C = setne uint %B, 0
+ %D = cast bool %C to uint
+ ret uint %D
+}
+
+int %test21(int %a) {
+ %tmp.6 = and int %a, 4
+ %not.tmp.7 = setne int %tmp.6, 0
+ %retval = cast bool %not.tmp.7 to int
+ ret int %retval
+}
+
+bool %test22(uint %A, int %X) {
+ %B = and uint %A, 100663295
+ %C = setlt uint %B, 268435456
+ %Y = and int %X, 7
+ %Z = setgt int %Y, -1
+ %R = or bool %C, %Z
+ ret bool %R
+}
+
+int %test23(int %a) {
+ %tmp.1 = and int %a, 1
+ %tmp.2 = seteq int %tmp.1, 0
+ %tmp.3 = cast bool %tmp.2 to int ;; xor tmp1, 1
+ ret int %tmp.3
+}
+
+int %test24(uint %a) {
+ %tmp1 = and uint %a, 4
+ %tmp.1 = shr uint %tmp1, ubyte 2
+ %tmp.2 = seteq uint %tmp.1, 0
+ %tmp.3 = cast bool %tmp.2 to int ;; xor tmp1, 1
+ ret int %tmp.3
+}
+
+bool %test25(uint %A) {
+ %B = and uint %A, 2
+ %C = setgt uint %B, 2
+ ret bool %C
+}
diff --git a/test/Transforms/InstCombine/setcc-cast-cast.ll b/test/Transforms/InstCombine/setcc-cast-cast.ll
new file mode 100644
index 0000000..903f0b4
--- /dev/null
+++ b/test/Transforms/InstCombine/setcc-cast-cast.ll
@@ -0,0 +1,45 @@
+; This test case was reduced from MultiSource/Applications/hbd. It makes sure
+; that folding doesn't happen in case a zext is applied where a sext should have
+; been when a setcc is used with two casts.
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep {br bool false}
+; END.
+
+int %bug(ubyte %inbuff) {
+entry:
+ %tmp = bitcast ubyte %inbuff to sbyte ; <sbyte> [#uses=1]
+ %tmp = sext sbyte %tmp to int ; <int> [#uses=3]
+ %tmp = seteq int %tmp, 1 ; <bool> [#uses=1]
+ br bool %tmp, label %cond_true, label %cond_next
+
+cond_true: ; preds = %entry
+ br label %bb
+
+cond_next: ; preds = %entry
+ %tmp3 = seteq int %tmp, -1 ; <bool> [#uses=1]
+ br bool %tmp3, label %cond_true4, label %cond_next5
+
+cond_true4: ; preds = %cond_next
+ br label %bb
+
+cond_next5: ; preds = %cond_next
+ %tmp7 = setgt int %tmp, 1 ; <bool> [#uses=1]
+ br bool %tmp7, label %cond_true8, label %cond_false
+
+cond_true8: ; preds = %cond_next5
+ br label %cond_next9
+
+cond_false: ; preds = %cond_next5
+ br label %cond_next9
+
+cond_next9: ; preds = %cond_false, %cond_true8
+ %iftmp.1.0 = phi int [ 42, %cond_true8 ], [ 23, %cond_false ] ; <int> [#uses=1]
+ br label %return
+
+bb: ; preds = %cond_true4, %cond_true
+ br label %return
+
+return: ; preds = %bb, %cond_next9
+ %retval.0 = phi int [ 17, %bb ], [ %iftmp.1.0, %cond_next9 ] ; <int> [#uses=1]
+ ret int %retval.0
+}
diff --git a/test/Transforms/InstCombine/setcc-strength-reduce.ll b/test/Transforms/InstCombine/setcc-strength-reduce.ll
new file mode 100644
index 0000000..b5ea837
--- /dev/null
+++ b/test/Transforms/InstCombine/setcc-strength-reduce.ll
@@ -0,0 +1,32 @@
+; This test ensures that "strength reduction" of conditional expressions are
+; working. Basically this boils down to converting setlt,gt,le,ge instructions
+; into equivalent setne,eq instructions.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep -v {icmp eq} | grep -v {icmp ne} | not grep icmp
+; END.
+
+bool %test1(uint %A) {
+ %B = setge uint %A, 1 ; setne %A, 0
+ ret bool %B
+}
+
+bool %test2(uint %A) {
+ %B = setgt uint %A, 0 ; setne %A, 0
+ ret bool %B
+}
+
+bool %test3(sbyte %A) {
+ %B = setge sbyte %A, -127 ; setne %A, -128
+ ret bool %B
+}
+
+bool %test4(sbyte %A) {
+ %B = setle sbyte %A, 126 ; setne %A, 127
+ ret bool %B
+}
+
+bool %test5(sbyte %A) {
+ %B = setlt sbyte %A, 127 ; setne %A, 127
+ ret bool %B
+}
diff --git a/test/Transforms/InstCombine/shift-simplify.ll b/test/Transforms/InstCombine/shift-simplify.ll
new file mode 100644
index 0000000..e028385
--- /dev/null
+++ b/test/Transforms/InstCombine/shift-simplify.ll
@@ -0,0 +1,42 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: egrep {shl|lshr|ashr} | wc -l | grep 3
+
+define i32 @test0(i32 %A, i32 %B, i32 %C) {
+ %X = shl i32 %A, %C
+ %Y = shl i32 %B, %C
+ %Z = and i32 %X, %Y
+ ret i32 %Z
+}
+
+define i32 @test1(i32 %A, i32 %B, i32 %C) {
+ %X = lshr i32 %A, %C
+ %Y = lshr i32 %B, %C
+ %Z = or i32 %X, %Y
+ ret i32 %Z
+}
+
+define i32 @test2(i32 %A, i32 %B, i32 %C) {
+ %X = ashr i32 %A, %C
+ %Y = ashr i32 %B, %C
+ %Z = xor i32 %X, %Y
+ ret i32 %Z
+}
+
+define i1 @test3(i32 %X) {
+ %tmp1 = shl i32 %X, 7
+ %tmp2 = icmp slt i32 %tmp1, 0
+ ret i1 %tmp2
+}
+
+define i1 @test4(i32 %X) {
+ %tmp1 = lshr i32 %X, 7
+ %tmp2 = icmp slt i32 %tmp1, 0
+ ret i1 %tmp2
+}
+
+define i1 @test5(i32 %X) {
+ %tmp1 = ashr i32 %X, 7
+ %tmp2 = icmp slt i32 %tmp1, 0
+ ret i1 %tmp2
+}
+
diff --git a/test/Transforms/InstCombine/shift-sra.ll b/test/Transforms/InstCombine/shift-sra.ll
new file mode 100644
index 0000000..f390772
--- /dev/null
+++ b/test/Transforms/InstCombine/shift-sra.ll
@@ -0,0 +1,17 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep {lshr i32} | wc -l | grep 2
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep ashr
+
+int %test1(int %X, ubyte %A) {
+ %Y = shr int %X, ubyte %A ; can be logical shift.
+ %Z = and int %Y, 1
+ ret int %Z
+}
+
+int %test2(ubyte %tmp) {
+ %tmp3 = cast ubyte %tmp to int
+ %tmp4 = add int %tmp3, 7
+ %tmp5 = ashr int %tmp4, ubyte 3 ; lshr
+ ret int %tmp5
+}
+
diff --git a/test/Transforms/InstCombine/shift.ll b/test/Transforms/InstCombine/shift.ll
new file mode 100644
index 0000000..5ce8070
--- /dev/null
+++ b/test/Transforms/InstCombine/shift.ll
@@ -0,0 +1,189 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep sh
+; END.
+
+implementation
+
+int %test1(int %A) {
+ %B = shl int %A, ubyte 0
+ ret int %B
+}
+
+int %test2(ubyte %A) {
+ %B = shl int 0, ubyte %A
+ ret int %B
+}
+
+int %test3(int %A) {
+ %B = shr int %A, ubyte 0
+ ret int %B
+}
+
+int %test4(ubyte %A) {
+ %B = shr int 0, ubyte %A
+ ret int %B
+}
+
+uint %test5(uint %A) {
+ %B = shr uint %A, ubyte 32 ;; shift all bits out
+ ret uint %B
+}
+
+uint %test5a(uint %A) {
+ %B = shl uint %A, ubyte 32 ;; shift all bits out
+ ret uint %B
+}
+
+uint %test6(uint %A) {
+ %B = shl uint %A, ubyte 1 ;; convert to an mul instruction
+ %C = mul uint %B, 3
+ ret uint %C
+}
+
+int %test7(ubyte %A) {
+ %B = shr int -1, ubyte %A ;; Always equal to -1
+ ret int %B
+}
+
+ubyte %test8(ubyte %A) { ;; (A << 5) << 3 === A << 8 == 0
+ %B = shl ubyte %A, ubyte 5
+ %C = shl ubyte %B, ubyte 3
+ ret ubyte %C
+}
+
+ubyte %test9(ubyte %A) { ;; (A << 7) >> 7 === A & 1
+ %B = shl ubyte %A, ubyte 7
+ %C = shr ubyte %B, ubyte 7
+ ret ubyte %C
+}
+
+ubyte %test10(ubyte %A) { ;; (A >> 7) << 7 === A & 128
+ %B = shr ubyte %A, ubyte 7
+ %C = shl ubyte %B, ubyte 7
+ ret ubyte %C
+}
+
+ubyte %test11(ubyte %A) { ;; (A >> 3) << 4 === (A & 0x1F) << 1
+ %a = mul ubyte %A, 3
+ %B = shr ubyte %a, ubyte 3
+ %C = shl ubyte %B, ubyte 4
+ ret ubyte %C
+}
+
+int %test12(int %A) {
+ %B = shr int %A, ubyte 8 ;; (A >> 8) << 8 === A & -256
+ %C = shl int %B, ubyte 8
+ ret int %C
+}
+
+sbyte %test13(sbyte %A) { ;; (A >> 3) << 4 === (A & -8) * 2
+ %a = mul sbyte %A, 3
+ %B = shr sbyte %a, ubyte 3
+ %C = shl sbyte %B, ubyte 4
+ ret sbyte %C
+}
+
+uint %test14(uint %A) {
+ %B = shr uint %A, ubyte 4
+ %C = or uint %B, 1234
+ %D = shl uint %C, ubyte 4 ;; D = ((B | 1234) << 4) === ((B << 4)|(1234 << 4)
+ ret uint %D
+}
+uint %test14a(uint %A) {
+ %B = shl uint %A, ubyte 4
+ %C = and uint %B, 1234
+ %D = shr uint %C, ubyte 4 ;; D = ((B | 1234) << 4) === ((B << 4)|(1234 << 4)
+ ret uint %D
+}
+
+int %test15(bool %C) {
+ %A = select bool %C, int 3, int 1
+ %V = shl int %A, ubyte 2
+ ret int %V
+}
+
+int %test15a(bool %C) {
+ %A = select bool %C, ubyte 3, ubyte 1
+ %V = shl int 64, ubyte %A
+ ret int %V
+}
+
+bool %test16(int %X) {
+ %tmp.3 = shr int %X, ubyte 4
+ %tmp.6 = and int %tmp.3, 1
+ %tmp.7 = setne int %tmp.6, 0 ;; X & 16 != 0
+ ret bool %tmp.7
+}
+
+bool %test17(uint %A) {
+ %B = shr uint %A, ubyte 3
+ %C = seteq uint %B, 1234
+ ret bool %C
+}
+
+bool %test18(ubyte %A) {
+ %B = shr ubyte %A, ubyte 7
+ %C = seteq ubyte %B, 123 ;; false
+ ret bool %C
+}
+
+bool %test19(int %A) {
+ %B = shr int %A, ubyte 2
+ %C = seteq int %B, 0 ;; (X & -4) == 0
+ ret bool %C
+}
+
+bool %test19a(int %A) {
+ %B = shr int %A, ubyte 2
+ %C = seteq int %B, -1 ;; (X & -4) == -4
+ ret bool %C
+}
+
+bool %test20(sbyte %A) {
+ %B = shr sbyte %A, ubyte 7
+ %C = seteq sbyte %B, 123 ;; false
+ ret bool %C
+}
+
+bool %test21(ubyte %A) {
+ %B = shl ubyte %A, ubyte 4
+ %C = seteq ubyte %B, 128
+ ret bool %C
+}
+
+bool %test22(ubyte %A) {
+ %B = shl ubyte %A, ubyte 4
+ %C = seteq ubyte %B, 0
+ ret bool %C
+}
+
+sbyte %test23(int %A) {
+ %B = shl int %A, ubyte 24 ;; casts not needed
+ %C = shr int %B, ubyte 24
+ %D = cast int %C to sbyte
+ ret sbyte %D
+}
+
+sbyte %test24(sbyte %X) {
+ %Y = and sbyte %X, -5 ; ~4
+ %Z = shl sbyte %Y, ubyte 5
+ %Q = shr sbyte %Z, ubyte 5
+ ret sbyte %Q
+}
+
+uint %test25(uint %tmp.2, uint %AA) {
+ %x = shr uint %AA, ubyte 17
+ %tmp.3 = shr uint %tmp.2, ubyte 17 ; <uint> [#uses=1]
+ %tmp.5 = add uint %tmp.3, %x ; <uint> [#uses=1]
+ %tmp.6 = shl uint %tmp.5, ubyte 17 ; <uint> [#uses=1]
+ ret uint %tmp.6
+}
+
+int %test26(uint %A) { ;; handle casts between shifts.
+ %B = shr uint %A, ubyte 1
+ %C = cast uint %B to int
+ %D = shl int %C, ubyte 1
+ ret int %D
+}
+
diff --git a/test/Transforms/InstCombine/shl-trunc.ll b/test/Transforms/InstCombine/shl-trunc.ll
new file mode 100644
index 0000000..bfd41a8
--- /dev/null
+++ b/test/Transforms/InstCombine/shl-trunc.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep shl
+
+bool %test(int %X, ubyte %A) {
+ %B = lshr int %X, ubyte %A
+ %D = trunc int %B to bool
+ ret bool %D
+}
diff --git a/test/Transforms/InstCombine/signext.ll b/test/Transforms/InstCombine/signext.ll
new file mode 100644
index 0000000..f69cbc0
--- /dev/null
+++ b/test/Transforms/InstCombine/signext.ll
@@ -0,0 +1,44 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep {(and\|xor\|add\|shl\|shr)}
+; END.
+
+int %test1(int %x) {
+ %tmp.1 = and int %x, 65535 ; <int> [#uses=1]
+ %tmp.2 = xor int %tmp.1, -32768 ; <int> [#uses=1]
+ %tmp.3 = add int %tmp.2, 32768 ; <int> [#uses=1]
+ ret int %tmp.3
+}
+
+int %test2(int %x) {
+ %tmp.1 = and int %x, 65535 ; <int> [#uses=1]
+ %tmp.2 = xor int %tmp.1, 32768 ; <int> [#uses=1]
+ %tmp.3 = add int %tmp.2, -32768 ; <int> [#uses=1]
+ ret int %tmp.3
+}
+
+int %test3(ushort %P) {
+ %tmp.1 = cast ushort %P to int ; <int> [#uses=1]
+ %tmp.4 = xor int %tmp.1, 32768 ; <int> [#uses=1]
+ %tmp.5 = add int %tmp.4, -32768 ; <int> [#uses=1]
+ ret int %tmp.5
+}
+
+uint %test4(ushort %P) {
+ %tmp.1 = cast ushort %P to uint ; <uint> [#uses=1]
+ %tmp.4 = xor uint %tmp.1, 32768 ; <uint> [#uses=1]
+ %tmp.5 = add uint %tmp.4, 4294934528 ; <uint> [#uses=1]
+ ret uint %tmp.5
+}
+
+int %test5(int %x) {
+ %tmp.1 = and int %x, 254
+ %tmp.2 = xor int %tmp.1, 128
+ %tmp.3 = add int %tmp.2, -128
+ ret int %tmp.3
+}
+
+int %test6(int %x) {
+ %tmp.2 = shl int %x, ubyte 16 ; <int> [#uses=1]
+ %tmp.4 = shr int %tmp.2, ubyte 16 ; <int> [#uses=1]
+ ret int %tmp.4
+}
diff --git a/test/Transforms/InstCombine/sink_instruction.ll b/test/Transforms/InstCombine/sink_instruction.ll
new file mode 100644
index 0000000..93519ff
--- /dev/null
+++ b/test/Transforms/InstCombine/sink_instruction.ll
@@ -0,0 +1,18 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: %prcontext div 1 | grep ret
+
+;; This tests that the instructions in the entry blocks are sunk into each
+;; arm of the 'if'.
+
+int %foo(bool %C, int %A, int %B) {
+entry:
+ %tmp.2 = div int %A, %B
+ %tmp.9 = add int %B, %A
+ br bool %C, label %then, label %endif
+
+then:
+ ret int %tmp.9
+
+endif:
+ ret int %tmp.2
+}
diff --git a/test/Transforms/InstCombine/stacksaverestore.ll b/test/Transforms/InstCombine/stacksaverestore.ll
new file mode 100644
index 0000000..f9cc9c2
--- /dev/null
+++ b/test/Transforms/InstCombine/stacksaverestore.ll
@@ -0,0 +1,19 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep call
+
+;; Test that llvm.stackrestore is removed when possible.
+
+int* %test1(uint %P) {
+ %tmp = call sbyte* %llvm.stacksave()
+ call void %llvm.stackrestore(sbyte* %tmp) ;; not restoring anything
+ %A = alloca int, uint %P
+ ret int* %A
+}
+
+void %test2(sbyte* %X) {
+ call void %llvm.stackrestore(sbyte* %X) ;; no allocas before return.
+ ret void
+}
+
+declare sbyte* %llvm.stacksave()
+
+declare void %llvm.stackrestore(sbyte*)
diff --git a/test/Transforms/InstCombine/store-merge.ll b/test/Transforms/InstCombine/store-merge.ll
new file mode 100644
index 0000000..4df30e4
--- /dev/null
+++ b/test/Transforms/InstCombine/store-merge.ll
@@ -0,0 +1,37 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: grep {ret i32 %.toremerge} | wc -l | grep 2
+;; Simple sinking tests
+
+; "if then else"
+define i32 @test1(i1 %C) {
+ %A = alloca i32
+ br i1 %C, label %Cond, label %Cond2
+
+Cond:
+ store i32 -987654321, i32* %A
+ br label %Cont
+
+Cond2:
+ store i32 47, i32* %A
+ br label %Cont
+
+Cont:
+ %V = load i32* %A
+ ret i32 %V
+}
+
+; "if then"
+define i32 @test2(i1 %C) {
+ %A = alloca i32
+ store i32 47, i32* %A
+ br i1 %C, label %Cond, label %Cont
+
+Cond:
+ store i32 -987654321, i32* %A
+ br label %Cont
+
+Cont:
+ %V = load i32* %A
+ ret i32 %V
+}
+
diff --git a/test/Transforms/InstCombine/store.ll b/test/Transforms/InstCombine/store.ll
new file mode 100644
index 0000000..0b7d12f
--- /dev/null
+++ b/test/Transforms/InstCombine/store.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep -v {store.*,.*null} | not grep store
+
+void %test1(int* %P) {
+ store int undef, int* %P
+ store int 123, int* undef
+ store int 124, int* null
+ ret void
+}
+
+void %test2(int* %P) {
+ %X = load int* %P
+ %Y = add int %X, 0
+ store int %Y, int* %P
+ ret void
+}
diff --git a/test/Transforms/InstCombine/sub.ll b/test/Transforms/InstCombine/sub.ll
new file mode 100644
index 0000000..dfee312
--- /dev/null
+++ b/test/Transforms/InstCombine/sub.ll
@@ -0,0 +1,139 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep -v {sub i32 %Cok, %Bok} | not grep sub
+
+implementation
+
+int %test1(int %A) {
+ %B = sub int %A, %A ; ISA constant 0
+ ret int %B
+}
+
+int %test2(int %A) {
+ %B = sub int %A, 0
+ ret int %B
+}
+
+int %test3(int %A) {
+ %B = sub int 0, %A ; B = -A
+ %C = sub int 0, %B ; C = -B = A
+ ret int %C
+}
+
+int %test4(int %A, int %x) {
+ %B = sub int 0, %A
+ %C = sub int %x, %B
+ ret int %C
+}
+
+int %test5(int %A, int %Bok, int %Cok) {
+ %D = sub int %Bok, %Cok
+ %E = sub int %A, %D
+ ret int %E
+}
+
+int %test6(int %A, int %B) {
+ %C = and int %A, %B ; A - (A & B) => A & ~B
+ %D = sub int %A, %C
+ ret int %D
+}
+
+int %test7(int %A) {
+ %B = sub int -1, %A ; B = ~A
+ ret int %B
+}
+
+int %test8(int %A) {
+ %B = mul int 9, %A
+ %C = sub int %B, %A ; C = 9*A-A == A*8 == A << 3
+ ret int %C
+}
+
+int %test9(int %A) {
+ %B = mul int 3, %A
+ %C = sub int %A, %B ; C = A-3*A == A*-2
+ ret int %C
+}
+
+int %test10(int %A, int %B) { ; -A*-B == A*B
+ %C = sub int 0, %A
+ %D = sub int 0, %B
+ %E = mul int %C, %D
+ ret int %E
+}
+
+int %test10(int %A) { ; -A *c1 == A * -c1
+ %C = sub int 0, %A
+ %E = mul int %C, 7
+ ret int %E
+}
+
+bool %test11(ubyte %A, ubyte %B) {
+ %C = sub ubyte %A, %B
+ %cD = setne ubyte %C, 0 ; == setne A, B
+ ret bool %cD
+}
+
+int %test12(int %A) {
+ %B = shr int %A, ubyte 31
+ %C = sub int 0, %B ; == ushr A, 31
+ ret int %C
+}
+
+uint %test13(uint %A) {
+ %B = shr uint %A, ubyte 31
+ %C = sub uint 0, %B ; == sar A, 31
+ ret uint %C
+}
+
+int %test14(uint %A) {
+ %B = shr uint %A, ubyte 31
+ %C = cast uint %B to int
+ %D = sub int 0, %C
+ ret int %D
+}
+
+int %test15(int %A, int %B) {
+ %C = sub int 0, %A
+ %D = rem int %B, %C ;; X % -Y === X % Y
+ ret int %D
+}
+
+int %test16(int %A) {
+ %X = div int %A, 1123
+ %Y = sub int 0, %X
+ ret int %Y
+}
+
+int %test17(int %A) {
+ %B = sub int 0, %A
+ %C = div int %B, 1234
+ ret int %C
+}
+
+long %test18(long %Y) {
+ %tmp.4 = shl long %Y, ubyte 2
+ %tmp.12 = shl long %Y, ubyte 2
+ %tmp.8 = sub long %tmp.4, %tmp.12 ;; 0
+ ret long %tmp.8
+}
+
+int %test19(int %X, int %Y) {
+ %Z = sub int %X, %Y
+ %Q = add int %Z, %Y
+ ret int %Q
+}
+
+bool %test20(int %g, int %h) {
+ %tmp.2 = sub int %g, %h
+ %tmp.4 = setne int %tmp.2, %g
+ ret bool %tmp.4
+}
+
+bool %test21(int %g, int %h) {
+ %tmp.2 = sub int %g, %h
+ %tmp.4 = setne int %tmp.2, %g
+ ret bool %tmp.4
+}
+
diff --git a/test/Transforms/InstCombine/udiv_select_to_select_shift.ll b/test/Transforms/InstCombine/udiv_select_to_select_shift.ll
new file mode 100644
index 0000000..fe05741
--- /dev/null
+++ b/test/Transforms/InstCombine/udiv_select_to_select_shift.ll
@@ -0,0 +1,17 @@
+; Test that this transform works:
+; udiv X, (Select Cond, C1, C2) --> Select Cond, (shr X, C1), (shr X, C2)
+;
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis -f -o %t
+; RUN: grep select %t | wc -l | grep 1
+; RUN: grep lshr %t | wc -l | grep 2
+; RUN: ignore grep udiv %t | wc -l | grep 0
+
+define i64 @test(i64 %X, i1 %Cond ) {
+entry:
+ %divisor1 = select i1 %Cond, i64 8, i64 16
+ %quotient1 = udiv i64 %X, %divisor1
+ %divisor2 = select i1 %Cond, i64 8, i64 0
+ %quotient2 = udiv i64 %X, %divisor2
+ %sum = add i64 %quotient1, %quotient2
+ ret i64 %sum
+}
diff --git a/test/Transforms/InstCombine/vec_demanded_elts.ll b/test/Transforms/InstCombine/vec_demanded_elts.ll
new file mode 100644
index 0000000..03e070f
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -0,0 +1,47 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: grep {sub float}
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: grep {mul float}
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: not grep {insertelement.*0.00}
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: not grep {call.*llvm.x86.sse.mul}
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: not grep {call.*llvm.x86.sse.sub}
+; END.
+
+define i16 @test1(float %f) {
+entry:
+ %tmp = insertelement <4 x float> undef, float %f, i32 0 ; <<4 x float>> [#uses=1]
+ %tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
+ %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
+ %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
+ %tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %tmp12, <4 x float> < float 1.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
+ %tmp37 = tail call <4 x float> @llvm.x86.sse.mul.ss( <4 x float> %tmp28, <4 x float> < float 5.000000e-01, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
+ %tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp37, <4 x float> < float 6.553500e+04, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
+ %tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> zeroinitializer ) ; <<4 x float>> [#uses=1]
+ %tmp.upgrd.1 = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 ) ; <i32> [#uses=1]
+ %tmp69 = trunc i32 %tmp.upgrd.1 to i16 ; <i16> [#uses=1]
+ ret i16 %tmp69
+}
+
+define i32 @test2(float %f) {
+ %tmp5 = mul float %f, %f
+ %tmp9 = insertelement <4 x float> undef, float %tmp5, i32 0
+ %tmp10 = insertelement <4 x float> %tmp9, float 0.000000e+00, i32 1
+ %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2
+ %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3
+ %tmp19 = bitcast <4 x float> %tmp12 to <4 x i32>
+ %tmp21 = extractelement <4 x i32> %tmp19, i32 0
+ ret i32 %tmp21
+}
+
+declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
+
+declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>)
+
+declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
+
+declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>)
+
+declare i32 @llvm.x86.sse.cvttss2si(<4 x float>)
diff --git a/test/Transforms/InstCombine/vec_extract_elt.ll b/test/Transforms/InstCombine/vec_extract_elt.ll
new file mode 100644
index 0000000..30b2f1d
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_extract_elt.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep extractelement
+
+define i32 @test(float %f) {
+ %tmp7 = insertelement <4 x float> undef, float %f, i32 0 ; <<4 x float>> [#uses=1]
+ %tmp17 = bitcast <4 x float> %tmp7 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp19 = extractelement <4 x i32> %tmp17, i32 0 ; <i32> [#uses=1]
+ ret i32 %tmp19
+}
+
diff --git a/test/Transforms/InstCombine/vec_insert_to_shuffle.ll b/test/Transforms/InstCombine/vec_insert_to_shuffle.ll
new file mode 100644
index 0000000..54ac882
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_insert_to_shuffle.ll
@@ -0,0 +1,18 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep shufflevec | wc -l | grep 1
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep insertelement
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep extractelement
+; END.
+
+<4 x float> %test(<4 x float> %tmp, <4 x float> %tmp1) {
+ %tmp4 = extractelement <4 x float> %tmp, uint 1 ; <float> [#uses=1]
+ %tmp2 = extractelement <4 x float> %tmp, uint 3 ; <float> [#uses=1]
+ %tmp1 = extractelement <4 x float> %tmp1, uint 0 ; <float> [#uses=1]
+ %tmp128 = insertelement <4 x float> undef, float %tmp4, uint 0 ; <<4 x float>> [#uses=1]
+ %tmp130 = insertelement <4 x float> %tmp128, float undef, uint 1 ; <<4 x float>> [#uses=1]
+ %tmp132 = insertelement <4 x float> %tmp130, float %tmp2, uint 2 ; <<4 x float>> [#uses=1]
+ %tmp134 = insertelement <4 x float> %tmp132, float %tmp1, uint 3 ; <<4 x float>> [#uses=1]
+ ret <4 x float> %tmp134
+}
diff --git a/test/Transforms/InstCombine/vec_insertelt.ll b/test/Transforms/InstCombine/vec_insertelt.ll
new file mode 100644
index 0000000..9be154b
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_insertelt.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {ret <4 x i32> %A}
+
+; PR1286
+define <4 x i32> @test1(<4 x i32> %A) {
+ %B = insertelement <4 x i32> %A, i32 undef, i32 1
+ ret <4 x i32> %B
+}
diff --git a/test/Transforms/InstCombine/vec_narrow.ll b/test/Transforms/InstCombine/vec_narrow.ll
new file mode 100644
index 0000000..ca94b08
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_narrow.ll
@@ -0,0 +1,12 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep {add float}
+
+%V = type <4 x float>
+
+float %test(%V %A, %V %B, float %f) {
+ %C = insertelement %V %A, float %f, uint 0
+ %D = add %V %C, %B
+ %E = extractelement %V %D, uint 0
+ ret float %E
+}
+
diff --git a/test/Transforms/InstCombine/vec_shuffle.ll b/test/Transforms/InstCombine/vec_shuffle.ll
new file mode 100644
index 0000000..aaaee3f
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_shuffle.ll
@@ -0,0 +1,47 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep vector_shuffle
+; END.
+
+%T = type <4 x float>
+
+
+define %T @test1(%T %v1) {
+ %v2 = shufflevector %T %v1, %T undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret %T %v2
+}
+
+define %T @test2(%T %v1) {
+ %v2 = shufflevector %T %v1, %T %v1, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret %T %v2
+}
+
+define float @test3(%T %A, %T %B, float %f) {
+ %C = insertelement %T %A, float %f, i32 0
+ %D = shufflevector %T %C, %T %B, <4 x i32> <i32 5, i32 0, i32 2, i32 7>
+ %E = extractelement %T %D, i32 1
+ ret float %E
+}
+
+define i32 @test4(<4 x i32> %X) {
+ %tmp152.i53899.i = shufflevector <4 x i32> %X, <4 x i32> undef, <4 x i32> zeroinitializer
+ %tmp34 = extractelement <4 x i32> %tmp152.i53899.i, i32 0
+ ret i32 %tmp34
+}
+
+define i32 @test5(<4 x i32> %X) {
+ %tmp152.i53899.i = shufflevector <4 x i32> %X, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 undef, i32 undef>
+ %tmp34 = extractelement <4 x i32> %tmp152.i53899.i, i32 0
+ ret i32 %tmp34
+}
+
+define float @test6(<4 x float> %X) {
+ %X1 = bitcast <4 x float> %X to <4 x i32>
+ %tmp152.i53899.i = shufflevector <4 x i32> %X1, <4 x i32> undef, <4 x i32> zeroinitializer
+ %tmp152.i53900.i = bitcast <4 x i32> %tmp152.i53899.i to <4 x float>
+ %tmp34 = extractelement <4 x float> %tmp152.i53900.i, i32 0
+ ret float %tmp34
+}
+
+define <4 x float> @test7(<4 x float> %tmp45.i) {
+ %tmp1642.i = shufflevector <4 x float> %tmp45.i, <4 x float> undef, <4 x i32> < i32 0, i32 1, i32 6, i32 7 >
+ ret <4 x float> %tmp1642.i
+}
diff --git a/test/Transforms/InstCombine/xor.ll b/test/Transforms/InstCombine/xor.ll
new file mode 100644
index 0000000..e201149
--- /dev/null
+++ b/test/Transforms/InstCombine/xor.ll
@@ -0,0 +1,198 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep {xor }
+; END.
+
+%G1 = global uint 0
+%G2 = global uint 0
+
+implementation
+
+bool %test0(bool %A) {
+ %B = xor bool %A, false
+ ret bool %B
+}
+
+int %test1(int %A) {
+ %B = xor int %A, 0
+ ret int %B
+}
+
+bool %test2(bool %A) {
+ %B = xor bool %A, %A
+ ret bool %B
+}
+
+int %test3(int %A) {
+ %B = xor int %A, %A
+ ret int %B
+}
+
+int %test4(int %A) { ; A ^ ~A == -1
+ %NotA = xor int -1, %A
+ %B = xor int %A, %NotA
+ ret int %B
+}
+
+uint %test5(uint %A) { ; (A|B)^B == A & (~B)
+ %t1 = or uint %A, 123
+ %r = xor uint %t1, 123
+ ret uint %r
+}
+
+ubyte %test6(ubyte %A) {
+ %B = xor ubyte %A, 17
+ %C = xor ubyte %B, 17
+ ret ubyte %C
+}
+
+; (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
+int %test7(int %A, int %B) {
+
+ %A1 = and int %A, 7
+ %B1 = and int %B, 128
+ %C1 = xor int %A1, %B1
+ ret int %C1
+}
+
+ubyte %test8(bool %c) {
+ %d = xor bool %c, true ; invert the condition
+ br bool %d, label %True, label %False
+True:
+ ret ubyte 1
+False:
+ ret ubyte 3
+}
+
+bool %test9(ubyte %A) {
+ %B = xor ubyte %A, 123 ; xor can be eliminated
+ %C = seteq ubyte %B, 34
+ ret bool %C
+}
+
+ubyte %test10(ubyte %A) {
+ %B = and ubyte %A, 3
+ %C = xor ubyte %B, 4 ; transform into an OR
+ ret ubyte %C
+}
+
+ubyte %test11(ubyte %A) {
+ %B = or ubyte %A, 12
+ %C = xor ubyte %B, 4 ; transform into an AND
+ ret ubyte %C
+}
+
+bool %test12(ubyte %A) {
+ %B = xor ubyte %A, 4
+ %c = setne ubyte %B, 0
+ ret bool %c
+}
+
+bool %test13(ubyte %A, ubyte %B) {
+ %C = setlt ubyte %A, %B
+ %D = setgt ubyte %A, %B
+ %E = xor bool %C, %D ; E = setne %A, %B
+ ret bool %E
+}
+
+bool %test14(ubyte %A, ubyte %B) {
+ %C = seteq ubyte %A, %B
+ %D = setne ubyte %B, %A
+ %E = xor bool %C, %D ; E = true
+ ret bool %E
+}
+
+uint %test15(uint %A) { ; ~(X-1) == -X
+ %B = add uint %A, 4294967295
+ %C = xor uint %B, 4294967295
+ ret uint %C
+}
+
+uint %test16(uint %A) { ; ~(X+c) == (-c-1)-X
+ %B = add uint %A, 123 ; A generalization of the previous case
+ %C = xor uint %B, 4294967295
+ ret uint %C
+}
+
+uint %test17(uint %A) { ; ~(c-X) == X-(c-1) == X+(-c+1)
+ %B = sub uint 123, %A
+ %C = xor uint %B, 4294967295
+ ret uint %C
+}
+
+uint %test18(uint %A) { ; C - ~X == X + (1+C)
+ %B = xor uint %A, 4294967295; -~X == 0 - ~X == X+1
+ %C = sub uint 123, %B
+ ret uint %C
+}
+
+uint %test19(uint %A, uint %B) {
+ %C = xor uint %A, %B
+ %D = xor uint %C, %A ; A terms cancel, D = B
+ ret uint %D
+}
+
+void %test20(uint %A, uint %B) { ; The "swap idiom"
+ %tmp.2 = xor uint %B, %A
+ %tmp.5 = xor uint %tmp.2, %B
+ %tmp.8 = xor uint %tmp.5, %tmp.2
+ store uint %tmp.8, uint* %G1 ; tmp.8 = B
+ store uint %tmp.5, uint* %G2 ; tmp.5 = A
+ ret void
+}
+
+int %test21(bool %C, int %A, int %B) {
+ %C2 = xor bool %C, true
+ %D = select bool %C2, int %A, int %B
+ ret int %D
+}
+
+int %test22(bool %X) {
+ %Y = xor bool %X, true
+ %Z = cast bool %Y to int
+ %Q = xor int %Z, 1
+ ret int %Q
+}
+
+bool %test23(int %a, int %b) {
+ %tmp.2 = xor int %b, %a
+ %tmp.4 = seteq int %tmp.2, %a
+ ret bool %tmp.4
+}
+
+bool %test24(int %c, int %d) {
+ %tmp.2 = xor int %d, %c
+ %tmp.4 = setne int %tmp.2, %c
+ ret bool %tmp.4
+}
+
+int %test25(int %g, int %h) {
+ %h2 = xor int %h, -1
+ %tmp2 = and int %h2, %g
+ %tmp4 = xor int %tmp2, %g ; (h2&g)^g -> ~h2 & g -> h & g
+ ret int %tmp4
+}
+
+int %test26(int %a, int %b) {
+ %b2 = xor int %b, -1
+ %tmp2 = xor int %a, %b2
+ %tmp4 = and int %tmp2, %a ; (a^b2)&a -> ~b2 & a -> b & a
+ ret int %tmp4
+}
+
+
+int %test27(int %b, int %c, int %d) {
+ %tmp2 = xor int %d, %b
+ %tmp5 = xor int %d, %c
+ %tmp = icmp eq int %tmp2, %tmp5
+ %tmp6 = zext bool %tmp to int
+ ret int %tmp6
+}
+
+int %test28(int %indvar) {
+ %tmp7 = add int %indvar, -2147483647
+ %tmp214 = xor int %tmp7, -2147483648
+ ret int %tmp214
+}
+
diff --git a/test/Transforms/InstCombine/xor2.ll b/test/Transforms/InstCombine/xor2.ll
new file mode 100644
index 0000000..efb3146
--- /dev/null
+++ b/test/Transforms/InstCombine/xor2.ll
@@ -0,0 +1,17 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep {xor }
+
+; PR1253
+define i1 @test0(i32 %A) {
+ %B = xor i32 %A, -2147483648
+ %C = icmp sgt i32 %B, -1
+ ret i1 %C
+}
+
+define i1 @test1(i32 %A) {
+ %B = xor i32 %A, 12345
+ %C = icmp slt i32 %B, 0
+ ret i1 %C
+}
+
diff --git a/test/Transforms/InstCombine/zeroext-and-reduce.ll b/test/Transforms/InstCombine/zeroext-and-reduce.ll
new file mode 100644
index 0000000..aac8c3b
--- /dev/null
+++ b/test/Transforms/InstCombine/zeroext-and-reduce.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep {and i32 %Y, 8}
+
+int %test1(ubyte %X) {
+ %Y = cast ubyte %X to int
+ %Z = and int %Y, 65544 ;; Prune this to and Y, 8
+ ret int %Z
+}
+
diff --git a/test/Transforms/InstCombine/zext.ll b/test/Transforms/InstCombine/zext.ll
new file mode 100644
index 0000000..cd1f1f8
--- /dev/null
+++ b/test/Transforms/InstCombine/zext.ll
@@ -0,0 +1,9 @@
+; Tests to make sure elimination of casts is working correctly
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: notcast {} {%c1.*}
+
+long %test_sext_zext(short %A) {
+ %c1 = zext short %A to uint
+ %c2 = sext uint %c1 to long
+ ret long %c2
+}