aboutsummaryrefslogtreecommitdiffstats
path: root/test/Transforms/InstCombine
diff options
context:
space:
mode:
Diffstat (limited to 'test/Transforms/InstCombine')
-rw-r--r--test/Transforms/InstCombine/2002-03-11-InstCombineHang.ll9
-rw-r--r--test/Transforms/InstCombine/2002-05-14-SubFailure.ll10
-rw-r--r--test/Transforms/InstCombine/2002-08-02-CastTest.ll11
-rw-r--r--test/Transforms/InstCombine/2002-12-05-MissedConstProp.ll11
-rw-r--r--test/Transforms/InstCombine/2003-05-26-CastMiscompile.ll8
-rw-r--r--test/Transforms/InstCombine/2003-05-27-ConstExprCrash.ll10
-rw-r--r--test/Transforms/InstCombine/2003-06-05-BranchInvertInfLoop.ll16
-rw-r--r--test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll44
-rw-r--r--test/Transforms/InstCombine/2003-08-12-AllocaNonNull.ll21
-rw-r--r--test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll7
-rw-r--r--test/Transforms/InstCombine/2003-10-29-CallSiteResolve.ll15
-rw-r--r--test/Transforms/InstCombine/2003-11-03-VarargsCallBug.ll13
-rw-r--r--test/Transforms/InstCombine/2003-11-13-ConstExprCastCall.ll12
-rw-r--r--test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll28
-rw-r--r--test/Transforms/InstCombine/2004-02-23-ShiftShiftOverflow.ll15
-rw-r--r--test/Transforms/InstCombine/2004-03-13-InstCombineInfLoop.ll13
-rw-r--r--test/Transforms/InstCombine/2004-04-04-InstCombineReplaceAllUsesWith.ll10
-rw-r--r--test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll10
-rw-r--r--test/Transforms/InstCombine/2004-07-27-ConstantExprMul.ll9
-rw-r--r--test/Transforms/InstCombine/2004-08-09-RemInfLoop.ll9
-rw-r--r--test/Transforms/InstCombine/2004-08-10-BoolSetCC.ll8
-rw-r--r--test/Transforms/InstCombine/2004-09-20-BadLoadCombine.ll18
-rw-r--r--test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.ll25
-rw-r--r--test/Transforms/InstCombine/2004-09-28-BadShiftAndSetCC.ll9
-rw-r--r--test/Transforms/InstCombine/2004-11-22-Missed-and-fold.ll10
-rw-r--r--test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll192
-rw-r--r--test/Transforms/InstCombine/2004-12-08-RemInfiniteLoop.ll7
-rw-r--r--test/Transforms/InstCombine/2005-03-04-ShiftOverflow.ll9
-rw-r--r--test/Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll8
-rw-r--r--test/Transforms/InstCombine/2005-06-15-DivSelectCrash.ll10
-rw-r--r--test/Transforms/InstCombine/2005-06-15-ShiftSetCCCrash.ll9
-rw-r--r--test/Transforms/InstCombine/2005-06-16-RangeCrash.ll9
-rw-r--r--test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll14
-rw-r--r--test/Transforms/InstCombine/2005-07-07-DeadPHILoop.ll14
-rw-r--r--test/Transforms/InstCombine/2006-02-13-DemandedMiscompile.ll10
-rw-r--r--test/Transforms/InstCombine/2006-02-28-Crash.ll8
-rw-r--r--test/Transforms/InstCombine/2006-03-30-ExtractElement.ll8
-rw-r--r--test/Transforms/InstCombine/2006-04-28-ShiftShiftLongLong.ll13
-rw-r--r--test/Transforms/InstCombine/2006-05-04-DemandedBitCrash.ll51
-rw-r--r--test/Transforms/InstCombine/2006-09-15-CastToBool.ll14
-rw-r--r--test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst-2.ll10
-rw-r--r--test/Transforms/InstCombine/2006-10-20-mask.ll11
-rw-r--r--test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll9
-rw-r--r--test/Transforms/InstCombine/2006-11-03-Memmove64.ll16
-rw-r--r--test/Transforms/InstCombine/2006-11-10-ashr-miscompile.ll9
-rw-r--r--test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll9
-rw-r--r--test/Transforms/InstCombine/2006-12-05-fp-to-int-ext.ll12
-rw-r--r--test/Transforms/InstCombine/2006-12-08-ICmp-Combining.ll18
-rw-r--r--test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll51
-rw-r--r--test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll41
-rw-r--r--test/Transforms/InstCombine/2006-12-15-Range-Test.ll31
-rw-r--r--test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll30
-rw-r--r--test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll10
-rw-r--r--test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll6
-rw-r--r--test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll7
-rw-r--r--test/Transforms/InstCombine/2007-01-27-AndICmp.ll8
-rw-r--r--test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll45
-rw-r--r--test/Transforms/InstCombine/2007-02-07-PointerCast.ll22
-rw-r--r--test/Transforms/InstCombine/2007-02-23-PhiFoldInfLoop.ll31
-rw-r--r--test/Transforms/InstCombine/2007-03-13-CompareMerge.ll9
-rw-r--r--test/Transforms/InstCombine/2007-03-19-BadTruncChangePR1261.ll10
-rw-r--r--test/Transforms/InstCombine/2007-03-21-SignedRangeTest.ll7
-rw-r--r--test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll29
-rw-r--r--test/Transforms/InstCombine/2007-03-25-DoubleShift.ll9
-rw-r--r--test/Transforms/InstCombine/2007-03-26-BadShiftMask.ll36
-rw-r--r--test/Transforms/InstCombine/2007-04-04-BadFoldBitcastIntoMalloc.ll19
-rw-r--r--test/Transforms/InstCombine/2007-04-08-SingleEltVectorCrash.ll7
-rw-r--r--test/Transforms/InstCombine/2007-05-04-Crash.ll30
-rw-r--r--test/Transforms/InstCombine/2007-05-10-icmp-or.ll8
-rw-r--r--test/Transforms/InstCombine/2007-05-14-Crash.ll18
-rw-r--r--test/Transforms/InstCombine/2007-05-18-CastFoldBug.ll10
-rw-r--r--test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll22
-rw-r--r--test/Transforms/InstCombine/2007-06-21-DivCompareMiscomp.ll9
-rw-r--r--test/Transforms/InstCombine/2007-08-02-InfiniteLoop.ll10
-rw-r--r--test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll13
-rw-r--r--test/Transforms/InstCombine/2007-09-11-Trampoline.ll24
-rw-r--r--test/Transforms/InstCombine/2007-09-17-AliasConstFold2.ll14
-rw-r--r--test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll21
-rw-r--r--test/Transforms/InstCombine/2007-10-12-Crash.ll38
-rw-r--r--test/Transforms/InstCombine/2007-10-28-stacksave.ll47
-rw-r--r--test/Transforms/InstCombine/2007-10-31-RangeCrash.ll35
-rw-r--r--test/Transforms/InstCombine/2007-10-31-StringCrash.ll21
-rw-r--r--test/Transforms/InstCombine/2007-11-07-OpaqueAlignCrash.ll22
-rw-r--r--test/Transforms/InstCombine/2007-11-15-CompareMiscomp.ll10
-rw-r--r--test/Transforms/InstCombine/2007-11-22-IcmpCrash.ll16
-rw-r--r--test/Transforms/InstCombine/2007-11-25-CompatibleAttributes.ll12
-rw-r--r--test/Transforms/InstCombine/2007-12-10-ConstFoldCompare.ll9
-rw-r--r--test/Transforms/InstCombine/2007-12-12-GEPScale.ll10
-rw-r--r--test/Transforms/InstCombine/2007-12-16-AsmNoUnwind.ll7
-rw-r--r--test/Transforms/InstCombine/2007-12-18-AddSelCmpSub.ll29
-rw-r--r--test/Transforms/InstCombine/2007-12-28-IcmpSub2.ll89
-rw-r--r--test/Transforms/InstCombine/2008-01-06-BitCastAttributes.ll23
-rw-r--r--test/Transforms/InstCombine/2008-01-06-CastCrash.ll10
-rw-r--r--test/Transforms/InstCombine/2008-01-06-VoidCast.ll10
-rw-r--r--test/Transforms/InstCombine/2008-01-13-AndCmpCmp.ll9
-rw-r--r--test/Transforms/InstCombine/2008-01-13-NoBitCastAttributes.ll15
-rw-r--r--test/Transforms/InstCombine/2008-01-14-DoubleNest.ll24
-rw-r--r--test/Transforms/InstCombine/2008-01-14-VarArgTrampoline.ll24
-rw-r--r--test/Transforms/InstCombine/2008-01-21-MismatchedCastAndCompare.ll20
-rw-r--r--test/Transforms/InstCombine/2008-01-21-MulTrunc.ll17
-rw-r--r--test/Transforms/InstCombine/2008-01-27-FloatSelect.ll7
-rw-r--r--test/Transforms/InstCombine/2008-01-29-AddICmp.ll20
-rw-r--r--test/Transforms/InstCombine/2008-02-13-MulURem.ll8
-rw-r--r--test/Transforms/InstCombine/2008-02-16-SDivOverflow.ll14
-rw-r--r--test/Transforms/InstCombine/2008-02-16-SDivOverflow2.ll9
-rw-r--r--test/Transforms/InstCombine/2008-02-23-MulSub.ll9
-rw-r--r--test/Transforms/InstCombine/2008-02-28-OrFCmpCrash.ll16
-rw-r--r--test/Transforms/InstCombine/2008-03-13-IntToPtr.ll9
-rw-r--r--test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll15
-rw-r--r--test/Transforms/InstCombine/2008-04-28-VolatileStore.ll8
-rw-r--r--test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll25
-rw-r--r--test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll21
-rw-r--r--test/Transforms/InstCombine/2008-05-08-LiveStoreDelete.ll25
-rw-r--r--test/Transforms/InstCombine/2008-05-08-StrLenSink.ll32
-rw-r--r--test/Transforms/InstCombine/2008-05-09-SinkOfInvoke.ll33
-rw-r--r--test/Transforms/InstCombine/2008-05-17-InfLoop.ll23
-rw-r--r--test/Transforms/InstCombine/2008-05-18-FoldIntToPtr.ll13
-rw-r--r--test/Transforms/InstCombine/2008-05-22-IDivVector.ll6
-rw-r--r--test/Transforms/InstCombine/2008-05-22-NegValVector.ll8
-rw-r--r--test/Transforms/InstCombine/2008-05-23-CompareFold.ll11
-rw-r--r--test/Transforms/InstCombine/2008-05-31-AddBool.ll7
-rw-r--r--test/Transforms/InstCombine/2008-05-31-Bools.ll24
-rw-r--r--test/Transforms/InstCombine/2008-06-05-ashr-crash.ll7
-rw-r--r--test/Transforms/InstCombine/2008-06-08-ICmpPHI.ll47
-rw-r--r--test/Transforms/InstCombine/2008-06-13-InfiniteLoopStore.ll20
-rw-r--r--test/Transforms/InstCombine/2008-06-13-ReadOnlyCallStore.ll19
-rw-r--r--test/Transforms/InstCombine/2008-06-19-UncondLoad.ll16
-rw-r--r--test/Transforms/InstCombine/2008-06-21-CompareMiscomp.ll11
-rw-r--r--test/Transforms/InstCombine/2008-06-24-StackRestore.ll39
-rw-r--r--test/Transforms/InstCombine/2008-07-08-AndICmp.ll10
-rw-r--r--test/Transforms/InstCombine/2008-07-08-ShiftOneAndOne.ll10
-rw-r--r--test/Transforms/InstCombine/2008-07-08-SubAnd.ll9
-rw-r--r--test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll26
-rw-r--r--test/Transforms/InstCombine/2008-07-09-SubAndError.ll9
-rw-r--r--test/Transforms/InstCombine/2008-07-10-CastSextBool.ll17
-rw-r--r--test/Transforms/InstCombine/2008-07-10-ICmpBinOp.ll19
-rw-r--r--test/Transforms/InstCombine/2008-07-11-RemAnd.ll9
-rw-r--r--test/Transforms/InstCombine/2008-07-13-DivZero.ll16
-rw-r--r--test/Transforms/InstCombine/2008-07-16-fsub.ll8
-rw-r--r--test/Transforms/InstCombine/2008-07-16-sse2_storel_dq.ll13
-rw-r--r--test/Transforms/InstCombine/2008-08-05-And.ll23
-rw-r--r--test/Transforms/InstCombine/2008-08-17-ICmpXorSignbit.ll41
-rw-r--r--test/Transforms/InstCombine/2008-09-02-VectorCrash.ll27
-rw-r--r--test/Transforms/InstCombine/2008-09-29-FoldingOr.ll10
-rw-r--r--test/Transforms/InstCombine/2008-10-11-DivCompareFold.ll8
-rw-r--r--test/Transforms/InstCombine/2008-10-23-ConstFoldWithoutMask.ll8
-rw-r--r--test/Transforms/InstCombine/2008-11-01-SRemDemandedBits.ll8
-rw-r--r--test/Transforms/InstCombine/2008-11-08-FCmp.ll47
-rw-r--r--test/Transforms/InstCombine/2008-11-20-DivMulRem.ll34
-rw-r--r--test/Transforms/InstCombine/2008-11-27-IDivVector.ll11
-rw-r--r--test/Transforms/InstCombine/2008-11-27-MultiplyIntVec.ll11
-rw-r--r--test/Transforms/InstCombine/2008-11-27-UDivNegative.ll6
-rw-r--r--test/Transforms/InstCombine/2008-12-17-SRemNegConstVec.ll7
-rw-r--r--test/Transforms/InstCombine/2009-01-05-i128-crash.ll27
-rw-r--r--test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll28
-rw-r--r--test/Transforms/InstCombine/2009-01-16-PointerAddrSpace.ll11
-rw-r--r--test/Transforms/InstCombine/2009-01-19-fmod-constant-float-specials.ll316
-rw-r--r--test/Transforms/InstCombine/2009-01-19-fmod-constant-float.ll75
-rw-r--r--test/Transforms/InstCombine/2009-01-24-EmptyStruct.ll18
-rw-r--r--test/Transforms/InstCombine/2009-01-31-InfIterate.ll22
-rw-r--r--test/Transforms/InstCombine/2009-01-31-Pressure.ll22
-rw-r--r--test/Transforms/InstCombine/2009-02-04-FPBitcast.ll12
-rw-r--r--test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll278
-rw-r--r--test/Transforms/InstCombine/2009-02-21-LoadCST.ll12
-rw-r--r--test/Transforms/InstCombine/2009-02-25-CrashZeroSizeArray.ll38
-rw-r--r--test/Transforms/InstCombine/2009-03-18-vector-ashr-crash.ll11
-rw-r--r--test/Transforms/InstCombine/2009-03-20-AShrOverShift.ll9
-rw-r--r--test/Transforms/InstCombine/2009-03-24-InfLoop.ll9
-rw-r--r--test/Transforms/InstCombine/2009-04-07-MulPromoteToI96.ll13
-rw-r--r--test/Transforms/InstCombine/2009-05-23-FCmpToICmp.ll9
-rw-r--r--test/Transforms/InstCombine/2009-06-11-StoreAddrSpace.ll7
-rw-r--r--test/Transforms/InstCombine/2009-06-16-SRemDemandedBits.ll9
-rw-r--r--test/Transforms/InstCombine/2009-07-02-MaskedIntVector.ll15
-rw-r--r--test/Transforms/InstCombine/2009-12-17-CmpSelectNull.ll16
-rw-r--r--test/Transforms/InstCombine/2010-01-28-NegativeSRem.ll19
-rw-r--r--test/Transforms/InstCombine/CPP_min_max.ll34
-rw-r--r--test/Transforms/InstCombine/IntPtrCast.ll10
-rw-r--r--test/Transforms/InstCombine/JavaCompare.ll14
-rw-r--r--test/Transforms/InstCombine/README.txt4
-rw-r--r--test/Transforms/InstCombine/add-shrink.ll14
-rw-r--r--test/Transforms/InstCombine/add-sitofp.ll9
-rw-r--r--test/Transforms/InstCombine/add.ll277
-rw-r--r--test/Transforms/InstCombine/add2.ll24
-rw-r--r--test/Transforms/InstCombine/add3.ll21
-rw-r--r--test/Transforms/InstCombine/addnegneg.ll12
-rw-r--r--test/Transforms/InstCombine/adjust-for-sminmax.ll85
-rw-r--r--test/Transforms/InstCombine/align-2d-gep.ll44
-rw-r--r--test/Transforms/InstCombine/align-addr.ll31
-rw-r--r--test/Transforms/InstCombine/align-external.ll22
-rw-r--r--test/Transforms/InstCombine/align-inc.ll12
-rw-r--r--test/Transforms/InstCombine/alloca.ll32
-rw-r--r--test/Transforms/InstCombine/and-compare.ll11
-rw-r--r--test/Transforms/InstCombine/and-fcmp.ll34
-rw-r--r--test/Transforms/InstCombine/and-not-or.ll34
-rw-r--r--test/Transforms/InstCombine/and-or-and.ll61
-rw-r--r--test/Transforms/InstCombine/and-or-not.ll46
-rw-r--r--test/Transforms/InstCombine/and-or.ll39
-rw-r--r--test/Transforms/InstCombine/and-xor-merge.ll19
-rw-r--r--test/Transforms/InstCombine/and.ll255
-rw-r--r--test/Transforms/InstCombine/and2.ll12
-rw-r--r--test/Transforms/InstCombine/apint-add1.ll34
-rw-r--r--test/Transforms/InstCombine/apint-add2.ll46
-rw-r--r--test/Transforms/InstCombine/apint-and-compare.ll16
-rw-r--r--test/Transforms/InstCombine/apint-and-or-and.ll50
-rw-r--r--test/Transforms/InstCombine/apint-and-xor-merge.ll22
-rw-r--r--test/Transforms/InstCombine/apint-and1.ll57
-rw-r--r--test/Transforms/InstCombine/apint-and2.ll82
-rw-r--r--test/Transforms/InstCombine/apint-call-cast-target.ll17
-rw-r--r--test/Transforms/InstCombine/apint-cast-and-cast.ll15
-rw-r--r--test/Transforms/InstCombine/apint-cast-cast-to-and.ll8
-rw-r--r--test/Transforms/InstCombine/apint-cast.ll30
-rw-r--r--test/Transforms/InstCombine/apint-div1.ll22
-rw-r--r--test/Transforms/InstCombine/apint-div2.ll22
-rw-r--r--test/Transforms/InstCombine/apint-mul1.ll11
-rw-r--r--test/Transforms/InstCombine/apint-mul2.ll12
-rw-r--r--test/Transforms/InstCombine/apint-not.ll42
-rw-r--r--test/Transforms/InstCombine/apint-or1.ll36
-rw-r--r--test/Transforms/InstCombine/apint-or2.ll35
-rw-r--r--test/Transforms/InstCombine/apint-rem1.ll22
-rw-r--r--test/Transforms/InstCombine/apint-rem2.ll22
-rw-r--r--test/Transforms/InstCombine/apint-select.ll44
-rw-r--r--test/Transforms/InstCombine/apint-shift-simplify.ll23
-rw-r--r--test/Transforms/InstCombine/apint-shift.ll184
-rw-r--r--test/Transforms/InstCombine/apint-shl-trunc.ll14
-rw-r--r--test/Transforms/InstCombine/apint-sub.ll141
-rw-r--r--test/Transforms/InstCombine/apint-xor1.ll50
-rw-r--r--test/Transforms/InstCombine/apint-xor2.ll51
-rw-r--r--test/Transforms/InstCombine/apint-zext1.ll11
-rw-r--r--test/Transforms/InstCombine/apint-zext2.ll11
-rw-r--r--test/Transforms/InstCombine/ashr-nop.ll8
-rw-r--r--test/Transforms/InstCombine/badmalloc.ll19
-rw-r--r--test/Transforms/InstCombine/binop-cast.ll9
-rw-r--r--test/Transforms/InstCombine/bit-tracking.ll26
-rw-r--r--test/Transforms/InstCombine/bitcast-scalar-to-vector.ll14
-rw-r--r--test/Transforms/InstCombine/bitcast-sext-vector.ll11
-rw-r--r--test/Transforms/InstCombine/bitcast-vec-canon.ll22
-rw-r--r--test/Transforms/InstCombine/bitcast-vector-fold.ll33
-rw-r--r--test/Transforms/InstCombine/bitcount.ll19
-rw-r--r--test/Transforms/InstCombine/bittest.ll30
-rw-r--r--test/Transforms/InstCombine/bswap-fold.ll69
-rw-r--r--test/Transforms/InstCombine/bswap.ll74
-rw-r--r--test/Transforms/InstCombine/call-cast-target.ll14
-rw-r--r--test/Transforms/InstCombine/call-intrinsics.ll19
-rw-r--r--test/Transforms/InstCombine/call.ll118
-rw-r--r--test/Transforms/InstCombine/call2.ll27
-rw-r--r--test/Transforms/InstCombine/canonicalize_branch.ll44
-rw-r--r--test/Transforms/InstCombine/cast-mul-select.ll41
-rw-r--r--test/Transforms/InstCombine/cast-set.ll65
-rw-r--r--test/Transforms/InstCombine/cast.ll607
-rw-r--r--test/Transforms/InstCombine/cast_ptr.ll79
-rw-r--r--test/Transforms/InstCombine/compare-signs.ll58
-rw-r--r--test/Transforms/InstCombine/constant-fold-compare.ll8
-rw-r--r--test/Transforms/InstCombine/constant-fold-gep.ll55
-rw-r--r--test/Transforms/InstCombine/constant-fold-ptr-casts.ll18
-rw-r--r--test/Transforms/InstCombine/crash.ll239
-rw-r--r--test/Transforms/InstCombine/dce-iterate.ll24
-rw-r--r--test/Transforms/InstCombine/deadcode.ll24
-rw-r--r--test/Transforms/InstCombine/dg.exp3
-rw-r--r--test/Transforms/InstCombine/div-cmp-overflow.ll8
-rw-r--r--test/Transforms/InstCombine/div.ll84
-rw-r--r--test/Transforms/InstCombine/enforce-known-alignment.ll18
-rw-r--r--test/Transforms/InstCombine/exact-sdiv.ll52
-rw-r--r--test/Transforms/InstCombine/extractvalue.ll38
-rw-r--r--test/Transforms/InstCombine/fold-bin-operand.ll14
-rw-r--r--test/Transforms/InstCombine/fold-vector-zero.ll35
-rw-r--r--test/Transforms/InstCombine/fp-ret-bitcast.ll28
-rw-r--r--test/Transforms/InstCombine/fpcast.ll15
-rw-r--r--test/Transforms/InstCombine/fpextend.ll36
-rw-r--r--test/Transforms/InstCombine/fsub.ll23
-rw-r--r--test/Transforms/InstCombine/getelementptr.ll470
-rw-r--r--test/Transforms/InstCombine/hoist_instr.ll18
-rw-r--r--test/Transforms/InstCombine/icmp.ll123
-rw-r--r--test/Transforms/InstCombine/idioms.ll32
-rw-r--r--test/Transforms/InstCombine/intrinsics.ll161
-rw-r--r--test/Transforms/InstCombine/invariant.ll16
-rw-r--r--test/Transforms/InstCombine/known_align.ll27
-rw-r--r--test/Transforms/InstCombine/load-cmp.ll112
-rw-r--r--test/Transforms/InstCombine/load-select.ll16
-rw-r--r--test/Transforms/InstCombine/load.ll87
-rw-r--r--test/Transforms/InstCombine/load2.ll11
-rw-r--r--test/Transforms/InstCombine/load3.ll14
-rw-r--r--test/Transforms/InstCombine/loadstore-alignment.ll67
-rw-r--r--test/Transforms/InstCombine/logical-select.ll68
-rw-r--r--test/Transforms/InstCombine/lshr-phi.ll35
-rw-r--r--test/Transforms/InstCombine/malloc-free-delete.ll13
-rw-r--r--test/Transforms/InstCombine/malloc.ll7
-rw-r--r--test/Transforms/InstCombine/malloc2.ll22
-rw-r--r--test/Transforms/InstCombine/malloc3.ll26
-rw-r--r--test/Transforms/InstCombine/memcpy-to-load.ll14
-rw-r--r--test/Transforms/InstCombine/memcpy.ll10
-rw-r--r--test/Transforms/InstCombine/memmove.ll42
-rw-r--r--test/Transforms/InstCombine/memset.ll15
-rw-r--r--test/Transforms/InstCombine/mul-masked-bits.ll10
-rw-r--r--test/Transforms/InstCombine/mul.ll116
-rw-r--r--test/Transforms/InstCombine/multi-use-or.ll24
-rw-r--r--test/Transforms/InstCombine/narrow.ll18
-rw-r--r--test/Transforms/InstCombine/no-negzero.ll33
-rw-r--r--test/Transforms/InstCombine/not-fcmp.ll10
-rw-r--r--test/Transforms/InstCombine/not.ll54
-rw-r--r--test/Transforms/InstCombine/nothrow.ll8
-rw-r--r--test/Transforms/InstCombine/nsw.ll20
-rw-r--r--test/Transforms/InstCombine/objsize.ll52
-rw-r--r--test/Transforms/InstCombine/odr-linkage.ll19
-rw-r--r--test/Transforms/InstCombine/or-fcmp.ll34
-rw-r--r--test/Transforms/InstCombine/or-to-xor.ll42
-rw-r--r--test/Transforms/InstCombine/or.ll352
-rw-r--r--test/Transforms/InstCombine/phi-merge-gep.ll102
-rw-r--r--test/Transforms/InstCombine/phi.ll364
-rw-r--r--test/Transforms/InstCombine/pr2645-0.ll33
-rw-r--r--test/Transforms/InstCombine/pr2645-1.ll39
-rw-r--r--test/Transforms/InstCombine/pr2996.ll12
-rw-r--r--test/Transforms/InstCombine/preserve-sminmax.ll32
-rw-r--r--test/Transforms/InstCombine/ptr-int-cast.ll17
-rw-r--r--test/Transforms/InstCombine/rem.ll83
-rw-r--r--test/Transforms/InstCombine/sdiv-1.ll22
-rw-r--r--test/Transforms/InstCombine/sdiv-2.ll28
-rw-r--r--test/Transforms/InstCombine/sdiv-shift.ll9
-rw-r--r--test/Transforms/InstCombine/select-2.ll18
-rw-r--r--test/Transforms/InstCombine/select-load-call.ll15
-rw-r--r--test/Transforms/InstCombine/select.ll440
-rw-r--r--test/Transforms/InstCombine/set.ll171
-rw-r--r--test/Transforms/InstCombine/setcc-strength-reduce.ll37
-rw-r--r--test/Transforms/InstCombine/sext.ll128
-rw-r--r--test/Transforms/InstCombine/shift-simplify.ll42
-rw-r--r--test/Transforms/InstCombine/shift-sra.ll58
-rw-r--r--test/Transforms/InstCombine/shift-trunc-shift.ll10
-rw-r--r--test/Transforms/InstCombine/shift.ll345
-rw-r--r--test/Transforms/InstCombine/shufflemask-undef.ll109
-rw-r--r--test/Transforms/InstCombine/shufflevec-constant.ll14
-rw-r--r--test/Transforms/InstCombine/signed-comparison.ll28
-rw-r--r--test/Transforms/InstCombine/signext.ll87
-rw-r--r--test/Transforms/InstCombine/simplify-demanded-bits-pointer.ll84
-rw-r--r--test/Transforms/InstCombine/sink_instruction.ll56
-rw-r--r--test/Transforms/InstCombine/sitofp.ll55
-rw-r--r--test/Transforms/InstCombine/srem-simplify-bug.ll9
-rw-r--r--test/Transforms/InstCombine/srem.ll8
-rw-r--r--test/Transforms/InstCombine/srem1.ll18
-rw-r--r--test/Transforms/InstCombine/stack-overalign.ll29
-rw-r--r--test/Transforms/InstCombine/stacksaverestore.ll56
-rw-r--r--test/Transforms/InstCombine/store.ll85
-rw-r--r--test/Transforms/InstCombine/sub.ll283
-rw-r--r--test/Transforms/InstCombine/trunc-mask-ext.ll38
-rw-r--r--test/Transforms/InstCombine/udiv-simplify-bug-0.ll14
-rw-r--r--test/Transforms/InstCombine/udiv-simplify-bug-1.ll20
-rw-r--r--test/Transforms/InstCombine/udiv_select_to_select_shift.ll17
-rw-r--r--test/Transforms/InstCombine/udivrem-change-width.ll21
-rw-r--r--test/Transforms/InstCombine/urem-simplify-bug.ll32
-rw-r--r--test/Transforms/InstCombine/urem.ll8
-rw-r--r--test/Transforms/InstCombine/vec_demanded_elts-2.ll19
-rw-r--r--test/Transforms/InstCombine/vec_demanded_elts-3.ll14
-rw-r--r--test/Transforms/InstCombine/vec_demanded_elts.ll47
-rw-r--r--test/Transforms/InstCombine/vec_extract_elt.ll9
-rw-r--r--test/Transforms/InstCombine/vec_insertelt.ll7
-rw-r--r--test/Transforms/InstCombine/vec_narrow.ll12
-rw-r--r--test/Transforms/InstCombine/vec_shuffle.ll89
-rw-r--r--test/Transforms/InstCombine/vector-casts.ll107
-rw-r--r--test/Transforms/InstCombine/vector-srem.ll9
-rw-r--r--test/Transforms/InstCombine/volatile_store.ll14
-rw-r--r--test/Transforms/InstCombine/xor-undef.ll6
-rw-r--r--test/Transforms/InstCombine/xor.ll193
-rw-r--r--test/Transforms/InstCombine/xor2.ll53
-rw-r--r--test/Transforms/InstCombine/zero-point-zero-add.ll15
-rw-r--r--test/Transforms/InstCombine/zeroext-and-reduce.ll10
-rw-r--r--test/Transforms/InstCombine/zext-bool-add-sub.ll29
-rw-r--r--test/Transforms/InstCombine/zext-fold.ll12
-rw-r--r--test/Transforms/InstCombine/zext-or-icmp.ll35
-rw-r--r--test/Transforms/InstCombine/zext.ll11
367 files changed, 14149 insertions, 0 deletions
diff --git a/test/Transforms/InstCombine/2002-03-11-InstCombineHang.ll b/test/Transforms/InstCombine/2002-03-11-InstCombineHang.ll
new file mode 100644
index 0000000..5d027a7
--- /dev/null
+++ b/test/Transforms/InstCombine/2002-03-11-InstCombineHang.ll
@@ -0,0 +1,9 @@
+; This testcase causes instcombine to hang.
+;
+; RUN: opt < %s -instcombine
+
+define void @test(i32 %X) {
+ %reg117 = add i32 %X, 0 ; <i32> [#uses=0]
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/2002-05-14-SubFailure.ll b/test/Transforms/InstCombine/2002-05-14-SubFailure.ll
new file mode 100644
index 0000000..d2b2b00
--- /dev/null
+++ b/test/Transforms/InstCombine/2002-05-14-SubFailure.ll
@@ -0,0 +1,10 @@
+; Instcombine was missing a test that caused it to make illegal transformations
+; sometimes. In this case, it transforms the sub into an add:
+; RUN: opt < %s -instcombine -S | grep sub
+;
+define i32 @test(i32 %i, i32 %j) {
+ %A = mul i32 %i, %j
+ %B = sub i32 2, %A
+ ret i32 %B
+}
+
diff --git a/test/Transforms/InstCombine/2002-08-02-CastTest.ll b/test/Transforms/InstCombine/2002-08-02-CastTest.ll
new file mode 100644
index 0000000..363cb21
--- /dev/null
+++ b/test/Transforms/InstCombine/2002-08-02-CastTest.ll
@@ -0,0 +1,11 @@
+; This testcase is incorrectly getting completely eliminated. There should be
+; SOME instruction named %c here, even if it's a bitwise and.
+;
+; RUN: opt < %s -instcombine -S | grep %c
+;
+define i64 @test3(i64 %A) {
+ %c1 = trunc i64 %A to i8 ; <i8> [#uses=1]
+ %c2 = zext i8 %c1 to i64 ; <i64> [#uses=1]
+ ret i64 %c2
+}
+
diff --git a/test/Transforms/InstCombine/2002-12-05-MissedConstProp.ll b/test/Transforms/InstCombine/2002-12-05-MissedConstProp.ll
new file mode 100644
index 0000000..22574f7
--- /dev/null
+++ b/test/Transforms/InstCombine/2002-12-05-MissedConstProp.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | not grep add
+
+define i32 @test(i32 %A) {
+ %A.neg = sub i32 0, %A ; <i32> [#uses=1]
+ %.neg = sub i32 0, 1 ; <i32> [#uses=1]
+ %X = add i32 %.neg, 1 ; <i32> [#uses=1]
+ %Y.neg.ra = add i32 %A, %X ; <i32> [#uses=1]
+ %r = add i32 %A.neg, %Y.neg.ra ; <i32> [#uses=1]
+ ret i32 %r
+}
+
diff --git a/test/Transforms/InstCombine/2003-05-26-CastMiscompile.ll b/test/Transforms/InstCombine/2003-05-26-CastMiscompile.ll
new file mode 100644
index 0000000..19010d2
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-05-26-CastMiscompile.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep 4294967295
+
+define i64 @test(i64 %Val) {
+ %tmp.3 = trunc i64 %Val to i32 ; <i32> [#uses=1]
+ %tmp.8 = zext i32 %tmp.3 to i64 ; <i64> [#uses=1]
+ ret i64 %tmp.8
+}
+
diff --git a/test/Transforms/InstCombine/2003-05-27-ConstExprCrash.ll b/test/Transforms/InstCombine/2003-05-27-ConstExprCrash.ll
new file mode 100644
index 0000000..8645249
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-05-27-ConstExprCrash.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -disable-output
+
+@X = global i32 5 ; <i32*> [#uses=1]
+
+define i64 @test() {
+ %C = add i64 1, 2 ; <i64> [#uses=1]
+ %V = add i64 ptrtoint (i32* @X to i64), %C ; <i64> [#uses=1]
+ ret i64 %V
+}
+
diff --git a/test/Transforms/InstCombine/2003-06-05-BranchInvertInfLoop.ll b/test/Transforms/InstCombine/2003-06-05-BranchInvertInfLoop.ll
new file mode 100644
index 0000000..154f3ba
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-06-05-BranchInvertInfLoop.ll
@@ -0,0 +1,16 @@
+; This testcase causes an infinite loop in the instruction combiner,
+; because it things that the constant value is a not expression... and
+; constantly inverts the branch back and forth.
+;
+; RUN: opt < %s -instcombine -disable-output
+
+define i8 @test19(i1 %c) {
+ br i1 true, label %True, label %False
+
+True: ; preds = %0
+ ret i8 1
+
+False: ; preds = %0
+ ret i8 3
+}
+
diff --git a/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll b/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll
new file mode 100644
index 0000000..f550c83
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll
@@ -0,0 +1,44 @@
+;
+; Test: ExternalConstant
+;
+; Description:
+; This regression test helps check whether the instruction combining
+; optimization pass correctly handles global variables which are marked
+; as external and constant.
+;
+; If a problem occurs, we should die on an assert(). Otherwise, we
+; should pass through the optimizer without failure.
+;
+; Extra code:
+; RUN: opt < %s -instcombine
+; END.
+
+target datalayout = "e-p:32:32"
+@silly = external constant i32 ; <i32*> [#uses=1]
+
+declare void @bzero(i8*, i32)
+
+declare void @bcopy(i8*, i8*, i32)
+
+declare i32 @bcmp(i8*, i8*, i32)
+
+declare i32 @fputs(i8*, i8*)
+
+declare i32 @fputs_unlocked(i8*, i8*)
+
+define i32 @function(i32 %a.1) {
+entry:
+ %a.0 = alloca i32 ; <i32*> [#uses=2]
+ %result = alloca i32 ; <i32*> [#uses=2]
+ store i32 %a.1, i32* %a.0
+ %tmp.0 = load i32* %a.0 ; <i32> [#uses=1]
+ %tmp.1 = load i32* @silly ; <i32> [#uses=1]
+ %tmp.2 = add i32 %tmp.0, %tmp.1 ; <i32> [#uses=1]
+ store i32 %tmp.2, i32* %result
+ br label %return
+
+return: ; preds = %entry
+ %tmp.3 = load i32* %result ; <i32> [#uses=1]
+ ret i32 %tmp.3
+}
+
diff --git a/test/Transforms/InstCombine/2003-08-12-AllocaNonNull.ll b/test/Transforms/InstCombine/2003-08-12-AllocaNonNull.ll
new file mode 100644
index 0000000..6d22754
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-08-12-AllocaNonNull.ll
@@ -0,0 +1,21 @@
+; This testcase can be simplified by "realizing" that alloca can never return
+; null.
+; RUN: opt < %s -instcombine -simplifycfg | \
+; RUN: llvm-dis | not grep br
+
+declare i32 @bitmap_clear(...)
+
+define i32 @oof() {
+entry:
+ %live_head = alloca i32 ; <i32*> [#uses=2]
+ %tmp.1 = icmp ne i32* %live_head, null ; <i1> [#uses=1]
+ br i1 %tmp.1, label %then, label %UnifiedExitNode
+
+then: ; preds = %entry
+ %tmp.4 = call i32 (...)* @bitmap_clear( i32* %live_head ) ; <i32> [#uses=0]
+ br label %UnifiedExitNode
+
+UnifiedExitNode: ; preds = %then, %entry
+ ret i32 0
+}
+
diff --git a/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll b/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll
new file mode 100644
index 0000000..3297919
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -S | grep load
+
+define void @test(i32* %P) {
+ ; Dead but not deletable!
+ %X = volatile load i32* %P ; <i32> [#uses=0]
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2003-10-29-CallSiteResolve.ll b/test/Transforms/InstCombine/2003-10-29-CallSiteResolve.ll
new file mode 100644
index 0000000..cfe5df6
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-10-29-CallSiteResolve.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine -disable-output
+
+declare i32* @bar()
+
+define float* @foo() {
+ %tmp.11 = invoke float* bitcast (i32* ()* @bar to float* ()*)( )
+ to label %invoke_cont unwind label %X ; <float*> [#uses=1]
+
+invoke_cont: ; preds = %0
+ ret float* %tmp.11
+
+X: ; preds = %0
+ ret float* null
+}
+
diff --git a/test/Transforms/InstCombine/2003-11-03-VarargsCallBug.ll b/test/Transforms/InstCombine/2003-11-03-VarargsCallBug.ll
new file mode 100644
index 0000000..c1692f7
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-11-03-VarargsCallBug.ll
@@ -0,0 +1,13 @@
+; The cast in this testcase is not eliminable on a 32-bit target!
+; RUN: opt < %s -instcombine -S | grep inttoptr
+
+target datalayout = "e-p:32:32"
+
+declare void @foo(...)
+
+define void @test(i64 %X) {
+ %Y = inttoptr i64 %X to i32* ; <i32*> [#uses=1]
+ call void (...)* @foo( i32* %Y )
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/2003-11-13-ConstExprCastCall.ll b/test/Transforms/InstCombine/2003-11-13-ConstExprCastCall.ll
new file mode 100644
index 0000000..fdb8fd9
--- /dev/null
+++ b/test/Transforms/InstCombine/2003-11-13-ConstExprCastCall.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+declare void @free(i8*)
+
+define void @test(i32* %X) {
+ call void (...)* bitcast (void (i8*)* @free to void (...)*)( i32* %X ) ; <i32>:1 [#uses=0]
+; CHECK: %tmp = bitcast i32* %X to i8*
+; CHECK: call void @free(i8* %tmp)
+ ret void
+; CHECK: ret void
+}
diff --git a/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll b/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll
new file mode 100644
index 0000000..bec0b9e
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll
@@ -0,0 +1,28 @@
+; Test for a problem afflicting several C++ programs in the testsuite. The
+; instcombine pass is trying to get rid of the cast in the invoke instruction,
+; inserting a cast of the return value after the PHI instruction, but which is
+; used by the PHI instruction. This is bad: because of the semantics of the
+; invoke instruction, we really cannot perform this transformation at all at
+; least without splitting the critical edge.
+;
+; RUN: opt < %s -instcombine -disable-output
+
+declare i8* @test()
+
+define i32 @foo() {
+entry:
+ br i1 true, label %cont, label %call
+
+call: ; preds = %entry
+ %P = invoke i32* bitcast (i8* ()* @test to i32* ()*)( )
+ to label %cont unwind label %N ; <i32*> [#uses=1]
+
+cont: ; preds = %call, %entry
+ %P2 = phi i32* [ %P, %call ], [ null, %entry ] ; <i32*> [#uses=1]
+ %V = load i32* %P2 ; <i32> [#uses=1]
+ ret i32 %V
+
+N: ; preds = %call
+ ret i32 0
+}
+
diff --git a/test/Transforms/InstCombine/2004-02-23-ShiftShiftOverflow.ll b/test/Transforms/InstCombine/2004-02-23-ShiftShiftOverflow.ll
new file mode 100644
index 0000000..a08e3a8
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-02-23-ShiftShiftOverflow.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine -S | not grep 34
+
+define i32 @test(i32 %X) {
+ ; Do not fold into shr X, 34, as this uses undefined behavior!
+ %Y = ashr i32 %X, 17 ; <i32> [#uses=1]
+ %Z = ashr i32 %Y, 17 ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i32 @test2(i32 %X) {
+ ; Do not fold into shl X, 34, as this uses undefined behavior!
+ %Y = shl i32 %X, 17 ; <i32> [#uses=1]
+ %Z = shl i32 %Y, 17 ; <i32> [#uses=1]
+ ret i32 %Z
+}
diff --git a/test/Transforms/InstCombine/2004-03-13-InstCombineInfLoop.ll b/test/Transforms/InstCombine/2004-03-13-InstCombineInfLoop.ll
new file mode 100644
index 0000000..ff20d7d
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-03-13-InstCombineInfLoop.ll
@@ -0,0 +1,13 @@
+; This testcase caused the combiner to go into an infinite loop, moving the
+; cast back and forth, changing the seteq to operate on int vs uint and back.
+
+; RUN: opt < %s -instcombine -disable-output
+
+define i1 @test(i32 %A, i32 %B) {
+ %C = sub i32 0, %A ; <i32> [#uses=1]
+ %Cc = bitcast i32 %C to i32 ; <i32> [#uses=1]
+ %D = sub i32 0, %B ; <i32> [#uses=1]
+ %E = icmp eq i32 %Cc, %D ; <i1> [#uses=1]
+ ret i1 %E
+}
+
diff --git a/test/Transforms/InstCombine/2004-04-04-InstCombineReplaceAllUsesWith.ll b/test/Transforms/InstCombine/2004-04-04-InstCombineReplaceAllUsesWith.ll
new file mode 100644
index 0000000..84f9bad
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-04-04-InstCombineReplaceAllUsesWith.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define i32 @test() {
+ ret i32 0
+
+Loop: ; preds = %Loop
+ %X = add i32 %X, 1 ; <i32> [#uses=1]
+ br label %Loop
+}
+
diff --git a/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll b/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll
new file mode 100644
index 0000000..8b54937
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -disable-output
+
+%Ty = type opaque
+
+define i32 @test(%Ty* %X) {
+ %Y = bitcast %Ty* %X to i32* ; <i32*> [#uses=1]
+ %Z = load i32* %Y ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
diff --git a/test/Transforms/InstCombine/2004-07-27-ConstantExprMul.ll b/test/Transforms/InstCombine/2004-07-27-ConstantExprMul.ll
new file mode 100644
index 0000000..819260b
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-07-27-ConstantExprMul.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -disable-output
+
+@p = weak global i32 0 ; <i32*> [#uses=1]
+
+define i32 @test(i32 %x) {
+ %y = mul i32 %x, ptrtoint (i32* @p to i32) ; <i32> [#uses=1]
+ ret i32 %y
+}
+
diff --git a/test/Transforms/InstCombine/2004-08-09-RemInfLoop.ll b/test/Transforms/InstCombine/2004-08-09-RemInfLoop.ll
new file mode 100644
index 0000000..f3e5d77
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-08-09-RemInfLoop.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine
+
+; This testcase should not send the instcombiner into an infinite loop!
+
+define i32 @test(i32 %X) {
+ %Y = srem i32 %X, 0 ; <i32> [#uses=1]
+ ret i32 %Y
+}
+
diff --git a/test/Transforms/InstCombine/2004-08-10-BoolSetCC.ll b/test/Transforms/InstCombine/2004-08-10-BoolSetCC.ll
new file mode 100644
index 0000000..1154bb4
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-08-10-BoolSetCC.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {ret i1 false}
+
+define i1 @test(i1 %V) {
+ %Y = icmp ult i1 %V, false ; <i1> [#uses=1]
+ ret i1 %Y
+}
+
diff --git a/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.ll b/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.ll
new file mode 100644
index 0000000..8169d21
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -mem2reg -S | \
+; RUN: not grep {i32 1}
+
+; When propagating the load through the select, make sure that the load is
+; inserted where the original load was, not where the select is. Not doing
+; so could produce incorrect results!
+
+define i32 @test(i1 %C) {
+ %X = alloca i32 ; <i32*> [#uses=3]
+ %X2 = alloca i32 ; <i32*> [#uses=2]
+ store i32 1, i32* %X
+ store i32 2, i32* %X2
+ %Y = select i1 %C, i32* %X, i32* %X2 ; <i32*> [#uses=1]
+ store i32 3, i32* %X
+ %Z = load i32* %Y ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
diff --git a/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.ll b/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.ll
new file mode 100644
index 0000000..e646edf
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -instcombine -mem2reg -simplifycfg | \
+; RUN: llvm-dis | grep -v store | not grep {i32 1}
+
+; Test to make sure that instcombine does not accidentally propagate the load
+; into the PHI, which would break the program.
+
+define i32 @test(i1 %C) {
+entry:
+ %X = alloca i32 ; <i32*> [#uses=3]
+ %X2 = alloca i32 ; <i32*> [#uses=2]
+ store i32 1, i32* %X
+ store i32 2, i32* %X2
+ br i1 %C, label %cond_true.i, label %cond_continue.i
+
+cond_true.i: ; preds = %entry
+ br label %cond_continue.i
+
+cond_continue.i: ; preds = %cond_true.i, %entry
+ %mem_tmp.i.0 = phi i32* [ %X, %cond_true.i ], [ %X2, %entry ] ; <i32*> [#uses=1]
+ store i32 3, i32* %X
+ %tmp.3 = load i32* %mem_tmp.i.0 ; <i32> [#uses=1]
+ ret i32 %tmp.3
+}
+
+
diff --git a/test/Transforms/InstCombine/2004-09-28-BadShiftAndSetCC.ll b/test/Transforms/InstCombine/2004-09-28-BadShiftAndSetCC.ll
new file mode 100644
index 0000000..27c823b
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-09-28-BadShiftAndSetCC.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | not grep -- -65536
+
+define i1 @test(i32 %tmp.124) {
+ %tmp.125 = shl i32 %tmp.124, 8 ; <i32> [#uses=1]
+ %tmp.126.mask = and i32 %tmp.125, -16777216 ; <i32> [#uses=1]
+ %tmp.128 = icmp eq i32 %tmp.126.mask, 167772160 ; <i1> [#uses=1]
+ ret i1 %tmp.128
+}
+
diff --git a/test/Transforms/InstCombine/2004-11-22-Missed-and-fold.ll b/test/Transforms/InstCombine/2004-11-22-Missed-and-fold.ll
new file mode 100644
index 0000000..730fdc2
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-11-22-Missed-and-fold.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | not grep and
+
+define i8 @test21(i8 %A) {
+ ;; sign extend
+ %C = ashr i8 %A, 7 ; <i8> [#uses=1]
+ ;; chop off sign
+ %D = and i8 %C, 1 ; <i8> [#uses=1]
+ ret i8 %D
+}
+
diff --git a/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll b/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll
new file mode 100644
index 0000000..6672b6c
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll
@@ -0,0 +1,192 @@
+; This test case tests the InstructionCombining optimization that
+; reduces things like:
+; %Y = sext i8 %X to i32
+; %C = icmp ult i32 %Y, 1024
+; to
+; %C = i1 true
+; It includes test cases for different constant values, signedness of the
+; cast operands, and types of setCC operators. In all cases, the cast should
+; be eliminated. In many cases the setCC is also eliminated based on the
+; constant value and the range of the casted value.
+;
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; END.
+define i1 @lt_signed_to_large_unsigned(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp ult i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C1 = icmp sgt i8 %SB, -1
+; CHECK: ret i1 %C1
+}
+
+define i1 @lt_signed_to_large_signed(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp slt i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 true
+}
+
+define i1 @lt_signed_to_large_negative(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp slt i32 %Y, -1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 false
+}
+
+define i1 @lt_signed_to_small_unsigned(i8 %SB) {
+ %Y = sext i8 %SB to i32
+ %C = icmp ult i32 %Y, 17
+ ret i1 %C
+; CHECK: %C = icmp ult i8 %SB, 17
+; CHECK: ret i1 %C
+}
+
+define i1 @lt_signed_to_small_signed(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp slt i32 %Y, 17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp slt i8 %SB, 17
+; CHECK: ret i1 %C
+}
+define i1 @lt_signed_to_small_negative(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp slt i32 %Y, -17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp slt i8 %SB, -17
+; CHECK: ret i1 %C
+}
+
+define i1 @lt_unsigned_to_large_unsigned(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp ult i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 true
+}
+
+define i1 @lt_unsigned_to_large_signed(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp slt i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 true
+}
+
+define i1 @lt_unsigned_to_large_negative(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp slt i32 %Y, -1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 false
+}
+
+define i1 @lt_unsigned_to_small_unsigned(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp ult i32 %Y, 17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp ult i8 %SB, 17
+; CHECK: ret i1 %C
+}
+
+define i1 @lt_unsigned_to_small_signed(i8 %SB) {
+ %Y = zext i8 %SB to i32
+ %C = icmp slt i32 %Y, 17
+ ret i1 %C
+; CHECK: %C = icmp ult i8 %SB, 17
+; CHECK: ret i1 %C
+}
+
+define i1 @lt_unsigned_to_small_negative(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp slt i32 %Y, -17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 false
+}
+
+define i1 @gt_signed_to_large_unsigned(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp ugt i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp slt i8 %SB, 0
+; CHECK: ret i1 %C
+}
+
+define i1 @gt_signed_to_large_signed(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp sgt i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 false
+}
+
+define i1 @gt_signed_to_large_negative(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp sgt i32 %Y, -1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 true
+}
+
+define i1 @gt_signed_to_small_unsigned(i8 %SB) {
+ %Y = sext i8 %SB to i32
+ %C = icmp ugt i32 %Y, 17
+ ret i1 %C
+; CHECK: %C = icmp ugt i8 %SB, 17
+; CHECK: ret i1 %C
+}
+
+define i1 @gt_signed_to_small_signed(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp sgt i32 %Y, 17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp sgt i8 %SB, 17
+; CHECK: ret i1 %C
+}
+
+define i1 @gt_signed_to_small_negative(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp sgt i32 %Y, -17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp sgt i8 %SB, -17
+; CHECK: ret i1 %C
+}
+
+define i1 @gt_unsigned_to_large_unsigned(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp ugt i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 false
+}
+
+define i1 @gt_unsigned_to_large_signed(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp sgt i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 false
+}
+
+define i1 @gt_unsigned_to_large_negative(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp sgt i32 %Y, -1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 true
+}
+
+define i1 @gt_unsigned_to_small_unsigned(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp ugt i32 %Y, 17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp ugt i8 %SB, 17
+; CHECK: ret i1 %C
+}
+
+define i1 @gt_unsigned_to_small_signed(i8 %SB) {
+ %Y = zext i8 %SB to i32
+ %C = icmp sgt i32 %Y, 17
+ ret i1 %C
+; CHECK: %C = icmp ugt i8 %SB, 17
+; CHECK: ret i1 %C
+}
+
+define i1 @gt_unsigned_to_small_negative(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp sgt i32 %Y, -17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 true
+}
+
diff --git a/test/Transforms/InstCombine/2004-12-08-RemInfiniteLoop.ll b/test/Transforms/InstCombine/2004-12-08-RemInfiniteLoop.ll
new file mode 100644
index 0000000..008afa8
--- /dev/null
+++ b/test/Transforms/InstCombine/2004-12-08-RemInfiniteLoop.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine
+
+define i32 @test(i32 %X) {
+ %Y = srem i32 %X, undef ; <i32> [#uses=1]
+ ret i32 %Y
+}
+
diff --git a/test/Transforms/InstCombine/2005-03-04-ShiftOverflow.ll b/test/Transforms/InstCombine/2005-03-04-ShiftOverflow.ll
new file mode 100644
index 0000000..38553d7
--- /dev/null
+++ b/test/Transforms/InstCombine/2005-03-04-ShiftOverflow.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep {ret i1 false}
+
+define i1 @test(i64 %tmp.169) {
+ %tmp.1710 = lshr i64 %tmp.169, 1 ; <i64> [#uses=1]
+ %tmp.1912 = icmp ugt i64 %tmp.1710, 0 ; <i1> [#uses=1]
+ ret i1 %tmp.1912
+}
+
diff --git a/test/Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll b/test/Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll
new file mode 100644
index 0000000..1ec1180
--- /dev/null
+++ b/test/Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define i32 @test(i1 %C, i32 %tmp.15) {
+ %tmp.16 = select i1 %C, i32 8, i32 1 ; <i32> [#uses=1]
+ %tmp.18 = udiv i32 %tmp.15, %tmp.16 ; <i32> [#uses=1]
+ ret i32 %tmp.18
+}
+
diff --git a/test/Transforms/InstCombine/2005-06-15-DivSelectCrash.ll b/test/Transforms/InstCombine/2005-06-15-DivSelectCrash.ll
new file mode 100644
index 0000000..9846ee7
--- /dev/null
+++ b/test/Transforms/InstCombine/2005-06-15-DivSelectCrash.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define i32 @_Z13func_31585107li(i32 %l_39521025, i32 %l_59244666) {
+ %shortcirc_val = select i1 false, i32 1, i32 0 ; <i32> [#uses=1]
+ %tmp.8 = udiv i32 0, %shortcirc_val ; <i32> [#uses=1]
+ %tmp.9 = icmp eq i32 %tmp.8, 0 ; <i1> [#uses=1]
+ %retval = select i1 %tmp.9, i32 %l_59244666, i32 -1621308501 ; <i32> [#uses=1]
+ ret i32 %retval
+}
+
diff --git a/test/Transforms/InstCombine/2005-06-15-ShiftSetCCCrash.ll b/test/Transforms/InstCombine/2005-06-15-ShiftSetCCCrash.ll
new file mode 100644
index 0000000..e2d0618
--- /dev/null
+++ b/test/Transforms/InstCombine/2005-06-15-ShiftSetCCCrash.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR577
+
+define i1 @test() {
+ %tmp.3 = shl i32 0, 41 ; <i32> [#uses=1]
+ %tmp.4 = icmp ne i32 %tmp.3, 0 ; <i1> [#uses=1]
+ ret i1 %tmp.4
+}
+
diff --git a/test/Transforms/InstCombine/2005-06-16-RangeCrash.ll b/test/Transforms/InstCombine/2005-06-16-RangeCrash.ll
new file mode 100644
index 0000000..f0e60ac
--- /dev/null
+++ b/test/Transforms/InstCombine/2005-06-16-RangeCrash.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR585
+
+define i1 @test() {
+ %tmp.26 = sdiv i32 0, -2147483648 ; <i32> [#uses=1]
+ %tmp.27 = icmp eq i32 %tmp.26, 0 ; <i1> [#uses=1]
+ ret i1 %tmp.27
+}
+
diff --git a/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll b/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll
new file mode 100644
index 0000000..3d887dd
--- /dev/null
+++ b/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {ret i1 true}
+; PR586
+
+@g_07918478 = external global i32 ; <i32*> [#uses=1]
+
+define i1 @test() {
+ %tmp.0 = load i32* @g_07918478 ; <i32> [#uses=2]
+ %tmp.1 = icmp ne i32 %tmp.0, 0 ; <i1> [#uses=1]
+ %tmp.4 = icmp ult i32 %tmp.0, 4111 ; <i1> [#uses=1]
+ %bothcond = or i1 %tmp.1, %tmp.4 ; <i1> [#uses=1]
+ ret i1 %bothcond
+}
+
diff --git a/test/Transforms/InstCombine/2005-07-07-DeadPHILoop.ll b/test/Transforms/InstCombine/2005-07-07-DeadPHILoop.ll
new file mode 100644
index 0000000..caee951
--- /dev/null
+++ b/test/Transforms/InstCombine/2005-07-07-DeadPHILoop.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -disable-output
+
+; This example caused instcombine to spin into an infinite loop.
+
+define void @test(i32* %P) {
+ ret void
+
+Dead: ; preds = %Dead
+ %X = phi i32 [ %Y, %Dead ] ; <i32> [#uses=1]
+ %Y = sdiv i32 %X, 10 ; <i32> [#uses=2]
+ store i32 %Y, i32* %P
+ br label %Dead
+}
+
diff --git a/test/Transforms/InstCombine/2006-02-13-DemandedMiscompile.ll b/test/Transforms/InstCombine/2006-02-13-DemandedMiscompile.ll
new file mode 100644
index 0000000..10541ef
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-02-13-DemandedMiscompile.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep undef
+
+define i32 @test(i8 %A) {
+ %B = sext i8 %A to i32 ; <i32> [#uses=1]
+ %C = ashr i32 %B, 8 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+
diff --git a/test/Transforms/InstCombine/2006-02-28-Crash.ll b/test/Transforms/InstCombine/2006-02-28-Crash.ll
new file mode 100644
index 0000000..9bea14c
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-02-28-Crash.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define i32 @test() {
+ %tmp203 = icmp eq i32 1, 2 ; <i1> [#uses=1]
+ %tmp203.upgrd.1 = zext i1 %tmp203 to i32 ; <i32> [#uses=1]
+ ret i32 %tmp203.upgrd.1
+}
+
diff --git a/test/Transforms/InstCombine/2006-03-30-ExtractElement.ll b/test/Transforms/InstCombine/2006-03-30-ExtractElement.ll
new file mode 100644
index 0000000..aa7d587
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-03-30-ExtractElement.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define float @test(<4 x float> %V) {
+ %V2 = insertelement <4 x float> %V, float 1.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
+ %R = extractelement <4 x float> %V2, i32 2 ; <float> [#uses=1]
+ ret float %R
+}
+
diff --git a/test/Transforms/InstCombine/2006-04-28-ShiftShiftLongLong.ll b/test/Transforms/InstCombine/2006-04-28-ShiftShiftLongLong.ll
new file mode 100644
index 0000000..c337ea7
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-04-28-ShiftShiftLongLong.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; This cannot be turned into a sign extending cast!
+
+define i64 @test(i64 %X) {
+ %Y = shl i64 %X, 16 ; <i64> [#uses=1]
+; CHECK: %Y = shl i64 %X, 16
+ %Z = ashr i64 %Y, 16 ; <i64> [#uses=1]
+; CHECK: %Z = ashr i64 %Y, 16
+ ret i64 %Z
+; CHECK: ret i64 %Z
+}
+
diff --git a/test/Transforms/InstCombine/2006-05-04-DemandedBitCrash.ll b/test/Transforms/InstCombine/2006-05-04-DemandedBitCrash.ll
new file mode 100644
index 0000000..e22395f
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-05-04-DemandedBitCrash.ll
@@ -0,0 +1,51 @@
+; RUN: opt < %s -instcombine -disable-output
+; END.
+
+define void @test() {
+bb38.i:
+ %varspec.0.i1014 = bitcast i64 123814269237067777 to i64 ; <i64> [#uses=1]
+ %locspec.0.i1015 = bitcast i32 1 to i32 ; <i32> [#uses=2]
+ %tmp51391.i1018 = lshr i64 %varspec.0.i1014, 16 ; <i64> [#uses=1]
+ %tmp51392.i1019 = trunc i64 %tmp51391.i1018 to i32 ; <i32> [#uses=2]
+ %tmp51392.mask.i1020 = lshr i32 %tmp51392.i1019, 29 ; <i32> [#uses=1]
+ %tmp7.i1021 = and i32 %tmp51392.mask.i1020, 1 ; <i32> [#uses=2]
+ %tmp18.i1026 = lshr i32 %tmp51392.i1019, 31 ; <i32> [#uses=2]
+ %tmp18.i1027 = trunc i32 %tmp18.i1026 to i8 ; <i8> [#uses=1]
+ br i1 false, label %cond_false1148.i1653, label %bb377.i1259
+
+bb377.i1259: ; preds = %bb38.i
+ br i1 false, label %cond_true541.i1317, label %cond_false1148.i1653
+
+cond_true541.i1317: ; preds = %bb377.i1259
+ %tmp545.i1318 = lshr i32 %locspec.0.i1015, 10 ; <i32> [#uses=1]
+ %tmp550.i1319 = lshr i32 %locspec.0.i1015, 4 ; <i32> [#uses=1]
+ %tmp550551.i1320 = and i32 %tmp550.i1319, 63 ; <i32> [#uses=1]
+ %tmp553.i1321 = icmp ult i32 %tmp550551.i1320, 4 ; <i1> [#uses=1]
+ %tmp558.i1322 = icmp eq i32 %tmp7.i1021, 0 ; <i1> [#uses=1]
+ %bothcond.i1326 = or i1 %tmp553.i1321, false ; <i1> [#uses=1]
+ %bothcond1.i1327 = or i1 %bothcond.i1326, false ; <i1> [#uses=1]
+ %bothcond2.not.i1328 = or i1 %bothcond1.i1327, false ; <i1> [#uses=1]
+ %bothcond3.i1329 = or i1 %bothcond2.not.i1328, %tmp558.i1322 ; <i1> [#uses=0]
+ br i1 false, label %cond_true583.i1333, label %cond_next592.i1337
+
+cond_true583.i1333: ; preds = %cond_true541.i1317
+ br i1 false, label %cond_true586.i1335, label %cond_next592.i1337
+
+cond_true586.i1335: ; preds = %cond_true583.i1333
+ br label %cond_true.i
+
+cond_next592.i1337: ; preds = %cond_true583.i1333, %cond_true541.i1317
+ %mask_z.0.i1339 = phi i32 [ %tmp18.i1026, %cond_true541.i1317 ], [ 0, %cond_true583.i1333 ] ; <i32> [#uses=0]
+ %tmp594.i1340 = and i32 %tmp545.i1318, 15 ; <i32> [#uses=0]
+ br label %cond_true.i
+
+cond_false1148.i1653: ; preds = %bb377.i1259, %bb38.i
+ %tmp1150.i1654 = icmp eq i32 %tmp7.i1021, 0 ; <i1> [#uses=1]
+ %tmp1160.i1656 = icmp eq i8 %tmp18.i1027, 0 ; <i1> [#uses=1]
+ %bothcond8.i1658 = or i1 %tmp1150.i1654, %tmp1160.i1656 ; <i1> [#uses=1]
+ %bothcond9.i1659 = or i1 %bothcond8.i1658, false ; <i1> [#uses=0]
+ br label %cond_true.i
+
+cond_true.i: ; preds = %cond_false1148.i1653, %cond_next592.i1337, %cond_true586.i1335
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2006-09-15-CastToBool.ll b/test/Transforms/InstCombine/2006-09-15-CastToBool.ll
new file mode 100644
index 0000000..ee261ce
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-09-15-CastToBool.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep and
+; PR913
+
+define i32 @test(i32* %tmp1) {
+ %tmp.i = load i32* %tmp1 ; <i32> [#uses=1]
+ %tmp = bitcast i32 %tmp.i to i32 ; <i32> [#uses=1]
+ %tmp2.ui = lshr i32 %tmp, 5 ; <i32> [#uses=1]
+ %tmp2 = bitcast i32 %tmp2.ui to i32 ; <i32> [#uses=1]
+ %tmp3 = and i32 %tmp2, 1 ; <i32> [#uses=1]
+ %tmp3.upgrd.1 = icmp ne i32 %tmp3, 0 ; <i1> [#uses=1]
+ %tmp34 = zext i1 %tmp3.upgrd.1 to i32 ; <i32> [#uses=1]
+ ret i32 %tmp34
+}
+
diff --git a/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst-2.ll b/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst-2.ll
new file mode 100644
index 0000000..889bbcf
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst-2.ll
@@ -0,0 +1,10 @@
+; The optimizer should be able to remove cast operation here.
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep sext.*i32
+
+define i1 @eq_signed_to_small_unsigned(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp eq i32 %Y, 17 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
diff --git a/test/Transforms/InstCombine/2006-10-20-mask.ll b/test/Transforms/InstCombine/2006-10-20-mask.ll
new file mode 100644
index 0000000..0aaa5e8
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-10-20-mask.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep and
+
+define i64 @foo(i64 %tmp, i64 %tmp2) {
+ %tmp.upgrd.1 = trunc i64 %tmp to i32 ; <i32> [#uses=1]
+ %tmp2.upgrd.2 = trunc i64 %tmp2 to i32 ; <i32> [#uses=1]
+ %tmp3 = and i32 %tmp.upgrd.1, %tmp2.upgrd.2 ; <i32> [#uses=1]
+ %tmp4 = zext i32 %tmp3 to i64 ; <i64> [#uses=1]
+ ret i64 %tmp4
+}
+
diff --git a/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll b/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll
new file mode 100644
index 0000000..d3ba1e2
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep mul | count 2
+
+define <4 x float> @test(<4 x float> %V) {
+ %Y = fmul <4 x float> %V, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
+ %Z = fmul <4 x float> %Y, < float 1.000000e+00, float 2.000000e+05, float -3.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
+ ret <4 x float> %Z
+}
+
diff --git a/test/Transforms/InstCombine/2006-11-03-Memmove64.ll b/test/Transforms/InstCombine/2006-11-03-Memmove64.ll
new file mode 100644
index 0000000..35bb45e
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-11-03-Memmove64.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep memmove.i32
+; Instcombine was trying to turn this into a memmove.i32
+
+target datalayout = "e-p:64:64"
+target triple = "alphaev67-unknown-linux-gnu"
+@str10 = internal constant [1 x i8] zeroinitializer ; <[1 x i8]*> [#uses=1]
+
+define void @do_join(i8* %b) {
+entry:
+ call void @llvm.memmove.i64( i8* %b, i8* getelementptr ([1 x i8]* @str10, i32 0, i64 0), i64 1, i32 1 )
+ ret void
+}
+
+declare void @llvm.memmove.i64(i8*, i8*, i64, i32)
+
diff --git a/test/Transforms/InstCombine/2006-11-10-ashr-miscompile.ll b/test/Transforms/InstCombine/2006-11-10-ashr-miscompile.ll
new file mode 100644
index 0000000..7799423
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-11-10-ashr-miscompile.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep lshr
+; Verify this is not turned into -1.
+
+define i32 @test(i8 %amt) {
+ %shift.upgrd.1 = zext i8 %amt to i32 ; <i32> [#uses=1]
+ %B = lshr i32 -1, %shift.upgrd.1 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
diff --git a/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll b/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll
new file mode 100644
index 0000000..7adeb9f
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep sub
+; RUN: opt < %s -instcombine -S | grep add
+
+define <4 x float> @test(<4 x float> %tmp26, <4 x float> %tmp53) {
+ ; (X+Y)-Y != X for fp vectors.
+ %tmp64 = fadd <4 x float> %tmp26, %tmp53 ; <<4 x float>> [#uses=1]
+ %tmp75 = fsub <4 x float> %tmp64, %tmp53 ; <<4 x float>> [#uses=1]
+ ret <4 x float> %tmp75
+}
diff --git a/test/Transforms/InstCombine/2006-12-05-fp-to-int-ext.ll b/test/Transforms/InstCombine/2006-12-05-fp-to-int-ext.ll
new file mode 100644
index 0000000..74483c1
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-05-fp-to-int-ext.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | grep zext
+
+; Never merge these two conversions, even though it's possible: this is
+; significantly more expensive than the two conversions on some targets
+; and it causes libgcc to be compile __fixunsdfdi into a recursive
+; function.
+define i64 @test(double %D) {
+ %A = fptoui double %D to i32 ; <i32> [#uses=1]
+ %B = zext i32 %A to i64 ; <i64> [#uses=1]
+ ret i64 %B
+}
+
diff --git a/test/Transforms/InstCombine/2006-12-08-ICmp-Combining.ll b/test/Transforms/InstCombine/2006-12-08-ICmp-Combining.ll
new file mode 100644
index 0000000..80ee3e2
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-08-ICmp-Combining.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {%bothcond =}
+
+define i1 @Doit_bb(i32 %i.0) {
+bb:
+ %tmp = icmp sgt i32 %i.0, 0 ; <i1> [#uses=1]
+ %tmp.not = xor i1 %tmp, true ; <i1> [#uses=1]
+ %tmp2 = icmp sgt i32 %i.0, 8 ; <i1> [#uses=1]
+ %bothcond = or i1 %tmp.not, %tmp2 ; <i1> [#uses=1]
+ br i1 %bothcond, label %exitTrue, label %exitFalse
+
+exitTrue: ; preds = %bb
+ ret i1 true
+
+exitFalse: ; preds = %bb
+ ret i1 false
+}
+
diff --git a/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll b/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll
new file mode 100644
index 0000000..5a74bd2
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll
@@ -0,0 +1,51 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {icmp sgt}
+; END.
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+ %struct.point = type { i32, i32 }
+
+define i32 @visible(i32 %direction, i64 %p1.0, i64 %p2.0, i64 %p3.0) {
+entry:
+ %p1_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %p2_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %p3_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp.upgrd.1 = getelementptr { i64 }* %tmp, i64 0, i32 0 ; <i64*> [#uses=1]
+ store i64 %p1.0, i64* %tmp.upgrd.1
+ %tmp1 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp2 = getelementptr { i64 }* %tmp1, i64 0, i32 0 ; <i64*> [#uses=1]
+ store i64 %p2.0, i64* %tmp2
+ %tmp3 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp4 = getelementptr { i64 }* %tmp3, i64 0, i32 0 ; <i64*> [#uses=1]
+ store i64 %p3.0, i64* %tmp4
+ %tmp.upgrd.2 = icmp eq i32 %direction, 0 ; <i1> [#uses=1]
+ %tmp5 = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp6 = getelementptr { i64 }* %tmp5, i64 0, i32 0 ; <i64*> [#uses=1]
+ %tmp.upgrd.3 = load i64* %tmp6 ; <i64> [#uses=1]
+ %tmp7 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp8 = getelementptr { i64 }* %tmp7, i64 0, i32 0 ; <i64*> [#uses=1]
+ %tmp9 = load i64* %tmp8 ; <i64> [#uses=1]
+ %tmp10 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp11 = getelementptr { i64 }* %tmp10, i64 0, i32 0 ; <i64*> [#uses=1]
+ %tmp12 = load i64* %tmp11 ; <i64> [#uses=1]
+ %tmp13 = call i32 @determinant( i64 %tmp.upgrd.3, i64 %tmp9, i64 %tmp12 ) ; <i32> [#uses=2]
+ br i1 %tmp.upgrd.2, label %cond_true, label %cond_false
+
+cond_true: ; preds = %entry
+ %tmp14 = icmp slt i32 %tmp13, 0 ; <i1> [#uses=1]
+ %tmp14.upgrd.4 = zext i1 %tmp14 to i32 ; <i32> [#uses=1]
+ br label %return
+
+cond_false: ; preds = %entry
+ %tmp26 = icmp sgt i32 %tmp13, 0 ; <i1> [#uses=1]
+ %tmp26.upgrd.5 = zext i1 %tmp26 to i32 ; <i32> [#uses=1]
+ br label %return
+
+return: ; preds = %cond_false, %cond_true
+ %retval.0 = phi i32 [ %tmp14.upgrd.4, %cond_true ], [ %tmp26.upgrd.5, %cond_false ] ; <i32> [#uses=1]
+ ret i32 %retval.0
+}
+
+declare i32 @determinant(i64, i64, i64)
diff --git a/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll b/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll
new file mode 100644
index 0000000..2665791
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll
@@ -0,0 +1,41 @@
+; RUN: opt < %s -instcombine -S | grep select
+; END.
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+ %struct.point = type { i32, i32 }
+
+define i32 @visible(i32 %direction, i64 %p1.0, i64 %p2.0, i64 %p3.0) {
+entry:
+ %p1_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %p2_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %p3_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %tmp = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp.upgrd.1 = getelementptr { i64 }* %tmp, i32 0, i32 0 ; <i64*> [#uses=1]
+ store i64 %p1.0, i64* %tmp.upgrd.1
+ %tmp1 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp2 = getelementptr { i64 }* %tmp1, i32 0, i32 0 ; <i64*> [#uses=1]
+ store i64 %p2.0, i64* %tmp2
+ %tmp3 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp4 = getelementptr { i64 }* %tmp3, i32 0, i32 0 ; <i64*> [#uses=1]
+ store i64 %p3.0, i64* %tmp4
+ %tmp.upgrd.2 = icmp eq i32 %direction, 0 ; <i1> [#uses=1]
+ %tmp5 = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp6 = getelementptr { i64 }* %tmp5, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp.upgrd.3 = load i64* %tmp6 ; <i64> [#uses=1]
+ %tmp7 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp8 = getelementptr { i64 }* %tmp7, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp9 = load i64* %tmp8 ; <i64> [#uses=1]
+ %tmp10 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp11 = getelementptr { i64 }* %tmp10, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp12 = load i64* %tmp11 ; <i64> [#uses=1]
+ %tmp13 = call i32 @determinant( i64 %tmp.upgrd.3, i64 %tmp9, i64 %tmp12 ) ; <i32> [#uses=2]
+ %tmp14 = icmp slt i32 %tmp13, 0 ; <i1> [#uses=1]
+ %tmp26 = icmp sgt i32 %tmp13, 0 ; <i1> [#uses=1]
+ %retval.0.in = select i1 %tmp.upgrd.2, i1 %tmp14, i1 %tmp26 ; <i1> [#uses=1]
+ %retval.0 = zext i1 %retval.0.in to i32 ; <i32> [#uses=1]
+ ret i32 %retval.0
+}
+
+declare i32 @determinant(i64, i64, i64)
+
diff --git a/test/Transforms/InstCombine/2006-12-15-Range-Test.ll b/test/Transforms/InstCombine/2006-12-15-Range-Test.ll
new file mode 100644
index 0000000..c3700a0
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-15-Range-Test.ll
@@ -0,0 +1,31 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep icmp | count 1
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {icmp ugt} | count 1
+; END.
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+@r = external global [17 x i32] ; <[17 x i32]*> [#uses=1]
+
+define i1 @print_pgm_cond_true(i32 %tmp12.reload, i32* %tmp16.out) {
+newFuncRoot:
+ br label %cond_true
+
+bb27.exitStub: ; preds = %cond_true
+ store i32 %tmp16, i32* %tmp16.out
+ ret i1 true
+
+cond_next23.exitStub: ; preds = %cond_true
+ store i32 %tmp16, i32* %tmp16.out
+ ret i1 false
+
+cond_true: ; preds = %newFuncRoot
+ %tmp15 = getelementptr [17 x i32]* @r, i32 0, i32 %tmp12.reload ; <i32*> [#uses=1]
+ %tmp16 = load i32* %tmp15 ; <i32> [#uses=4]
+ %tmp18 = icmp slt i32 %tmp16, -31 ; <i1> [#uses=1]
+ %tmp21 = icmp sgt i32 %tmp16, 31 ; <i1> [#uses=1]
+ %bothcond = or i1 %tmp18, %tmp21 ; <i1> [#uses=1]
+ br i1 %bothcond, label %bb27.exitStub, label %cond_next23.exitStub
+}
+
diff --git a/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll b/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll
new file mode 100644
index 0000000..eba1ac1
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll
@@ -0,0 +1,30 @@
+; For PR1065. This causes an assertion in instcombine if a select with two cmp
+; operands is encountered.
+; RUN: opt < %s -instcombine -disable-output
+; END.
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+ %struct.internal_state = type { i32 }
+ %struct.mng_data = type { i32, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, i32, i32, i8, i32, i32, i32, i32, i16, i16, i16, i8, i8, double, double, double, i8, i8, i8, i8, i32, i32, i32, i32, i32, i8, i32, i32, i8*, i8* (i32)*, void (i8*, i32)*, void (i8*, i8*, i32)*, i8 (%struct.mng_data*)*, i8 (%struct.mng_data*)*, i8 (%struct.mng_data*, i8*, i32, i32*)*, i8 (%struct.mng_data*, i8*, i32, i32*)*, i8 (%struct.mng_data*, i32, i8, i32, i32, i32, i32, i8*)*, i8 (%struct.mng_data*, i32, i32, i8*)*, i8 (%struct.mng_data*, i32, i32)*, i8 (%struct.mng_data*, i8, i8*, i8*, i8*, i8*)*, i8 (%struct.mng_data*)*, i8 (%struct.mng_data*, i8*)*, i8 (%struct.mng_data*, i8*)*, i8 (%struct.mng_data*, i32, i32)*, i8 (%struct.mng_data*, i32, i32, i8*)*, i8 (%struct.mng_data*, i8, i8, i32, i32)*, i8* (%struct.mng_data*, i32)*, i8* (%struct.mng_data*, i32)*, i8* (%struct.mng_data*, i32)*, i8 (%struct.mng_data*, i32, i32, i32, i32)*, i32 (%struct.mng_data*)*, i8 (%struct.mng_data*, i32)*, i8 (%struct.mng_data*, i32)*, i8 (%struct.mng_data*, i32, i32, i32, i32, i32, i32, i32, i32)*, i8 (%struct.mng_data*, i8)*, i8 (%struct.mng_data*, i32, i8*)*, i8 (%struct.mng_data*, i32, i8, i8*)*, i8, i32, i32, i8*, i8*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i8, i8, i8, i8, i8, i32, i8, i8, i8, i32, i8*, i32, i8*, i32, i8, i8, i8, i32, i8*, i8*, i32, i32, i8*, i8*, %struct.mng_pushdata*, %struct.mng_pushdata*, %struct.mng_pushdata*, %struct.mng_pushdata*, i8, i8, i32, i32, i8*, i8, i8, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8, i32, i32, i8*, i32, i32, i32, i8, i8, i32, i32, i32, i32, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i8*, i8*, i8*, i32, i8*, i8*, i8*, i8*, i8*, %struct.mng_savedata*, i32, i32, i32, i32, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i8*, i8, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i8*, i8*, i8*, i8*, [256 x i8], double, 
void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, i16, i8, i8, i8, i8, i8, i32, i32, i8, i32, i32, i32, i32, i16, i16, i16, i8, i16, i8, i32, i32, i32, i32, i8, i32, i32, i8, i32, i32, i32, i32, i8, i32, i32, i8, i32, i32, i32, i32, i32, i8, i32, i8, i16, i16, i16, i16, i32, [256 x %struct.mng_palette8e], i32, [256 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, i8*, i16, i16, i16, i8*, i8, i8, i32, i32, i32, i32, i8, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, i8*, i8, i8, i8, i32, i8*, i8*, i16, i16, i16, i16, i32, i32, i8*, %struct.z_stream, i32, i32, i32, i32, i32, i32, i8, i8, [256 x i32], i8 }
+ %struct.mng_palette8e = type { i8, i8, i8 }
+ %struct.mng_pushdata = type { i8*, i8*, i32, i8, i8*, i32 }
+ %struct.mng_savedata = type { i8, i8, i8, i8, i8, i8, i8, i16, i16, i16, i8, i16, i8, i8, i32, i32, i8, i32, i32, i32, i32, i32, [256 x %struct.mng_palette8e], i32, [256 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, i8*, i16, i16, i16 }
+ %struct.z_stream = type { i8*, i32, i32, i8*, i32, i32, i8*, %struct.internal_state*, i8* (i8*, i32, i32)*, void (i8*, i8*)*, i8*, i32, i32, i32 }
+
+define void @mng_write_basi() {
+entry:
+ %tmp = load i8* null ; <i8> [#uses=1]
+ %tmp.upgrd.1 = icmp ugt i8 %tmp, 8 ; <i1> [#uses=1]
+ %tmp.upgrd.2 = load i16* null ; <i16> [#uses=2]
+ %tmp3 = icmp eq i16 %tmp.upgrd.2, 255 ; <i1> [#uses=1]
+ %tmp7 = icmp eq i16 %tmp.upgrd.2, -1 ; <i1> [#uses=1]
+ %bOpaque.0.in = select i1 %tmp.upgrd.1, i1 %tmp7, i1 %tmp3 ; <i1> [#uses=1]
+ br i1 %bOpaque.0.in, label %cond_next90, label %bb95
+
+cond_next90: ; preds = %entry
+ ret void
+
+bb95: ; preds = %entry
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll b/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll
new file mode 100644
index 0000000..e5238a5
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep {icmp ugt}
+; PR1107
+; PR1940
+
+define i1 @test(i8 %A, i8 %B) {
+ %a = zext i8 %A to i32
+ %b = zext i8 %B to i32
+ %c = icmp sgt i32 %a, %b
+ ret i1 %c
+}
diff --git a/test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll b/test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll
new file mode 100644
index 0000000..d2d215f
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll
@@ -0,0 +1,6 @@
+; RUN: opt < %s -instcombine -S | grep {fcmp uno.*0.0}
+; PR1111
+define i1 @test(double %X) {
+ %tmp = fcmp une double %X, %X
+ ret i1 %tmp
+}
diff --git a/test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll b/test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll
new file mode 100644
index 0000000..fed2255
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define <4 x i32> @test(<4 x i32> %A) {
+ %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
+ %C = and <4 x i32> %B, < i32 -1, i32 -1, i32 -1, i32 -1 >
+ ret <4 x i32> %C
+}
diff --git a/test/Transforms/InstCombine/2007-01-27-AndICmp.ll b/test/Transforms/InstCombine/2007-01-27-AndICmp.ll
new file mode 100644
index 0000000..bd15dce
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-01-27-AndICmp.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep {ugt.*, 1}
+
+define i1 @test(i32 %tmp1030) {
+ %tmp1037 = icmp ne i32 %tmp1030, 40 ; <i1> [#uses=1]
+ %tmp1039 = icmp ne i32 %tmp1030, 41 ; <i1> [#uses=1]
+ %tmp1042 = and i1 %tmp1037, %tmp1039 ; <i1> [#uses=1]
+ ret i1 %tmp1042
+}
diff --git a/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll b/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll
new file mode 100644
index 0000000..05891a2
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll
@@ -0,0 +1,45 @@
+; RUN: opt < %s -instcombine -mem2reg -S | grep {%A = alloca}
+; RUN: opt < %s -instcombine -mem2reg -S | \
+; RUN: not grep {%B = alloca}
+; END.
+
+; Ensure that instcombine doesn't sink the loads in entry/cond_true into
+; cond_next. Doing so prevents mem2reg from promoting the B alloca.
+
+define i32 @test2(i32 %C) {
+entry:
+ %A = alloca i32
+ %B = alloca i32
+ %tmp = call i32 (...)* @bar( i32* %A ) ; <i32> [#uses=0]
+ %T = load i32* %A ; <i32> [#uses=1]
+ %tmp2 = icmp eq i32 %C, 0 ; <i1> [#uses=1]
+ br i1 %tmp2, label %cond_next, label %cond_true
+
+cond_true: ; preds = %entry
+ store i32 123, i32* %B
+ call i32 @test2( i32 123 ) ; <i32>:0 [#uses=0]
+ %T1 = load i32* %B ; <i32> [#uses=1]
+ br label %cond_next
+
+cond_next: ; preds = %cond_true, %entry
+ %tmp1.0 = phi i32 [ %T1, %cond_true ], [ %T, %entry ] ; <i32> [#uses=1]
+ %tmp7 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp8 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp9 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp10 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp11 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp12 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp13 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp14 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp15 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp16 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp17 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp18 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp19 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp20 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ ret i32 %tmp1.0
+}
+
+declare i32 @bar(...)
+
+declare i32 @baq(...)
diff --git a/test/Transforms/InstCombine/2007-02-07-PointerCast.ll b/test/Transforms/InstCombine/2007-02-07-PointerCast.ll
new file mode 100644
index 0000000..bf60991
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-02-07-PointerCast.ll
@@ -0,0 +1,22 @@
+;RUN: opt < %s -instcombine -S | grep zext
+
+; Make sure the uint isn't removed. Instcombine in llvm 1.9 was dropping the
+; uint cast which was causing a sign extend. This only affected code with
+; pointers in the high half of memory, so it wasn't noticed much
+; compile a kernel though...
+
+target datalayout = "e-p:32:32"
+@str = internal constant [6 x i8] c"%llx\0A\00" ; <[6 x i8]*> [#uses=1]
+
+declare i32 @printf(i8*, ...)
+
+define i32 @main(i32 %x, i8** %a) {
+entry:
+ %tmp = getelementptr [6 x i8]* @str, i32 0, i64 0 ; <i8*> [#uses=1]
+ %tmp1 = load i8** %a ; <i8*> [#uses=1]
+ %tmp2 = ptrtoint i8* %tmp1 to i32 ; <i32> [#uses=1]
+ %tmp3 = zext i32 %tmp2 to i64 ; <i64> [#uses=1]
+ %tmp.upgrd.1 = call i32 (i8*, ...)* @printf( i8* %tmp, i64 %tmp3 ) ; <i32> [#uses=0]
+ ret i32 0
+}
+
diff --git a/test/Transforms/InstCombine/2007-02-23-PhiFoldInfLoop.ll b/test/Transforms/InstCombine/2007-02-23-PhiFoldInfLoop.ll
new file mode 100644
index 0000000..f31c280
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-02-23-PhiFoldInfLoop.ll
@@ -0,0 +1,31 @@
+; RUN: opt < %s -instcombine -S | grep ret
+; PR1217
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+ %struct.termbox = type { %struct.termbox*, i32, i32, i32, i32, i32 }
+
+
+define void @ggenorien() {
+entry:
+ %tmp68 = icmp eq %struct.termbox* null, null ; <i1> [#uses=1]
+ br i1 %tmp68, label %cond_next448, label %bb80
+
+bb80: ; preds = %entry
+ ret void
+
+cond_next448: ; preds = %entry
+ br i1 false, label %bb756, label %bb595
+
+bb595: ; preds = %cond_next448
+ br label %bb609
+
+bb609: ; preds = %bb756, %bb595
+ %termnum.6240.0 = phi i32 [ 2, %bb595 ], [ %termnum.6, %bb756 ] ; <i32> [#uses=1]
+ %tmp755 = add i32 %termnum.6240.0, 1 ; <i32> [#uses=1]
+ br label %bb756
+
+bb756: ; preds = %bb609, %cond_next448
+ %termnum.6 = phi i32 [ %tmp755, %bb609 ], [ 2, %cond_next448 ] ; <i32> [#uses=1]
+ br label %bb609
+}
diff --git a/test/Transforms/InstCombine/2007-03-13-CompareMerge.ll b/test/Transforms/InstCombine/2007-03-13-CompareMerge.ll
new file mode 100644
index 0000000..109e4a2
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-03-13-CompareMerge.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {icmp sle}
+; PR1244
+
+define i1 @test(i32 %c.3.i, i32 %d.292.2.i) {
+ %tmp266.i = icmp slt i32 %c.3.i, %d.292.2.i
+ %tmp276.i = icmp eq i32 %c.3.i, %d.292.2.i
+ %sel_tmp80 = or i1 %tmp266.i, %tmp276.i
+ ret i1 %sel_tmp80
+}
diff --git a/test/Transforms/InstCombine/2007-03-19-BadTruncChangePR1261.ll b/test/Transforms/InstCombine/2007-03-19-BadTruncChangePR1261.ll
new file mode 100644
index 0000000..589bd80
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-03-19-BadTruncChangePR1261.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep zext
+; PR1261.
+
+define i16 @test(i31 %zzz) {
+ %A = sext i31 %zzz to i32
+ %B = add i32 %A, 16384
+ %C = lshr i32 %B, 15
+ %D = trunc i32 %C to i16
+ ret i16 %D
+}
diff --git a/test/Transforms/InstCombine/2007-03-21-SignedRangeTest.ll b/test/Transforms/InstCombine/2007-03-21-SignedRangeTest.ll
new file mode 100644
index 0000000..ca93af3
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-03-21-SignedRangeTest.ll
@@ -0,0 +1,7 @@
+; For PR1248
+; RUN: opt < %s -instcombine -S | grep {ugt i32 .*, 11}
+define i1 @test(i32 %tmp6) {
+ %tmp7 = sdiv i32 %tmp6, 12 ; <i32> [#uses=1]
+ icmp ne i32 %tmp7, -6 ; <i1>:1 [#uses=1]
+ ret i1 %1
+}
diff --git a/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll b/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll
new file mode 100644
index 0000000..c794004
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll
@@ -0,0 +1,29 @@
+; PR1271
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {icmp eq i32 .tmp.*, 2146435072}
+%struct..0anon = type { i32, i32 }
+%struct..1anon = type { double }
+
+define i32 @main() {
+entry:
+ %u = alloca %struct..1anon, align 8 ; <%struct..1anon*> [#uses=4]
+ %tmp1 = getelementptr %struct..1anon* %u, i32 0, i32 0 ; <double*> [#uses=1]
+ store double 0x7FF0000000000000, double* %tmp1
+ %tmp3 = getelementptr %struct..1anon* %u, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp34 = bitcast double* %tmp3 to %struct..0anon* ; <%struct..0anon*> [#uses=1]
+ %tmp5 = getelementptr %struct..0anon* %tmp34, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp6 = load i32* %tmp5 ; <i32> [#uses=1]
+ %tmp7 = shl i32 %tmp6, 1 ; <i32> [#uses=1]
+ %tmp8 = lshr i32 %tmp7, 21 ; <i32> [#uses=1]
+ %tmp89 = trunc i32 %tmp8 to i16 ; <i16> [#uses=1]
+ icmp ne i16 %tmp89, 2047 ; <i1>:0 [#uses=1]
+ zext i1 %0 to i8 ; <i8>:1 [#uses=1]
+ icmp ne i8 %1, 0 ; <i1>:2 [#uses=1]
+ br i1 %2, label %cond_true, label %cond_false
+
+cond_true: ; preds = %entry
+ ret i32 0
+
+cond_false: ; preds = %entry
+ ret i32 1
+}
diff --git a/test/Transforms/InstCombine/2007-03-25-DoubleShift.ll b/test/Transforms/InstCombine/2007-03-25-DoubleShift.ll
new file mode 100644
index 0000000..0d4aac2
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-03-25-DoubleShift.ll
@@ -0,0 +1,9 @@
+; PR1271
+; RUN: opt < %s -instcombine -S | grep and
+define i1 @test(i32 %tmp13) {
+entry:
+ %tmp14 = shl i32 %tmp13, 12 ; <i32> [#uses=1]
+ %tmp15 = lshr i32 %tmp14, 12 ; <i32> [#uses=1]
+ %res = icmp ne i32 %tmp15, 0 ; <i1>:3 [#uses=1]
+ ret i1 %res
+}
diff --git a/test/Transforms/InstCombine/2007-03-26-BadShiftMask.ll b/test/Transforms/InstCombine/2007-03-26-BadShiftMask.ll
new file mode 100644
index 0000000..5bcb543
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-03-26-BadShiftMask.ll
@@ -0,0 +1,36 @@
+; PR1271
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {ashr i32 %.mp137, 2}
+; END.
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
+target triple = "i686-pc-linux-gnu"
+
+
+define i1 @test(i32* %tmp141, i32* %tmp145,
+ i32 %b8, i32 %iftmp.430.0, i32* %tmp134.out, i32* %tmp137.out)
+{
+newFuncRoot:
+ %tmp133 = and i32 %b8, 1 ; <i32> [#uses=1]
+ %tmp134 = shl i32 %tmp133, 3 ; <i32> [#uses=3]
+ %tmp136 = ashr i32 %b8, 1 ; <i32> [#uses=1]
+ %tmp137 = shl i32 %tmp136, 3 ; <i32> [#uses=3]
+ %tmp139 = ashr i32 %tmp134, 2 ; <i32> [#uses=1]
+ store i32 %tmp139, i32* %tmp141
+ %tmp143 = ashr i32 %tmp137, 2 ; <i32> [#uses=1]
+ store i32 %tmp143, i32* %tmp145
+ icmp eq i32 %iftmp.430.0, 0 ; <i1>:0 [#uses=1]
+ zext i1 %0 to i8 ; <i8>:1 [#uses=1]
+ icmp ne i8 %1, 0 ; <i1>:2 [#uses=1]
+ br i1 %2, label %cond_true147.exitStub, label %cond_false252.exitStub
+
+cond_true147.exitStub: ; preds = %newFuncRoot
+ store i32 %tmp134, i32* %tmp134.out
+ store i32 %tmp137, i32* %tmp137.out
+ ret i1 true
+
+cond_false252.exitStub: ; preds = %newFuncRoot
+ store i32 %tmp134, i32* %tmp134.out
+ store i32 %tmp137, i32* %tmp137.out
+ ret i1 false
+}
diff --git a/test/Transforms/InstCombine/2007-04-04-BadFoldBitcastIntoMalloc.ll b/test/Transforms/InstCombine/2007-04-04-BadFoldBitcastIntoMalloc.ll
new file mode 100644
index 0000000..b59d3c8
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-04-04-BadFoldBitcastIntoMalloc.ll
@@ -0,0 +1,19 @@
+; In the presence of a negative offset (the -8 below), a fold of a bitcast into
+; a malloc messes up the element count, causing an extra 4GB to be allocated on
+; 64-bit targets.
+;
+; RUN: opt < %s -instcombine -S | not grep {= add }
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
+target triple = "x86_64-unknown-freebsd6.2"
+
+define i1 @test(i32 %tmp141, double** %tmp145)
+{
+ %tmp133 = add i32 %tmp141, 1
+ %tmp134 = shl i32 %tmp133, 3
+ %tmp135 = add i32 %tmp134, -8
+ %tmp136 = malloc i8, i32 %tmp135
+ %tmp137 = bitcast i8* %tmp136 to double*
+ store double* %tmp137, double** %tmp145
+ ret i1 false
+}
diff --git a/test/Transforms/InstCombine/2007-04-08-SingleEltVectorCrash.ll b/test/Transforms/InstCombine/2007-04-08-SingleEltVectorCrash.ll
new file mode 100644
index 0000000..22eb2c2
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-04-08-SingleEltVectorCrash.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR1304
+
+define i64 @bork(<1 x i64> %vec) {
+ %tmp = extractelement <1 x i64> %vec, i32 0
+ ret i64 %tmp
+}
diff --git a/test/Transforms/InstCombine/2007-05-04-Crash.ll b/test/Transforms/InstCombine/2007-05-04-Crash.ll
new file mode 100644
index 0000000..9f50d8a
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-05-04-Crash.ll
@@ -0,0 +1,30 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR1384
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
+target triple = "i686-apple-darwin8"
+ %struct.CFRuntimeBase = type { i32, [4 x i8] }
+ %struct.CGColor = type opaque
+ %struct.CGColorSpace = type { %struct.CFRuntimeBase, i8, i8, i8, i32, i32, i32, %struct.CGColor*, float*, %struct.CGMD5Signature, %struct.CGMD5Signature*, [0 x %struct.CGColorSpaceDescriptor] }
+ %struct.CGColorSpaceCalibratedRGBData = type { [3 x float], [3 x float], [3 x float], [9 x float] }
+ %struct.CGColorSpaceDescriptor = type { %struct.CGColorSpaceCalibratedRGBData }
+ %struct.CGColorSpaceLabData = type { [3 x float], [3 x float], [4 x float] }
+ %struct.CGMD5Signature = type { [16 x i8], i8 }
+
+declare fastcc %struct.CGColorSpace* @CGColorSpaceCreate(i32, i32)
+
+declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
+
+define %struct.CGColorSpace* @CGColorSpaceCreateLab(float* %whitePoint, float* %blackPoint, float* %range) {
+entry:
+ %tmp17 = call fastcc %struct.CGColorSpace* @CGColorSpaceCreate( i32 5, i32 3 ) ; <%struct.CGColorSpace*> [#uses=2]
+ %tmp28 = getelementptr %struct.CGColorSpace* %tmp17, i32 0, i32 11 ; <[0 x %struct.CGColorSpaceDescriptor]*> [#uses=1]
+ %tmp29 = getelementptr [0 x %struct.CGColorSpaceDescriptor]* %tmp28, i32 0, i32 0 ; <%struct.CGColorSpaceDescriptor*> [#uses=1]
+ %tmp30 = getelementptr %struct.CGColorSpaceDescriptor* %tmp29, i32 0, i32 0 ; <%struct.CGColorSpaceCalibratedRGBData*> [#uses=1]
+ %tmp3031 = bitcast %struct.CGColorSpaceCalibratedRGBData* %tmp30 to %struct.CGColorSpaceLabData* ; <%struct.CGColorSpaceLabData*> [#uses=1]
+ %tmp45 = getelementptr %struct.CGColorSpaceLabData* %tmp3031, i32 0, i32 2 ; <[4 x float]*> [#uses=1]
+ %tmp46 = getelementptr [4 x float]* %tmp45, i32 0, i32 0 ; <float*> [#uses=1]
+ %tmp4648 = bitcast float* %tmp46 to i8* ; <i8*> [#uses=1]
+ call void @llvm.memcpy.i32( i8* %tmp4648, i8* null, i32 16, i32 4 )
+ ret %struct.CGColorSpace* %tmp17
+}
diff --git a/test/Transforms/InstCombine/2007-05-10-icmp-or.ll b/test/Transforms/InstCombine/2007-05-10-icmp-or.ll
new file mode 100644
index 0000000..4af5dfe
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-05-10-icmp-or.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -disable-output
+define i1 @test(i32 %tmp9) {
+ %tmp20 = icmp ugt i32 %tmp9, 255 ; <i1> [#uses=1]
+ %tmp11.not = icmp sgt i32 %tmp9, 255 ; <i1> [#uses=1]
+ %bothcond = or i1 %tmp20, %tmp11.not ; <i1> [#uses=1]
+ ret i1 %bothcond
+}
+
diff --git a/test/Transforms/InstCombine/2007-05-14-Crash.ll b/test/Transforms/InstCombine/2007-05-14-Crash.ll
new file mode 100644
index 0000000..a3c010d
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-05-14-Crash.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -disable-output
+
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
+target triple = "powerpc-apple-darwin8.8.0"
+
+%struct.abc = type { i32, [32 x i8] }
+%struct.def = type { i8**, %struct.abc }
+ %struct.anon = type <{ }>
+
+define i8* @foo(%struct.anon* %deviceRef, %struct.abc* %pCap) {
+entry:
+ %tmp1 = bitcast %struct.anon* %deviceRef to %struct.def*
+ %tmp3 = getelementptr %struct.def* %tmp1, i32 0, i32 1
+ %tmp35 = bitcast %struct.abc* %tmp3 to i8*
+ ret i8* %tmp35
+}
+
+
diff --git a/test/Transforms/InstCombine/2007-05-18-CastFoldBug.ll b/test/Transforms/InstCombine/2007-05-18-CastFoldBug.ll
new file mode 100644
index 0000000..40818d4
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-05-18-CastFoldBug.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep {call.*sret}
+; Make sure instcombine doesn't drop the sret attribute.
+
+define void @blah(i16* %tmp10) {
+entry:
+ call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend_stret to void (i16* sret )*)( i16* %tmp10 sret )
+ ret void
+}
+
+declare i8* @objc_msgSend_stret(i8*, i8*, ...)
diff --git a/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll b/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll
new file mode 100644
index 0000000..62b9351
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -S | grep {ashr}
+; PR1499
+
+define void @av_cmp_q_cond_true(i32* %retval, i32* %tmp9, i64* %tmp10) {
+newFuncRoot:
+ br label %cond_true
+
+return.exitStub: ; preds = %cond_true
+ ret void
+
+cond_true: ; preds = %newFuncRoot
+ %tmp30 = load i64* %tmp10 ; <i64> [#uses=1]
+ %.cast = zext i32 63 to i64 ; <i64> [#uses=1]
+ %tmp31 = ashr i64 %tmp30, %.cast ; <i64> [#uses=1]
+ %tmp3132 = trunc i64 %tmp31 to i32 ; <i32> [#uses=1]
+ %tmp33 = or i32 %tmp3132, 1 ; <i32> [#uses=1]
+ store i32 %tmp33, i32* %tmp9
+ %tmp34 = load i32* %tmp9 ; <i32> [#uses=1]
+ store i32 %tmp34, i32* %retval
+ br label %return.exitStub
+}
+
diff --git a/test/Transforms/InstCombine/2007-06-21-DivCompareMiscomp.ll b/test/Transforms/InstCombine/2007-06-21-DivCompareMiscomp.ll
new file mode 100644
index 0000000..af539c1
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-06-21-DivCompareMiscomp.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {ret i1 true}
+; rdar://5278853
+
+define i1 @test(i32 %tmp468) {
+ %tmp470 = udiv i32 %tmp468, 4 ; <i32> [#uses=2]
+ %tmp475 = icmp ult i32 %tmp470, 1073741824 ; <i1> [#uses=1]
+ ret i1 %tmp475
+}
+
diff --git a/test/Transforms/InstCombine/2007-08-02-InfiniteLoop.ll b/test/Transforms/InstCombine/2007-08-02-InfiniteLoop.ll
new file mode 100644
index 0000000..3f76187
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-08-02-InfiniteLoop.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR1594
+
+define i64 @test(i16 %tmp510, i16 %tmp512) {
+ %W = sext i16 %tmp510 to i32 ; <i32> [#uses=1]
+ %X = sext i16 %tmp512 to i32 ; <i32> [#uses=1]
+ %Y = add i32 %W, %X ; <i32> [#uses=1]
+ %Z = sext i32 %Y to i64 ; <i64> [#uses=1]
+ ret i64 %Z
+}
diff --git a/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll b/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll
new file mode 100644
index 0000000..c27fe0a
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -S | grep icmp
+; PR1646
+
+@__gthrw_pthread_cancel = alias weak i32 (i32)* @pthread_cancel ; <i32 (i32)*> [#uses=1]
+@__gthread_active_ptr.5335 = internal constant i8* bitcast (i32 (i32)* @__gthrw_pthread_cancel to i8*) ; <i8**> [#uses=1]
+declare extern_weak i32 @pthread_cancel(i32)
+
+define i1 @__gthread_active_p() {
+entry:
+ %tmp1 = load i8** @__gthread_active_ptr.5335, align 4 ; <i8*> [#uses=1]
+ %tmp2 = icmp ne i8* %tmp1, null ; <i1> [#uses=1]
+ ret i1 %tmp2
+}
diff --git a/test/Transforms/InstCombine/2007-09-11-Trampoline.ll b/test/Transforms/InstCombine/2007-09-11-Trampoline.ll
new file mode 100644
index 0000000..d8f3d97
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-09-11-Trampoline.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -instcombine -S | grep {call i32 @f}
+
+ %struct.FRAME.nest = type { i32, i32 (i32)* }
+ %struct.__builtin_trampoline = type { [10 x i8] }
+
+declare i8* @llvm.init.trampoline(i8*, i8*, i8*)
+
+declare i32 @f(%struct.FRAME.nest* nest , i32 )
+
+define i32 @nest(i32 %n) {
+entry:
+ %FRAME.0 = alloca %struct.FRAME.nest, align 8 ; <%struct.FRAME.nest*> [#uses=3]
+ %TRAMP.216 = alloca [10 x i8], align 16 ; <[10 x i8]*> [#uses=1]
+ %TRAMP.216.sub = getelementptr [10 x i8]* %TRAMP.216, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp3 = getelementptr %struct.FRAME.nest* %FRAME.0, i32 0, i32 0 ; <i32*> [#uses=1]
+ store i32 %n, i32* %tmp3, align 8
+ %FRAME.06 = bitcast %struct.FRAME.nest* %FRAME.0 to i8* ; <i8*> [#uses=1]
+ %tramp = call i8* @llvm.init.trampoline( i8* %TRAMP.216.sub, i8* bitcast (i32 (%struct.FRAME.nest* nest , i32)* @f to i8*), i8* %FRAME.06 ) ; <i8*> [#uses=1]
+ %tmp7 = getelementptr %struct.FRAME.nest* %FRAME.0, i32 0, i32 1 ; <i32 (i32)**> [#uses=1]
+ %tmp89 = bitcast i8* %tramp to i32 (i32)* ; <i32 (i32)*> [#uses=2]
+ store i32 (i32)* %tmp89, i32 (i32)** %tmp7, align 8
+ %tmp2.i = call i32 %tmp89( i32 1 ) ; <i32> [#uses=1]
+ ret i32 %tmp2.i
+}
diff --git a/test/Transforms/InstCombine/2007-09-17-AliasConstFold2.ll b/test/Transforms/InstCombine/2007-09-17-AliasConstFold2.ll
new file mode 100644
index 0000000..23ee12b
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-09-17-AliasConstFold2.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep icmp
+; PR1678
+
+@A = alias weak void ()* @B ; <void ()*> [#uses=1]
+
+declare extern_weak void @B()
+
+define i32 @active() {
+entry:
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp1 = icmp ne void ()* @A, null ; <i1> [#uses=1]
+ %tmp12 = zext i1 %tmp1 to i32 ; <i32> [#uses=1]
+ ret i32 %tmp12
+}
diff --git a/test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll b/test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll
new file mode 100644
index 0000000..710aff2
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll
@@ -0,0 +1,21 @@
+; RUN: opt < %s -instcombine -S | not grep call
+; RUN: opt < %s -std-compile-opts -S | not grep xyz
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+@.str = internal constant [4 x i8] c"xyz\00" ; <[4 x i8]*> [#uses=1]
+
+define void @foo(i8* %P) {
+entry:
+ %P_addr = alloca i8* ; <i8**> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store i8* %P, i8** %P_addr
+ %tmp = load i8** %P_addr, align 4 ; <i8*> [#uses=1]
+ %tmp1 = getelementptr [4 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
+ call void @llvm.memcpy.i32( i8* %tmp, i8* %tmp1, i32 4, i32 1 )
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
diff --git a/test/Transforms/InstCombine/2007-10-12-Crash.ll b/test/Transforms/InstCombine/2007-10-12-Crash.ll
new file mode 100644
index 0000000..b3d9f02
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-10-12-Crash.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -instcombine -disable-output
+
+ %struct.Ray = type { %struct.Vec, %struct.Vec }
+ %struct.Scene = type { i32 (...)** }
+ %struct.Vec = type { double, double, double }
+
+declare double @_Z9ray_traceRK3VecRK3RayRK5Scene(%struct.Vec*, %struct.Ray*, %struct.Scene*)
+
+define i32 @main(i32 %argc, i8** %argv) {
+entry:
+ %tmp3 = alloca %struct.Ray, align 4 ; <%struct.Ray*> [#uses=2]
+ %tmp97 = icmp slt i32 0, 512 ; <i1> [#uses=1]
+ br i1 %tmp97, label %bb71, label %bb108
+
+bb29: ; preds = %bb62
+ %tmp322 = bitcast %struct.Ray* %tmp3 to %struct.Vec* ; <%struct.Vec*> [#uses=1]
+ %tmp322.0 = getelementptr %struct.Vec* %tmp322, i32 0, i32 0 ; <double*> [#uses=1]
+ store double 0.000000e+00, double* %tmp322.0
+ %tmp57 = call double @_Z9ray_traceRK3VecRK3RayRK5Scene( %struct.Vec* null, %struct.Ray* %tmp3, %struct.Scene* null ) ; <double> [#uses=0]
+ br label %bb62
+
+bb62: ; preds = %bb71, %bb29
+ %tmp65 = icmp slt i32 0, 4 ; <i1> [#uses=1]
+ br i1 %tmp65, label %bb29, label %bb68
+
+bb68: ; preds = %bb62
+ ret i32 0
+
+bb71: ; preds = %entry
+ %tmp74 = icmp slt i32 0, 4 ; <i1> [#uses=1]
+ br i1 %tmp74, label %bb62, label %bb77
+
+bb77: ; preds = %bb71
+ ret i32 0
+
+bb108: ; preds = %entry
+ ret i32 0
+}
diff --git a/test/Transforms/InstCombine/2007-10-28-stacksave.ll b/test/Transforms/InstCombine/2007-10-28-stacksave.ll
new file mode 100644
index 0000000..76bceb6
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-10-28-stacksave.ll
@@ -0,0 +1,47 @@
+; RUN: opt < %s -instcombine -S | grep {call.*stacksave}
+; PR1745
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin8"
+@p = weak global i8* null ; <i8**> [#uses=1]
+
+define i32 @main() {
+entry:
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ br label %lab
+
+lab: ; preds = %cleanup31, %entry
+ %n.0 = phi i32 [ 0, %entry ], [ %tmp25, %cleanup31 ] ; <i32> [#uses=2]
+ %tmp2 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=2]
+ %tmp4 = srem i32 %n.0, 47 ; <i32> [#uses=1]
+ %tmp5 = add i32 %tmp4, 1 ; <i32> [#uses=5]
+ %tmp7 = sub i32 %tmp5, 1 ; <i32> [#uses=0]
+ %tmp89 = zext i32 %tmp5 to i64 ; <i64> [#uses=1]
+ %tmp10 = mul i64 %tmp89, 32 ; <i64> [#uses=0]
+ %tmp12 = mul i32 %tmp5, 4 ; <i32> [#uses=0]
+ %tmp1314 = zext i32 %tmp5 to i64 ; <i64> [#uses=1]
+ %tmp15 = mul i64 %tmp1314, 32 ; <i64> [#uses=0]
+ %tmp17 = mul i32 %tmp5, 4 ; <i32> [#uses=1]
+ %tmp18 = alloca i8, i32 %tmp17 ; <i8*> [#uses=1]
+ %tmp1819 = bitcast i8* %tmp18 to i32* ; <i32*> [#uses=2]
+ %tmp21 = getelementptr i32* %tmp1819, i32 0 ; <i32*> [#uses=1]
+ store i32 1, i32* %tmp21, align 4
+ %tmp2223 = bitcast i32* %tmp1819 to i8* ; <i8*> [#uses=1]
+ volatile store i8* %tmp2223, i8** @p, align 4
+ %tmp25 = add i32 %n.0, 1 ; <i32> [#uses=2]
+ %tmp27 = icmp sle i32 %tmp25, 999999 ; <i1> [#uses=1]
+ %tmp2728 = zext i1 %tmp27 to i8 ; <i8> [#uses=1]
+ %toBool = icmp ne i8 %tmp2728, 0 ; <i1> [#uses=1]
+ br i1 %toBool, label %cleanup31, label %cond_next
+
+cond_next: ; preds = %lab
+ call void @llvm.stackrestore( i8* %tmp2 )
+ ret i32 0
+
+cleanup31: ; preds = %lab
+ call void @llvm.stackrestore( i8* %tmp2 )
+ br label %lab
+}
+
+declare i8* @llvm.stacksave()
+
+declare void @llvm.stackrestore(i8*)
diff --git a/test/Transforms/InstCombine/2007-10-31-RangeCrash.ll b/test/Transforms/InstCombine/2007-10-31-RangeCrash.ll
new file mode 100644
index 0000000..8105b4b
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-10-31-RangeCrash.ll
@@ -0,0 +1,35 @@
+; RUN: opt < %s -instcombine -disable-output
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
+target triple = "powerpc-apple-darwin8"
+
+define i32 @test() {
+entry:
+ %tmp50.i17 = icmp slt i32 0, 4 ; <i1> [#uses=1]
+ br i1 %tmp50.i17, label %bb.i, label %calculateColorSpecificBlackLevel.exit
+
+bb.i: ; preds = %entry
+ br label %bb51.i.i
+
+bb27.i.i: ; preds = %bb51.i.i
+ %tmp31.i.i = load i16* null, align 2 ; <i16> [#uses=2]
+ %tmp35.i.i = icmp ult i16 %tmp31.i.i, 1 ; <i1> [#uses=1]
+ %tmp41.i.i = icmp ugt i16 %tmp31.i.i, -1 ; <i1> [#uses=1]
+ %bothcond.i.i = or i1 %tmp35.i.i, %tmp41.i.i ; <i1> [#uses=1]
+ %bothcond1.i.i = zext i1 %bothcond.i.i to i32 ; <i32> [#uses=1]
+ %tmp46.i.i = xor i32 %bothcond1.i.i, 1 ; <i32> [#uses=1]
+ %count.0.i.i = add i32 %count.1.i.i, %tmp46.i.i ; <i32> [#uses=1]
+ %tmp50.i.i = add i32 %x.0.i.i, 2 ; <i32> [#uses=1]
+ br label %bb51.i.i
+
+bb51.i.i: ; preds = %bb27.i.i, %bb.i
+ %count.1.i.i = phi i32 [ %count.0.i.i, %bb27.i.i ], [ 0, %bb.i ] ; <i32> [#uses=1]
+ %x.0.i.i = phi i32 [ %tmp50.i.i, %bb27.i.i ], [ 0, %bb.i ] ; <i32> [#uses=2]
+ %tmp54.i.i = icmp slt i32 %x.0.i.i, 0 ; <i1> [#uses=1]
+ br i1 %tmp54.i.i, label %bb27.i.i, label %bb57.i.i
+
+bb57.i.i: ; preds = %bb51.i.i
+ ret i32 0
+
+calculateColorSpecificBlackLevel.exit: ; preds = %entry
+ ret i32 undef
+}
diff --git a/test/Transforms/InstCombine/2007-10-31-StringCrash.ll b/test/Transforms/InstCombine/2007-10-31-StringCrash.ll
new file mode 100644
index 0000000..220f3e2
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-10-31-StringCrash.ll
@@ -0,0 +1,21 @@
+; RUN: opt < %s -instcombine -disable-output
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin8"
+
+declare void @__darwin_gcc3_preregister_frame_info()
+
+define void @_start(i32 %argc, i8** %argv, i8** %envp) {
+entry:
+ %tmp1 = bitcast void ()* @__darwin_gcc3_preregister_frame_info to i32* ; <i32*> [#uses=1]
+ %tmp2 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
+ %tmp3 = icmp ne i32 %tmp2, 0 ; <i1> [#uses=1]
+ %tmp34 = zext i1 %tmp3 to i8 ; <i8> [#uses=1]
+ %toBool = icmp ne i8 %tmp34, 0 ; <i1> [#uses=1]
+ br i1 %toBool, label %cond_true, label %return
+
+cond_true: ; preds = %entry
+ ret void
+
+return: ; preds = %entry
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2007-11-07-OpaqueAlignCrash.ll b/test/Transforms/InstCombine/2007-11-07-OpaqueAlignCrash.ll
new file mode 100644
index 0000000..e1549a0
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-11-07-OpaqueAlignCrash.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR1780
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+target triple = "i686-pc-linux-gnu"
+
+%opaque_t = type opaque
+
+%op_ts = type {opaque, i32}
+
+@g = external global %opaque_t
+@h = external global %op_ts
+
+define i32 @foo() {
+entry:
+ %x = load i8* bitcast (%opaque_t* @g to i8*)
+ %y = load i32* bitcast (%op_ts* @h to i32*)
+ %z = zext i8 %x to i32
+ %r = add i32 %y, %z
+ ret i32 %r
+}
+
diff --git a/test/Transforms/InstCombine/2007-11-15-CompareMiscomp.ll b/test/Transforms/InstCombine/2007-11-15-CompareMiscomp.ll
new file mode 100644
index 0000000..5282739
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-11-15-CompareMiscomp.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep {icmp eq i32 %In, 1}
+; PR1800
+
+define i1 @test(i32 %In) {
+ %c1 = icmp sgt i32 %In, -1
+ %c2 = icmp eq i32 %In, 1
+ %V = and i1 %c1, %c2
+ ret i1 %V
+}
+
diff --git a/test/Transforms/InstCombine/2007-11-22-IcmpCrash.ll b/test/Transforms/InstCombine/2007-11-22-IcmpCrash.ll
new file mode 100644
index 0000000..f71b99c
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-11-22-IcmpCrash.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR1817
+
+define i1 @test1(i32 %X) {
+ %A = icmp slt i32 %X, 10
+ %B = icmp ult i32 %X, 10
+ %C = and i1 %A, %B
+ ret i1 %C
+}
+
+define i1 @test2(i32 %X) {
+ %A = icmp slt i32 %X, 10
+ %B = icmp ult i32 %X, 10
+ %C = or i1 %A, %B
+ ret i1 %C
+}
diff --git a/test/Transforms/InstCombine/2007-11-25-CompatibleAttributes.ll b/test/Transforms/InstCombine/2007-11-25-CompatibleAttributes.ll
new file mode 100644
index 0000000..24394c6
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-11-25-CompatibleAttributes.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | not grep bitcast
+; PR1716
+
+@.str = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
+
+define i32 @main(i32 %argc, i8** %argv) {
+entry:
+ %tmp32 = tail call i32 (i8* noalias , ...) nounwind * bitcast (i32 (i8*, ...) nounwind * @printf to i32 (i8* noalias , ...) nounwind *)( i8* getelementptr ([4 x i8]* @.str, i32 0, i32 0) noalias , i32 0 ) nounwind ; <i32> [#uses=0]
+ ret i32 undef
+}
+
+declare i32 @printf(i8*, ...) nounwind
diff --git a/test/Transforms/InstCombine/2007-12-10-ConstFoldCompare.ll b/test/Transforms/InstCombine/2007-12-10-ConstFoldCompare.ll
new file mode 100644
index 0000000..6420537
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-12-10-ConstFoldCompare.ll
@@ -0,0 +1,9 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+target triple = "i686-pc-linux-gnu"
+; RUN: opt < %s -instcombine -S | not grep {ret i1 0}
+; PR1850
+
+define i1 @test() {
+ %cond = icmp ule i8* inttoptr (i64 4294967297 to i8*), inttoptr (i64 5 to i8*)
+ ret i1 %cond
+}
diff --git a/test/Transforms/InstCombine/2007-12-12-GEPScale.ll b/test/Transforms/InstCombine/2007-12-12-GEPScale.ll
new file mode 100644
index 0000000..cea87f2
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-12-12-GEPScale.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | not grep 1431655764
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+
+define i8* @foo([100 x {i8,i8,i8}]* %x) {
+entry:
+ %p = bitcast [100 x {i8,i8,i8}]* %x to i8*
+ %q = getelementptr i8* %p, i32 -4
+ ret i8* %q
+}
diff --git a/test/Transforms/InstCombine/2007-12-16-AsmNoUnwind.ll b/test/Transforms/InstCombine/2007-12-16-AsmNoUnwind.ll
new file mode 100644
index 0000000..85cf9b6
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-12-16-AsmNoUnwind.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -S | grep nounwind
+
+define void @bar() {
+entry:
+ call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"( )
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2007-12-18-AddSelCmpSub.ll b/test/Transforms/InstCombine/2007-12-18-AddSelCmpSub.ll
new file mode 100644
index 0000000..cc89f6d
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-12-18-AddSelCmpSub.ll
@@ -0,0 +1,29 @@
+; RUN: opt < %s -instcombine -S | grep {add} | count 1
+
+define i32 @foo(i32 %a) {
+entry:
+ %tmp15 = sub i32 99, %a ; <i32> [#uses=2]
+ %tmp16 = icmp slt i32 %tmp15, 0 ; <i1> [#uses=1]
+ %smax = select i1 %tmp16, i32 0, i32 %tmp15 ; <i32> [#uses=1]
+ %tmp12 = add i32 %smax, %a ; <i32> [#uses=1]
+ %tmp13 = add i32 %tmp12, 1 ; <i32> [#uses=1]
+ ret i32 %tmp13
+}
+
+define i32 @bar(i32 %a) {
+entry:
+ %tmp15 = sub i32 99, %a ; <i32> [#uses=2]
+ %tmp16 = icmp slt i32 %tmp15, 0 ; <i1> [#uses=1]
+ %smax = select i1 %tmp16, i32 0, i32 %tmp15 ; <i32> [#uses=1]
+ %tmp12 = add i32 %smax, %a ; <i32> [#uses=1]
+ ret i32 %tmp12
+}
+
+define i32 @fun(i32 %a) {
+entry:
+ %tmp15 = sub i32 99, %a ; <i32> [#uses=1]
+ %tmp16 = icmp slt i32 %a, 0 ; <i1> [#uses=1]
+ %smax = select i1 %tmp16, i32 0, i32 %tmp15 ; <i32> [#uses=1]
+ %tmp12 = add i32 %smax, %a ; <i32> [#uses=1]
+ ret i32 %tmp12
+}
diff --git a/test/Transforms/InstCombine/2007-12-28-IcmpSub2.ll b/test/Transforms/InstCombine/2007-12-28-IcmpSub2.ll
new file mode 100644
index 0000000..b59548f
--- /dev/null
+++ b/test/Transforms/InstCombine/2007-12-28-IcmpSub2.ll
@@ -0,0 +1,89 @@
+; RUN: opt < %s -mem2reg -instcombine -S | grep "ret i32 1" | count 8
+
+define i32 @test1() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp ule i32 %sub, 0
+ %retval = select i1 %cmp, i32 0, i32 1
+ ret i32 %retval
+}
+
+define i32 @test2() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp ugt i32 %sub, 0
+ %retval = select i1 %cmp, i32 1, i32 0
+ ret i32 %retval
+}
+
+define i32 @test3() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp slt i32 %sub, 0
+ %retval = select i1 %cmp, i32 1, i32 0
+ ret i32 %retval
+}
+
+define i32 @test4() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp sle i32 %sub, 0
+ %retval = select i1 %cmp, i32 1, i32 0
+ ret i32 %retval
+}
+
+define i32 @test5() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp sge i32 %sub, 0
+ %retval = select i1 %cmp, i32 0, i32 1
+ ret i32 %retval
+}
+
+define i32 @test6() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp sgt i32 %sub, 0
+ %retval = select i1 %cmp, i32 0, i32 1
+ ret i32 %retval
+}
+
+define i32 @test7() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp eq i32 %sub, 0
+ %retval = select i1 %cmp, i32 0, i32 1
+ ret i32 %retval
+}
+
+define i32 @test8() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp ne i32 %sub, 0
+ %retval = select i1 %cmp, i32 1, i32 0
+ ret i32 %retval
+} \ No newline at end of file
diff --git a/test/Transforms/InstCombine/2008-01-06-BitCastAttributes.ll b/test/Transforms/InstCombine/2008-01-06-BitCastAttributes.ll
new file mode 100644
index 0000000..5f4fa47
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-01-06-BitCastAttributes.ll
@@ -0,0 +1,23 @@
+; Ignore stderr, we expect warnings there
+; RUN: opt < %s -instcombine 2> /dev/null -S | not grep bitcast
+
+define void @a() {
+ ret void
+}
+
+define i32 @b(i32* inreg %x) signext {
+ ret i32 0
+}
+
+define void @c(...) {
+ ret void
+}
+
+define void @g(i32* %y) {
+ call void bitcast (void ()* @a to void (i32*)*)( i32* noalias %y )
+ call <2 x i32> bitcast (i32 (i32*)* @b to <2 x i32> (i32*)*)( i32* inreg null ) ; <<2 x i32>>:1 [#uses=0]
+ %x = call i64 bitcast (i32 (i32*)* @b to i64 (i32)*)( i32 0 ) ; <i64> [#uses=0]
+ call void bitcast (void (...)* @c to void (i32)*)( i32 0 )
+ call void bitcast (void (...)* @c to void (i32)*)( i32 zeroext 0 )
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2008-01-06-CastCrash.ll b/test/Transforms/InstCombine/2008-01-06-CastCrash.ll
new file mode 100644
index 0000000..097a0ce
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-01-06-CastCrash.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define <2 x i32> @f() {
+ ret <2 x i32> undef
+}
+
+define i32 @g() {
+ %x = call i32 bitcast (<2 x i32> ()* @f to i32 ()*)( ) ; <i32> [#uses=1]
+ ret i32 %x
+}
diff --git a/test/Transforms/InstCombine/2008-01-06-VoidCast.ll b/test/Transforms/InstCombine/2008-01-06-VoidCast.ll
new file mode 100644
index 0000000..407ff4d
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-01-06-VoidCast.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | not grep bitcast
+
+define void @f(i16 %y) {
+ ret void
+}
+
+define i32 @g(i32 %y) {
+ %x = call i32 bitcast (void (i16)* @f to i32 (i32)*)( i32 %y ) ; <i32> [#uses=1]
+ ret i32 %x
+}
diff --git a/test/Transforms/InstCombine/2008-01-13-AndCmpCmp.ll b/test/Transforms/InstCombine/2008-01-13-AndCmpCmp.ll
new file mode 100644
index 0000000..fbc8ba9
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-01-13-AndCmpCmp.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep and
+; PR1907
+
+define i1 @test(i32 %c84.17) {
+ %tmp2696 = icmp ne i32 %c84.17, 34 ; <i1> [#uses=2]
+ %tmp2699 = icmp sgt i32 %c84.17, -1 ; <i1> [#uses=1]
+ %tmp2703 = and i1 %tmp2696, %tmp2699 ; <i1> [#uses=1]
+ ret i1 %tmp2703
+}
diff --git a/test/Transforms/InstCombine/2008-01-13-NoBitCastAttributes.ll b/test/Transforms/InstCombine/2008-01-13-NoBitCastAttributes.ll
new file mode 100644
index 0000000..7b3281f
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-01-13-NoBitCastAttributes.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine -S | grep bitcast | count 2
+
+define i32 @b(i32* inreg %x) signext {
+ ret i32 0
+}
+
+define void @c(...) {
+ ret void
+}
+
+define void @g(i32* %y) {
+ call i32 bitcast (i32 (i32*)* @b to i32 (i32)*)( i32 zeroext 0 ) ; <i32>:2 [#uses=0]
+ call void bitcast (void (...)* @c to void (i32*)*)( i32* sret null )
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2008-01-14-DoubleNest.ll b/test/Transforms/InstCombine/2008-01-14-DoubleNest.ll
new file mode 100644
index 0000000..6401dfd
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-01-14-DoubleNest.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -instcombine -disable-output
+
+ %struct.FRAME.nest = type { i32, i32 (i32*)* }
+ %struct.__builtin_trampoline = type { [10 x i8] }
+
+declare i8* @llvm.init.trampoline(i8*, i8*, i8*) nounwind
+
+declare i32 @f(%struct.FRAME.nest* nest , i32*)
+
+define i32 @nest(i32 %n) {
+entry:
+ %FRAME.0 = alloca %struct.FRAME.nest, align 8 ; <%struct.FRAME.nest*> [#uses=3]
+ %TRAMP.216 = alloca [10 x i8], align 16 ; <[10 x i8]*> [#uses=1]
+ %TRAMP.216.sub = getelementptr [10 x i8]* %TRAMP.216, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp3 = getelementptr %struct.FRAME.nest* %FRAME.0, i32 0, i32 0 ; <i32*> [#uses=1]
+ store i32 %n, i32* %tmp3, align 8
+ %FRAME.06 = bitcast %struct.FRAME.nest* %FRAME.0 to i8* ; <i8*> [#uses=1]
+ %tramp = call i8* @llvm.init.trampoline( i8* %TRAMP.216.sub, i8* bitcast (i32 (%struct.FRAME.nest*, i32*)* @f to i8*), i8* %FRAME.06 ) ; <i8*> [#uses=1]
+ %tmp7 = getelementptr %struct.FRAME.nest* %FRAME.0, i32 0, i32 1 ; <i32 (i32*)**> [#uses=1]
+ %tmp89 = bitcast i8* %tramp to i32 (i32*)* ; <i32 (i32*)*> [#uses=2]
+ store i32 (i32*)* %tmp89, i32 (i32*)** %tmp7, align 8
+ %tmp2.i = call i32 %tmp89( i32* nest null ) ; <i32> [#uses=1]
+ ret i32 %tmp2.i
+}
diff --git a/test/Transforms/InstCombine/2008-01-14-VarArgTrampoline.ll b/test/Transforms/InstCombine/2008-01-14-VarArgTrampoline.ll
new file mode 100644
index 0000000..9bb9408
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-01-14-VarArgTrampoline.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -instcombine -S | grep zeroext
+
+ %struct.FRAME.nest = type { i32, i32 (...)* }
+ %struct.__builtin_trampoline = type { [10 x i8] }
+
+declare i8* @llvm.init.trampoline(i8*, i8*, i8*) nounwind
+
+declare i32 @f(%struct.FRAME.nest* nest , ...)
+
+define i32 @nest(i32 %n) {
+entry:
+ %FRAME.0 = alloca %struct.FRAME.nest, align 8 ; <%struct.FRAME.nest*> [#uses=3]
+ %TRAMP.216 = alloca [10 x i8], align 16 ; <[10 x i8]*> [#uses=1]
+ %TRAMP.216.sub = getelementptr [10 x i8]* %TRAMP.216, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp3 = getelementptr %struct.FRAME.nest* %FRAME.0, i32 0, i32 0 ; <i32*> [#uses=1]
+ store i32 %n, i32* %tmp3, align 8
+ %FRAME.06 = bitcast %struct.FRAME.nest* %FRAME.0 to i8* ; <i8*> [#uses=1]
+ %tramp = call i8* @llvm.init.trampoline( i8* %TRAMP.216.sub, i8* bitcast (i32 (%struct.FRAME.nest*, ...)* @f to i8*), i8* %FRAME.06 ) ; <i8*> [#uses=1]
+ %tmp7 = getelementptr %struct.FRAME.nest* %FRAME.0, i32 0, i32 1 ; <i32 (...)**> [#uses=1]
+ %tmp89 = bitcast i8* %tramp to i32 (...)* ; <i32 (...)*> [#uses=2]
+ store i32 (...)* %tmp89, i32 (...)** %tmp7, align 8
+ %tmp2.i = call i32 (...)* %tmp89( i32 zeroext 0 ) ; <i32> [#uses=1]
+ ret i32 %tmp2.i
+}
diff --git a/test/Transforms/InstCombine/2008-01-21-MismatchedCastAndCompare.ll b/test/Transforms/InstCombine/2008-01-21-MismatchedCastAndCompare.ll
new file mode 100644
index 0000000..5ff23a3
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-01-21-MismatchedCastAndCompare.ll
@@ -0,0 +1,20 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; PR1940
+
+define i1 @test1(i8 %A, i8 %B) {
+ %a = zext i8 %A to i32
+ %b = zext i8 %B to i32
+ %c = icmp sgt i32 %a, %b
+ ret i1 %c
+; CHECK: %c = icmp ugt i8 %A, %B
+; CHECK: ret i1 %c
+}
+
+define i1 @test2(i8 %A, i8 %B) {
+ %a = sext i8 %A to i32
+ %b = sext i8 %B to i32
+ %c = icmp ugt i32 %a, %b
+ ret i1 %c
+; CHECK: %c = icmp ugt i8 %A, %B
+; CHECK: ret i1 %c
+}
diff --git a/test/Transforms/InstCombine/2008-01-21-MulTrunc.ll b/test/Transforms/InstCombine/2008-01-21-MulTrunc.ll
new file mode 100644
index 0000000..87c2b75
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-01-21-MulTrunc.ll
@@ -0,0 +1,17 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+define i16 @test1(i16 %a) {
+ %tmp = zext i16 %a to i32 ; <i32> [#uses=2]
+ %tmp21 = lshr i32 %tmp, 8 ; <i32> [#uses=1]
+; CHECK: %tmp21 = lshr i16 %a, 8
+ %tmp5 = mul i32 %tmp, 5 ; <i32> [#uses=1]
+; CHECK: %tmp5 = mul i16 %a, 5
+ %tmp.upgrd.32 = or i32 %tmp21, %tmp5 ; <i32> [#uses=1]
+; CHECK: %tmp.upgrd.32 = or i16 %tmp21, %tmp5
+ %tmp.upgrd.3 = trunc i32 %tmp.upgrd.32 to i16 ; <i16> [#uses=1]
+ ret i16 %tmp.upgrd.3
+; CHECK: ret i16 %tmp.upgrd.32
+}
+
diff --git a/test/Transforms/InstCombine/2008-01-27-FloatSelect.ll b/test/Transforms/InstCombine/2008-01-27-FloatSelect.ll
new file mode 100644
index 0000000..c161bcc
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-01-27-FloatSelect.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -S | grep select
+
+define double @fold(i1 %a, double %b) {
+%s = select i1 %a, double 0., double 1.
+%c = fdiv double %b, %s
+ret double %c
+}
diff --git a/test/Transforms/InstCombine/2008-01-29-AddICmp.ll b/test/Transforms/InstCombine/2008-01-29-AddICmp.ll
new file mode 100644
index 0000000..28a94ce
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-01-29-AddICmp.ll
@@ -0,0 +1,20 @@
+; RUN: opt < %s -instcombine -S | not grep {a.off}
+; PR1949
+
+define i1 @test1(i32 %a) {
+ %a.off = add i32 %a, 4 ; <i32> [#uses=1]
+ %C = icmp ult i32 %a.off, 4 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test2(i32 %a) {
+ %a.off = sub i32 %a, 4 ; <i32> [#uses=1]
+ %C = icmp ugt i32 %a.off, -5 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test3(i32 %a) {
+ %a.off = add i32 %a, 4 ; <i32> [#uses=1]
+ %C = icmp slt i32 %a.off, 2147483652 ; <i1> [#uses=1]
+ ret i1 %C
+}
diff --git a/test/Transforms/InstCombine/2008-02-13-MulURem.ll b/test/Transforms/InstCombine/2008-02-13-MulURem.ll
new file mode 100644
index 0000000..a88c510
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-02-13-MulURem.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep rem
+; PR1933
+
+define i32 @fold(i32 %a) {
+ %s = mul i32 %a, 3
+ %c = urem i32 %s, 3
+ ret i32 %c
+}
diff --git a/test/Transforms/InstCombine/2008-02-16-SDivOverflow.ll b/test/Transforms/InstCombine/2008-02-16-SDivOverflow.ll
new file mode 100644
index 0000000..af61c15
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-02-16-SDivOverflow.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep {ret i.* 0} | count 2
+; PR2048
+
+define i32 @i(i32 %a) {
+ %tmp1 = sdiv i32 %a, -1431655765
+ %tmp2 = sdiv i32 %tmp1, 3
+ ret i32 %tmp2
+}
+
+define i8 @j(i8 %a) {
+ %tmp1 = sdiv i8 %a, 64
+ %tmp2 = sdiv i8 %tmp1, 3
+ ret i8 %tmp2
+}
diff --git a/test/Transforms/InstCombine/2008-02-16-SDivOverflow2.ll b/test/Transforms/InstCombine/2008-02-16-SDivOverflow2.ll
new file mode 100644
index 0000000..d26dec1
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-02-16-SDivOverflow2.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {sdiv i8 \%a, 9}
+; PR2048
+
+define i8 @i(i8 %a) {
+ %tmp1 = sdiv i8 %a, -3
+ %tmp2 = sdiv i8 %tmp1, -3
+ ret i8 %tmp2
+}
+
diff --git a/test/Transforms/InstCombine/2008-02-23-MulSub.ll b/test/Transforms/InstCombine/2008-02-23-MulSub.ll
new file mode 100644
index 0000000..bb21c4b
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-02-23-MulSub.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | not grep mul
+
+define i26 @test(i26 %a) nounwind {
+entry:
+ %_add = mul i26 %a, 2885 ; <i26> [#uses=1]
+ %_shl2 = mul i26 %a, 2884 ; <i26> [#uses=1]
+ %_sub = sub i26 %_add, %_shl2 ; <i26> [#uses=1]
+ ret i26 %_sub
+}
diff --git a/test/Transforms/InstCombine/2008-02-28-OrFCmpCrash.ll b/test/Transforms/InstCombine/2008-02-28-OrFCmpCrash.ll
new file mode 100644
index 0000000..7f8bd4f
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-02-28-OrFCmpCrash.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine | llvm-dis
+; rdar://5771353
+
+define float @test(float %x, x86_fp80 %y) nounwind readonly {
+entry:
+ %tmp67 = fcmp uno x86_fp80 %y, 0xK00000000000000000000 ; <i1> [#uses=1]
+ %tmp71 = fcmp uno float %x, 0.000000e+00 ; <i1> [#uses=1]
+ %bothcond = or i1 %tmp67, %tmp71 ; <i1> [#uses=1]
+ br i1 %bothcond, label %bb74, label %bb80
+
+bb74: ; preds = %entry
+ ret float 0.000000e+00
+
+bb80: ; preds = %entry
+ ret float 0.000000e+00
+}
diff --git a/test/Transforms/InstCombine/2008-03-13-IntToPtr.ll b/test/Transforms/InstCombine/2008-03-13-IntToPtr.ll
new file mode 100644
index 0000000..da7e49e
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-03-13-IntToPtr.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {16} | count 1
+
+define i8* @bork(i8** %qux) {
+ %tmp275 = load i8** %qux, align 1
+ %tmp275276 = ptrtoint i8* %tmp275 to i32
+ %tmp277 = add i32 %tmp275276, 16
+ %tmp277278 = inttoptr i32 %tmp277 to i8*
+ ret i8* %tmp277278
+}
diff --git a/test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll b/test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll
new file mode 100644
index 0000000..aa38065
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll
@@ -0,0 +1,15 @@
+;; The bitcast cannot be eliminated because byval arguments need
+;; the correct type, or at least a type of the correct size.
+; RUN: opt < %s -instcombine -S | grep bitcast
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9"
+ %struct.NSRect = type { [4 x float] }
+
+define void @foo(i8* %context) nounwind {
+entry:
+ %tmp1 = bitcast i8* %context to %struct.NSRect* ; <%struct.NSRect*> [#uses=1]
+ call void (i32, ...)* @bar( i32 3, %struct.NSRect* byval align 4 %tmp1 ) nounwind
+ ret void
+}
+
+declare void @bar(i32, ...)
diff --git a/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll b/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll
new file mode 100644
index 0000000..626564d
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep {volatile store}
+
+define void @test() {
+ %votf = alloca <4 x float> ; <<4 x float>*> [#uses=1]
+ volatile store <4 x float> zeroinitializer, <4 x float>* %votf, align 16
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll b/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll
new file mode 100644
index 0000000..f2cc725
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -instcombine -S | grep {volatile load} | count 2
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin8"
+@g_1 = internal global i32 0 ; <i32*> [#uses=3]
+
+define i32 @main() nounwind {
+entry:
+ %tmp93 = icmp slt i32 0, 10 ; <i1> [#uses=0]
+ %tmp34 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1]
+ br label %bb
+
+bb: ; preds = %bb, %entry
+ %b.0.reg2mem.0 = phi i32 [ 0, %entry ], [ %tmp6, %bb ] ; <i32> [#uses=1]
+ %tmp3.reg2mem.0 = phi i32 [ %tmp34, %entry ], [ %tmp3, %bb ] ; <i32> [#uses=1]
+ %tmp4 = add i32 %tmp3.reg2mem.0, 5 ; <i32> [#uses=1]
+ volatile store i32 %tmp4, i32* @g_1, align 4
+ %tmp6 = add i32 %b.0.reg2mem.0, 1 ; <i32> [#uses=2]
+ %tmp9 = icmp slt i32 %tmp6, 10 ; <i1> [#uses=1]
+ %tmp3 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1]
+ br i1 %tmp9, label %bb, label %bb11
+
+bb11: ; preds = %bb
+ ret i32 0
+}
+
diff --git a/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll b/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll
new file mode 100644
index 0000000..176162d
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll
@@ -0,0 +1,21 @@
+; RUN: opt < %s -instcombine -S | grep {volatile load} | count 2
+; PR2262
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin8"
+@g_1 = internal global i32 0 ; <i32*> [#uses=3]
+
+define i32 @main(i32 %i) nounwind {
+entry:
+ %tmp93 = icmp slt i32 %i, 10 ; <i1> [#uses=0]
+ %tmp34 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1]
+ br i1 %tmp93, label %bb11, label %bb
+
+bb: ; preds = %bb, %entry
+ %tmp3 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1]
+ br label %bb11
+
+bb11: ; preds = %bb
+ %tmp4 = phi i32 [ %tmp34, %entry ], [ %tmp3, %bb ] ; <i32> [#uses=1]
+ ret i32 %tmp4
+}
+
diff --git a/test/Transforms/InstCombine/2008-05-08-LiveStoreDelete.ll b/test/Transforms/InstCombine/2008-05-08-LiveStoreDelete.ll
new file mode 100644
index 0000000..bbd0042
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-05-08-LiveStoreDelete.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -instcombine -S | grep {store i8} | count 3
+; PR2297
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin8"
+
+define i32 @a() nounwind {
+entry:
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp1 = call i8* @malloc( i32 10 ) nounwind ; <i8*> [#uses=5]
+ %tmp3 = getelementptr i8* %tmp1, i32 1 ; <i8*> [#uses=1]
+ store i8 0, i8* %tmp3, align 1
+ %tmp5 = getelementptr i8* %tmp1, i32 0 ; <i8*> [#uses=1]
+ store i8 1, i8* %tmp5, align 1
+ %tmp7 = call i32 @strlen( i8* %tmp1 ) nounwind readonly ; <i32> [#uses=1]
+ %tmp9 = getelementptr i8* %tmp1, i32 0 ; <i8*> [#uses=1]
+ store i8 0, i8* %tmp9, align 1
+ %tmp11 = call i32 (...)* @b( i8* %tmp1 ) nounwind ; <i32> [#uses=0]
+ ret i32 %tmp7
+}
+
+declare i8* @malloc(i32) nounwind
+
+declare i32 @strlen(i8*) nounwind readonly
+
+declare i32 @b(...)
diff --git a/test/Transforms/InstCombine/2008-05-08-StrLenSink.ll b/test/Transforms/InstCombine/2008-05-08-StrLenSink.ll
new file mode 100644
index 0000000..1da2856
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-05-08-StrLenSink.ll
@@ -0,0 +1,32 @@
+; RUN: opt -S -instcombine %s | FileCheck %s
+; PR2297
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin8"
+
+define i32 @a() nounwind {
+entry:
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp1 = call i8* @malloc( i32 10 ) nounwind ; <i8*> [#uses=5]
+ %tmp3 = getelementptr i8* %tmp1, i32 1 ; <i8*> [#uses=1]
+ store i8 0, i8* %tmp3, align 1
+ %tmp5 = getelementptr i8* %tmp1, i32 0 ; <i8*> [#uses=1]
+ store i8 1, i8* %tmp5, align 1
+; CHECK: store
+; CHECK: store
+; CHECK-NEXT: strlen
+; CHECK-NEXT: store
+ %tmp7 = call i32 @strlen( i8* %tmp1 ) nounwind readonly ; <i32> [#uses=1]
+ %tmp9 = getelementptr i8* %tmp1, i32 0 ; <i8*> [#uses=1]
+ store i8 0, i8* %tmp9, align 1
+ %tmp11 = call i32 (...)* @b( i8* %tmp1 ) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret i32 %tmp7
+}
+
+declare i8* @malloc(i32) nounwind
+
+declare i32 @strlen(i8*) nounwind readonly
+
+declare i32 @b(...)
diff --git a/test/Transforms/InstCombine/2008-05-09-SinkOfInvoke.ll b/test/Transforms/InstCombine/2008-05-09-SinkOfInvoke.ll
new file mode 100644
index 0000000..d56a1a0
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-05-09-SinkOfInvoke.ll
@@ -0,0 +1,33 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR2303
+ %"struct.std::ctype<char>" = type { %"struct.std::locale::facet", i32*, i8, i32*, i32*, i16*, i8, [256 x i8], [256 x i8], i8 }
+ %"struct.std::locale::facet" = type { i32 (...)**, i32 }
+
+declare i32* @_ZNSt6locale5facet15_S_get_c_localeEv()
+
+declare i32** @__ctype_toupper_loc() readnone
+
+declare i32** @__ctype_tolower_loc() readnone
+
+define void @_ZNSt5ctypeIcEC2EPiPKtbm(%"struct.std::ctype<char>"* %this, i32* %unnamed_arg, i16* %__table, i8 zeroext %__del, i64 %__refs) {
+entry:
+ %tmp8 = invoke i32* @_ZNSt6locale5facet15_S_get_c_localeEv( )
+ to label %invcont unwind label %lpad ; <i32*> [#uses=0]
+
+invcont: ; preds = %entry
+ %tmp32 = invoke i32** @__ctype_toupper_loc( ) readnone
+ to label %invcont31 unwind label %lpad ; <i32**> [#uses=0]
+
+invcont31: ; preds = %invcont
+ %tmp38 = invoke i32** @__ctype_tolower_loc( ) readnone
+ to label %invcont37 unwind label %lpad ; <i32**> [#uses=1]
+
+invcont37: ; preds = %invcont31
+ %tmp39 = load i32** %tmp38, align 8 ; <i32*> [#uses=1]
+ %tmp41 = getelementptr %"struct.std::ctype<char>"* %this, i32 0, i32 4 ; <i32**> [#uses=1]
+ store i32* %tmp39, i32** %tmp41, align 8
+ ret void
+
+lpad: ; preds = %invcont31, %invcont, %entry
+ unreachable
+}
diff --git a/test/Transforms/InstCombine/2008-05-17-InfLoop.ll b/test/Transforms/InstCombine/2008-05-17-InfLoop.ll
new file mode 100644
index 0000000..2939a48
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-05-17-InfLoop.ll
@@ -0,0 +1,23 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR2339
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-s0:0:64-f80:32:32"
+target triple = "i686-pc-linux-gnu"
+
+declare void @BZALLOC(i32)
+
+define void @f(i32) {
+entry:
+ %blockSize100k = alloca i32 ; <i32*> [#uses=2]
+ store i32 %0, i32* %blockSize100k
+ %n = alloca i32 ; <i32*> [#uses=2]
+ load i32* %blockSize100k ; <i32>:1 [#uses=1]
+ store i32 %1, i32* %n
+ load i32* %n ; <i32>:2 [#uses=1]
+ add i32 %2, 2 ; <i32>:3 [#uses=1]
+ mul i32 %3, ptrtoint (i32* getelementptr (i32* null, i32 1) to i32) ; <i32>:4 [#uses=1]
+ call void @BZALLOC( i32 %4 )
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2008-05-18-FoldIntToPtr.ll b/test/Transforms/InstCombine/2008-05-18-FoldIntToPtr.ll
new file mode 100644
index 0000000..b34fc1e
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-05-18-FoldIntToPtr.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -S | grep {ret i1 false} | count 2
+; PR2329
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+target triple = "i386-pc-linux-gnu"
+
+define i1 @f1() {
+ ret i1 icmp eq (i8* inttoptr (i32 1 to i8*), i8* inttoptr (i32 2 to i8*))
+}
+
+define i1 @f2() {
+ ret i1 icmp eq (i8* inttoptr (i16 1 to i8*), i8* inttoptr (i16 2 to i8*))
+}
diff --git a/test/Transforms/InstCombine/2008-05-22-IDivVector.ll b/test/Transforms/InstCombine/2008-05-22-IDivVector.ll
new file mode 100644
index 0000000..f7ba99c
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-05-22-IDivVector.ll
@@ -0,0 +1,6 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define <3 x i8> @f(<3 x i8> %i) {
+ %A = sdiv <3 x i8> %i, %i
+ ret <3 x i8> %A
+}
diff --git a/test/Transforms/InstCombine/2008-05-22-NegValVector.ll b/test/Transforms/InstCombine/2008-05-22-NegValVector.ll
new file mode 100644
index 0000000..bf92faf
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-05-22-NegValVector.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | not grep sub
+
+define <3 x i8> @f(<3 x i8> %a) {
+ %A = sub <3 x i8> zeroinitializer, %a
+ %B = mul <3 x i8> %A, <i8 5, i8 5, i8 5>
+ ret <3 x i8> %B
+}
+
diff --git a/test/Transforms/InstCombine/2008-05-23-CompareFold.ll b/test/Transforms/InstCombine/2008-05-23-CompareFold.ll
new file mode 100644
index 0000000..2de5af7
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-05-23-CompareFold.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | grep {ret i1 false}
+; PR2359
+define i1 @f(i8* %x) {
+entry:
+ %tmp462 = load i8* %x, align 1 ; <i8> [#uses=1]
+ %tmp462463 = sitofp i8 %tmp462 to float ; <float> [#uses=1]
+ %tmp464 = fcmp ugt float %tmp462463, 0x47EFFFFFE0000000 ; <i1>
+ ret i1 %tmp464
+}
+
+
diff --git a/test/Transforms/InstCombine/2008-05-31-AddBool.ll b/test/Transforms/InstCombine/2008-05-31-AddBool.ll
new file mode 100644
index 0000000..5416693
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-05-31-AddBool.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -S | grep {xor}
+; PR2389
+
+define i1 @test(i1 %a, i1 %b) {
+ %A = add i1 %a, %b
+ ret i1 %A
+}
diff --git a/test/Transforms/InstCombine/2008-05-31-Bools.ll b/test/Transforms/InstCombine/2008-05-31-Bools.ll
new file mode 100644
index 0000000..a0fe47a
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-05-31-Bools.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -instcombine -S > %t
+; RUN: grep {xor} %t
+; RUN: grep {and} %t
+; RUN: not grep {div} %t
+
+define i1 @foo1(i1 %a, i1 %b) {
+ %A = sub i1 %a, %b
+ ret i1 %A
+}
+
+define i1 @foo2(i1 %a, i1 %b) {
+ %A = mul i1 %a, %b
+ ret i1 %A
+}
+
+define i1 @foo3(i1 %a, i1 %b) {
+ %A = udiv i1 %a, %b
+ ret i1 %A
+}
+
+define i1 @foo4(i1 %a, i1 %b) {
+ %A = sdiv i1 %a, %b
+ ret i1 %A
+}
diff --git a/test/Transforms/InstCombine/2008-06-05-ashr-crash.ll b/test/Transforms/InstCombine/2008-06-05-ashr-crash.ll
new file mode 100644
index 0000000..5e4a9d0
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-06-05-ashr-crash.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine
+
+define i65 @foo(i65 %x) nounwind {
+entry:
+ %tmp2 = ashr i65 %x, 65 ; <i65> [#uses=1]
+ ret i65 %tmp2
+}
diff --git a/test/Transforms/InstCombine/2008-06-08-ICmpPHI.ll b/test/Transforms/InstCombine/2008-06-08-ICmpPHI.ll
new file mode 100644
index 0000000..917d3ae
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-06-08-ICmpPHI.ll
@@ -0,0 +1,47 @@
+; RUN: opt < %s -instcombine -S | grep {phi i32} | count 2
+
+define void @test() nounwind {
+entry:
+ br label %bb
+
+bb: ; preds = %bb16, %entry
+ %i.0 = phi i32 [ 0, %entry ], [ %indvar.next, %somebb ] ; <i32> [#uses=1]
+ %x.0 = phi i32 [ 37, %entry ], [ %tmp17, %somebb ] ; <i32> [#uses=1]
+ %tmp = tail call i32 (...)* @bork( ) nounwind ; <i32> [#uses=0]
+ %tmp1 = tail call i32 (...)* @bork( ) nounwind ; <i32> [#uses=0]
+ %tmp2 = tail call i32 (...)* @bork( ) nounwind ; <i32> [#uses=1]
+ %tmp3 = icmp eq i32 %tmp2, 0 ; <i1> [#uses=1]
+ br i1 %tmp3, label %bb7, label %bb5
+
+bb5: ; preds = %bb
+ %tmp6 = tail call i32 (...)* @bork( ) nounwind ; <i32> [#uses=0]
+ br label %bb7
+
+bb7: ; preds = %bb5, %bb
+ %tmp8 = tail call i32 (...)* @bork( ) nounwind ; <i32> [#uses=0]
+ %tmp9 = tail call i32 (...)* @bork( ) nounwind ; <i32> [#uses=0]
+ %tmp11 = icmp eq i32 %x.0, 37 ; <i1> [#uses=1]
+ br i1 %tmp11, label %bb14, label %bb16
+
+bb14: ; preds = %bb7
+ %tmp15 = tail call i32 (...)* @bar( ) nounwind ; <i32> [#uses=0]
+ br label %bb16
+
+bb16: ; preds = %bb14, %bb7
+ %tmp17 = tail call i32 (...)* @zap( ) nounwind ; <i32> [#uses=1]
+ %indvar.next = add i32 %i.0, 1 ; <i32> [#uses=2]
+ %exitcond = icmp eq i32 %indvar.next, 42 ; <i1> [#uses=1]
+ br i1 %exitcond, label %return, label %somebb
+
+somebb:
+ br label %bb
+
+return: ; preds = %bb16
+ ret void
+}
+
+declare i32 @bork(...)
+
+declare i32 @bar(...)
+
+declare i32 @zap(...)
diff --git a/test/Transforms/InstCombine/2008-06-13-InfiniteLoopStore.ll b/test/Transforms/InstCombine/2008-06-13-InfiniteLoopStore.ll
new file mode 100644
index 0000000..08959c9
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-06-13-InfiniteLoopStore.ll
@@ -0,0 +1,20 @@
+; RUN: opt < %s -instcombine -S | grep {store i32} | count 2
+
+@g_139 = global i32 0 ; <i32*> [#uses=2]
+
+define void @func_56(i32 %p_60) nounwind {
+entry:
+ store i32 1, i32* @g_139, align 4
+ %tmp1 = icmp ne i32 %p_60, 0 ; <i1> [#uses=1]
+ %tmp12 = zext i1 %tmp1 to i8 ; <i8> [#uses=1]
+ %toBool = icmp ne i8 %tmp12, 0 ; <i1> [#uses=1]
+ br i1 %toBool, label %bb, label %return
+
+bb: ; preds = %bb, %entry
+ store i32 1, i32* @g_139, align 4
+ br label %bb
+
+return: ; preds = %entry
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/2008-06-13-ReadOnlyCallStore.ll b/test/Transforms/InstCombine/2008-06-13-ReadOnlyCallStore.ll
new file mode 100644
index 0000000..aed1b14
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-06-13-ReadOnlyCallStore.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine -S | grep {store i8} | count 2
+
+define i32 @a(i8* %s) nounwind {
+entry:
+ store i8 0, i8* %s, align 1 ; This store cannot be eliminated!
+ %tmp3 = call i32 @strlen( i8* %s ) nounwind readonly
+ %tmp5 = icmp ne i32 %tmp3, 0
+ br i1 %tmp5, label %bb, label %bb8
+
+bb: ; preds = %entry
+ store i8 0, i8* %s, align 1
+ br label %bb8
+
+bb8:
+ ret i32 %tmp3
+}
+
+declare i32 @strlen(i8*) nounwind readonly
+
diff --git a/test/Transforms/InstCombine/2008-06-19-UncondLoad.ll b/test/Transforms/InstCombine/2008-06-19-UncondLoad.ll
new file mode 100644
index 0000000..05f1c52
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-06-19-UncondLoad.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine -S | grep load | count 3
+; PR2471
+
+declare i32 @x(i32*)
+define i32 @b(i32* %a, i32* %b) {
+entry:
+ %tmp1 = load i32* %a
+ %tmp3 = load i32* %b
+ %add = add i32 %tmp1, %tmp3
+ %call = call i32 @x( i32* %a )
+ %tobool = icmp ne i32 %add, 0
+ ; not safe to turn into an uncond load
+ %cond = select i1 %tobool, i32* %b, i32* %a
+ %tmp8 = load i32* %cond
+ ret i32 %tmp8
+}
diff --git a/test/Transforms/InstCombine/2008-06-21-CompareMiscomp.ll b/test/Transforms/InstCombine/2008-06-21-CompareMiscomp.ll
new file mode 100644
index 0000000..c3371c6
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-06-21-CompareMiscomp.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | grep {icmp eq i32 %In, 15}
+; PR2479
+; (See also PR1800.)
+
+define i1 @test(i32 %In) {
+ %c1 = icmp ugt i32 %In, 13
+ %c2 = icmp eq i32 %In, 15
+ %V = and i1 %c1, %c2
+ ret i1 %V
+}
+
diff --git a/test/Transforms/InstCombine/2008-06-24-StackRestore.ll b/test/Transforms/InstCombine/2008-06-24-StackRestore.ll
new file mode 100644
index 0000000..8307834
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-06-24-StackRestore.ll
@@ -0,0 +1,39 @@
+; RUN: opt < %s -instcombine -S | grep {call.*llvm.stackrestore}
+; PR2488
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+target triple = "i386-pc-linux-gnu"
+@p = weak global i8* null ; <i8**> [#uses=2]
+
+define i32 @main() nounwind {
+entry:
+ %tmp248 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
+ %tmp2752 = alloca i32 ; <i32*> [#uses=2]
+ %tmpcast53 = bitcast i32* %tmp2752 to i8* ; <i8*> [#uses=1]
+ store i32 2, i32* %tmp2752, align 4
+ volatile store i8* %tmpcast53, i8** @p, align 4
+ br label %bb44
+
+bb: ; preds = %bb44
+ ret i32 0
+
+bb44: ; preds = %bb44, %entry
+ %indvar = phi i32 [ 0, %entry ], [ %tmp3857, %bb44 ] ; <i32> [#uses=1]
+ %tmp249 = phi i8* [ %tmp248, %entry ], [ %tmp2, %bb44 ] ; <i8*> [#uses=1]
+ %tmp3857 = add i32 %indvar, 1 ; <i32> [#uses=3]
+ call void @llvm.stackrestore( i8* %tmp249 )
+ %tmp2 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
+ %tmp4 = srem i32 %tmp3857, 1000 ; <i32> [#uses=2]
+ %tmp5 = add i32 %tmp4, 1 ; <i32> [#uses=1]
+ %tmp27 = alloca i32, i32 %tmp5 ; <i32*> [#uses=3]
+ %tmpcast = bitcast i32* %tmp27 to i8* ; <i8*> [#uses=1]
+ store i32 1, i32* %tmp27, align 4
+ %tmp34 = getelementptr i32* %tmp27, i32 %tmp4 ; <i32*> [#uses=1]
+ store i32 2, i32* %tmp34, align 4
+ volatile store i8* %tmpcast, i8** @p, align 4
+ %exitcond = icmp eq i32 %tmp3857, 999999 ; <i1> [#uses=1]
+ br i1 %exitcond, label %bb, label %bb44
+}
+
+declare i8* @llvm.stacksave() nounwind
+
+declare void @llvm.stackrestore(i8*) nounwind
diff --git a/test/Transforms/InstCombine/2008-07-08-AndICmp.ll b/test/Transforms/InstCombine/2008-07-08-AndICmp.ll
new file mode 100644
index 0000000..a12f4bd
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-07-08-AndICmp.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep icmp | count 1
+; PR2330
+
+define i1 @foo(i32 %a, i32 %b) nounwind {
+entry:
+ icmp ult i32 %a, 8 ; <i1>:0 [#uses=1]
+ icmp ult i32 %b, 8 ; <i1>:1 [#uses=1]
+ and i1 %1, %0 ; <i1>:2 [#uses=1]
+ ret i1 %2
+}
diff --git a/test/Transforms/InstCombine/2008-07-08-ShiftOneAndOne.ll b/test/Transforms/InstCombine/2008-07-08-ShiftOneAndOne.ll
new file mode 100644
index 0000000..8245b4d
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-07-08-ShiftOneAndOne.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep {icmp ne i32 \%a}
+; PR2330
+
+define i1 @foo(i32 %a) nounwind {
+entry:
+ %tmp15 = shl i32 1, %a ; <i32> [#uses=1]
+ %tmp237 = and i32 %tmp15, 1 ; <i32> [#uses=1]
+ %toBool = icmp eq i32 %tmp237, 0 ; <i1> [#uses=1]
+ ret i1 %toBool
+}
diff --git a/test/Transforms/InstCombine/2008-07-08-SubAnd.ll b/test/Transforms/InstCombine/2008-07-08-SubAnd.ll
new file mode 100644
index 0000000..0091159
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-07-08-SubAnd.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep -v {i32 8}
+; PR2330
+
+define i32 @a(i32 %a) nounwind {
+entry:
+ %tmp2 = sub i32 8, %a ; <i32> [#uses=1]
+ %tmp3 = and i32 %tmp2, 7 ; <i32> [#uses=1]
+ ret i32 %tmp3
+}
diff --git a/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll b/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll
new file mode 100644
index 0000000..ccfb118
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll
@@ -0,0 +1,26 @@
+; RUN: opt < %s -instcombine -S | grep {volatile load} | count 2
+; PR2496
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin8"
+@g_1 = internal global i32 0 ; <i32*> [#uses=3]
+
+define i32 @main() nounwind {
+entry:
+ %tmp93 = icmp slt i32 0, 10 ; <i1> [#uses=0]
+ %tmp34 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1]
+ br label %bb
+
+bb: ; preds = %bb, %entry
+ %b.0.reg2mem.0 = phi i32 [ 0, %entry ], [ %tmp6, %bb ] ; <i32> [#uses=1]
+ %tmp3.reg2mem.0 = phi i32 [ %tmp3, %bb ], [ %tmp34, %entry ]
+ %tmp4 = add i32 %tmp3.reg2mem.0, 5 ; <i32> [#uses=1]
+ volatile store i32 %tmp4, i32* @g_1, align 4
+ %tmp6 = add i32 %b.0.reg2mem.0, 1 ; <i32> [#uses=2]
+ %tmp9 = icmp slt i32 %tmp6, 10 ; <i1> [#uses=1]
+ %tmp3 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1]
+ br i1 %tmp9, label %bb, label %bb11
+
+bb11: ; preds = %bb
+ ret i32 0
+}
+
diff --git a/test/Transforms/InstCombine/2008-07-09-SubAndError.ll b/test/Transforms/InstCombine/2008-07-09-SubAndError.ll
new file mode 100644
index 0000000..47a7590
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-07-09-SubAndError.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | not grep {sub i32 0}
+; PR2330
+
+define i32 @foo(i32 %a) nounwind {
+entry:
+ %A = sub i32 5, %a
+ %B = and i32 %A, 2
+ ret i32 %B
+}
diff --git a/test/Transforms/InstCombine/2008-07-10-CastSextBool.ll b/test/Transforms/InstCombine/2008-07-10-CastSextBool.ll
new file mode 100644
index 0000000..e911532
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-07-10-CastSextBool.ll
@@ -0,0 +1,17 @@
+; RUN: opt < %s -instcombine -S | grep {%C = xor i1 %A, true}
+; RUN: opt < %s -instcombine -S | grep {ret i1 false}
+; PR2539
+
+define i1 @test1(i1 %A) {
+ %B = zext i1 %A to i32
+ %C = icmp slt i32 %B, 1
+ ret i1 %C
+}
+
+
+define i1 @test2(i1 zeroext %b) {
+entry:
+ %cmptmp = icmp slt i1 %b, true ; <i1> [#uses=1]
+ ret i1 %cmptmp
+}
+
diff --git a/test/Transforms/InstCombine/2008-07-10-ICmpBinOp.ll b/test/Transforms/InstCombine/2008-07-10-ICmpBinOp.ll
new file mode 100644
index 0000000..76e3039
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-07-10-ICmpBinOp.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine -S | not grep add
+; RUN: opt < %s -instcombine -S | not grep mul
+; PR2330
+
+define i1 @f(i32 %x, i32 %y) nounwind {
+entry:
+ %A = add i32 %x, 5
+ %B = add i32 %y, 5
+ %C = icmp eq i32 %A, %B
+ ret i1 %C
+}
+
+define i1 @g(i32 %x, i32 %y) nounwind {
+entry:
+ %A = mul i32 %x, 5
+ %B = mul i32 %y, 5
+ %C = icmp eq i32 %A, %B
+ ret i1 %C
+}
diff --git a/test/Transforms/InstCombine/2008-07-11-RemAnd.ll b/test/Transforms/InstCombine/2008-07-11-RemAnd.ll
new file mode 100644
index 0000000..bf53451
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-07-11-RemAnd.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | not grep rem
+; PR2330
+
+define i32 @a(i32 %b) nounwind {
+entry:
+ srem i32 %b, 8 ; <i32>:0 [#uses=1]
+ and i32 %0, 1 ; <i32>:1 [#uses=1]
+ ret i32 %1
+}
diff --git a/test/Transforms/InstCombine/2008-07-13-DivZero.ll b/test/Transforms/InstCombine/2008-07-13-DivZero.ll
new file mode 100644
index 0000000..be1f8c2
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-07-13-DivZero.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine -S | grep {lshr.*3}
+; RUN: opt < %s -instcombine -S | grep {call .*%cond}
+; PR2506
+
+; We can simplify the operand of udiv to '8', but not the operand to the
+; call. If the callee never returns, we can't assume the div is reachable.
+define i32 @a(i32 %x, i32 %y) {
+entry:
+ %tobool = icmp ne i32 %y, 0 ; <i1> [#uses=1]
+ %cond = select i1 %tobool, i32 8, i32 0 ; <i32> [#uses=2]
+ %call = call i32 @b( i32 %cond ) ; <i32> [#uses=0]
+ %div = udiv i32 %x, %cond ; <i32> [#uses=1]
+ ret i32 %div
+}
+
+declare i32 @b(i32)
diff --git a/test/Transforms/InstCombine/2008-07-16-fsub.ll b/test/Transforms/InstCombine/2008-07-16-fsub.ll
new file mode 100644
index 0000000..672b4e9
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-07-16-fsub.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep sub
+; PR2553
+
+define double @test(double %X) nounwind {
+ ; fsub of self can't be optimized away.
+ %Y = fsub double %X, %X
+ ret double %Y
+}
diff --git a/test/Transforms/InstCombine/2008-07-16-sse2_storel_dq.ll b/test/Transforms/InstCombine/2008-07-16-sse2_storel_dq.ll
new file mode 100644
index 0000000..501d8a6
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-07-16-sse2_storel_dq.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -S | not grep {store }
+; PR2296
+
+@G = common global double 0.000000e+00, align 16
+
+define void @x(<2 x i64> %y) nounwind {
+entry:
+ bitcast <2 x i64> %y to <4 x i32>
+ call void @llvm.x86.sse2.storel.dq( i8* bitcast (double* @G to i8*), <4 x i32> %0 ) nounwind
+ ret void
+}
+
+declare void @llvm.x86.sse2.storel.dq(i8*, <4 x i32>) nounwind
diff --git a/test/Transforms/InstCombine/2008-08-05-And.ll b/test/Transforms/InstCombine/2008-08-05-And.ll
new file mode 100644
index 0000000..9773c2d
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-08-05-And.ll
@@ -0,0 +1,23 @@
+; RUN: opt < %s -instcombine -S | not grep or
+; PR2629
+
+define void @f(i8* %x) nounwind {
+entry:
+ br label %bb
+
+bb:
+ %g1 = getelementptr i8* %x, i32 0
+ %l1 = load i8* %g1, align 1
+ %s1 = sub i8 %l1, 6
+ %c1 = icmp ugt i8 %s1, 2
+ %s2 = sub i8 %l1, 10
+ %c2 = icmp ugt i8 %s2, 2
+ %a1 = and i1 %c1, %c2
+ br i1 %a1, label %incompatible, label %okay
+
+okay:
+ ret void
+
+incompatible:
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2008-08-17-ICmpXorSignbit.ll b/test/Transforms/InstCombine/2008-08-17-ICmpXorSignbit.ll
new file mode 100644
index 0000000..e9081f0
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-08-17-ICmpXorSignbit.ll
@@ -0,0 +1,41 @@
+; RUN: opt < %s -instcombine -S | not grep xor
+
+define i1 @test1(i8 %x, i8 %y) {
+ %X = xor i8 %x, 128
+ %Y = xor i8 %y, 128
+ %tmp = icmp slt i8 %X, %Y
+ ret i1 %tmp
+}
+
+define i1 @test2(i8 %x, i8 %y) {
+ %X = xor i8 %x, 128
+ %Y = xor i8 %y, 128
+ %tmp = icmp ult i8 %X, %Y
+ ret i1 %tmp
+}
+
+define i1 @test3(i8 %x) {
+ %X = xor i8 %x, 128
+ %tmp = icmp uge i8 %X, 15
+ ret i1 %tmp
+}
+
+define i1 @test4(i8 %x, i8 %y) {
+ %X = xor i8 %x, 127
+ %Y = xor i8 %y, 127
+ %tmp = icmp slt i8 %X, %Y
+ ret i1 %tmp
+}
+
+define i1 @test5(i8 %x, i8 %y) {
+ %X = xor i8 %x, 127
+ %Y = xor i8 %y, 127
+ %tmp = icmp ult i8 %X, %Y
+ ret i1 %tmp
+}
+
+define i1 @test6(i8 %x) {
+ %X = xor i8 %x, 127
+ %tmp = icmp uge i8 %X, 15
+ ret i1 %tmp
+}
diff --git a/test/Transforms/InstCombine/2008-09-02-VectorCrash.ll b/test/Transforms/InstCombine/2008-09-02-VectorCrash.ll
new file mode 100644
index 0000000..7c50141
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-09-02-VectorCrash.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -instcombine
+
+define void @entry(i32 %m_task_id, i32 %start_x, i32 %end_x, i32 %start_y, i32 %end_y) {
+ br label %1
+
+; <label>:1 ; preds = %4, %0
+ %2 = icmp slt i32 0, %end_y ; <i1> [#uses=1]
+ br i1 %2, label %4, label %3
+
+; <label>:3 ; preds = %1
+ ret void
+
+; <label>:4 ; preds = %6, %1
+ %5 = icmp slt i32 0, %end_x ; <i1> [#uses=1]
+ br i1 %5, label %6, label %1
+
+; <label>:6 ; preds = %4
+ %7 = srem <2 x i32> zeroinitializer, zeroinitializer ; <<2 x i32>> [#uses=1]
+ %8 = extractelement <2 x i32> %7, i32 1 ; <i32> [#uses=1]
+ %9 = select i1 false, i32 0, i32 %8 ; <i32> [#uses=1]
+ %10 = insertelement <2 x i32> zeroinitializer, i32 %9, i32 1 ; <<2 x i32>> [#uses=1]
+ %11 = extractelement <2 x i32> %10, i32 1 ; <i32> [#uses=1]
+ %12 = insertelement <4 x i32> zeroinitializer, i32 %11, i32 3 ; <<4 x i32>> [#uses=1]
+ %13 = sitofp <4 x i32> %12 to <4 x float> ; <<4 x float>> [#uses=1]
+ store <4 x float> %13, <4 x float>* null
+ br label %4
+}
diff --git a/test/Transforms/InstCombine/2008-09-29-FoldingOr.ll b/test/Transforms/InstCombine/2008-09-29-FoldingOr.ll
new file mode 100644
index 0000000..31ea94a
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-09-29-FoldingOr.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep {or i1}
+; PR2844
+
+define i32 @test(i32 %p_74) {
+ %A = icmp eq i32 %p_74, 0 ; <i1> [#uses=1]
+ %B = icmp slt i32 %p_74, -638208501 ; <i1> [#uses=1]
+ %or.cond = or i1 %A, %B ; <i1> [#uses=1]
+ %iftmp.10.0 = select i1 %or.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %iftmp.10.0
+}
diff --git a/test/Transforms/InstCombine/2008-10-11-DivCompareFold.ll b/test/Transforms/InstCombine/2008-10-11-DivCompareFold.ll
new file mode 100644
index 0000000..fd36d86
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-10-11-DivCompareFold.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep {ret i1 false}
+; PR2697
+
+define i1 @x(i32 %x) nounwind {
+ %div = sdiv i32 %x, 65536 ; <i32> [#uses=1]
+ %cmp = icmp slt i32 %div, -65536
+ ret i1 %cmp
+}
diff --git a/test/Transforms/InstCombine/2008-10-23-ConstFoldWithoutMask.ll b/test/Transforms/InstCombine/2008-10-23-ConstFoldWithoutMask.ll
new file mode 100644
index 0000000..d70d052
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-10-23-ConstFoldWithoutMask.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine
+; PR2940
+
+define i32 @tstid() {
+ %var0 = inttoptr i32 1 to i8* ; <i8*> [#uses=1]
+ %var2 = ptrtoint i8* %var0 to i32 ; <i32> [#uses=1]
+ ret i32 %var2
+}
diff --git a/test/Transforms/InstCombine/2008-11-01-SRemDemandedBits.ll b/test/Transforms/InstCombine/2008-11-01-SRemDemandedBits.ll
new file mode 100644
index 0000000..aa077e2
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-11-01-SRemDemandedBits.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep {ret i1 true}
+; PR2993
+
+define i1 @foo(i32 %x) {
+ %1 = srem i32 %x, -1
+ %2 = icmp eq i32 %1, 0
+ ret i1 %2
+}
diff --git a/test/Transforms/InstCombine/2008-11-08-FCmp.ll b/test/Transforms/InstCombine/2008-11-08-FCmp.ll
new file mode 100644
index 0000000..c636288
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-11-08-FCmp.ll
@@ -0,0 +1,47 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; PR3021
+
+; When inst combining an FCMP with the LHS coming from a uitofp instruction, we
+; can't lower it to signed ICMP instructions.
+
+define i1 @test1(i32 %val) {
+ %1 = uitofp i32 %val to double
+ %2 = fcmp ole double %1, 0.000000e+00
+; CHECK: icmp eq i32 %val, 0
+ ret i1 %2
+}
+
+define i1 @test2(i32 %val) {
+ %1 = uitofp i32 %val to double
+ %2 = fcmp olt double %1, 0.000000e+00
+ ret i1 %2
+; CHECK: ret i1 false
+}
+
+define i1 @test3(i32 %val) {
+ %1 = uitofp i32 %val to double
+ %2 = fcmp oge double %1, 0.000000e+00
+ ret i1 %2
+; CHECK: ret i1 true
+}
+
+define i1 @test4(i32 %val) {
+ %1 = uitofp i32 %val to double
+ %2 = fcmp ogt double %1, 0.000000e+00
+; CHECK: icmp ne i32 %val, 0
+ ret i1 %2
+}
+
+define i1 @test5(i32 %val) {
+ %1 = uitofp i32 %val to double
+ %2 = fcmp ogt double %1, -4.400000e+00
+ ret i1 %2
+; CHECK: ret i1 true
+}
+
+define i1 @test6(i32 %val) {
+ %1 = uitofp i32 %val to double
+ %2 = fcmp olt double %1, -4.400000e+00
+ ret i1 %2
+; CHECK: ret i1 false
+}
diff --git a/test/Transforms/InstCombine/2008-11-20-DivMulRem.ll b/test/Transforms/InstCombine/2008-11-20-DivMulRem.ll
new file mode 100644
index 0000000..b2774d6
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-11-20-DivMulRem.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -instcombine -S > %t
+; RUN: grep urem %t | count 3
+; RUN: grep srem %t | count 1
+; RUN: grep sub %t | count 2
+; RUN: grep add %t | count 1
+; PR3103
+
+define i8 @test1(i8 %x, i8 %y) {
+ %A = udiv i8 %x, %y
+ %B = mul i8 %A, %y
+ %C = sub i8 %x, %B
+ ret i8 %C
+}
+
+define i8 @test2(i8 %x, i8 %y) {
+ %A = sdiv i8 %x, %y
+ %B = mul i8 %A, %y
+ %C = sub i8 %x, %B
+ ret i8 %C
+}
+
+define i8 @test3(i8 %x, i8 %y) {
+ %A = udiv i8 %x, %y
+ %B = mul i8 %A, %y
+ %C = sub i8 %B, %x
+ ret i8 %C
+}
+
+define i8 @test4(i8 %x) {
+ %A = udiv i8 %x, 3
+ %B = mul i8 %A, -3
+ %C = sub i8 %x, %B
+ ret i8 %C
+}
diff --git a/test/Transforms/InstCombine/2008-11-27-IDivVector.ll b/test/Transforms/InstCombine/2008-11-27-IDivVector.ll
new file mode 100644
index 0000000..318a80c
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-11-27-IDivVector.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | not grep div
+
+define <2 x i8> @f(<2 x i8> %x) {
+ %A = udiv <2 x i8> %x, <i8 1, i8 1>
+ ret <2 x i8> %A
+}
+
+define <2 x i8> @g(<2 x i8> %x) {
+ %A = sdiv <2 x i8> %x, <i8 1, i8 1>
+ ret <2 x i8> %A
+}
diff --git a/test/Transforms/InstCombine/2008-11-27-MultiplyIntVec.ll b/test/Transforms/InstCombine/2008-11-27-MultiplyIntVec.ll
new file mode 100644
index 0000000..d8c53fa
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-11-27-MultiplyIntVec.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | not grep mul
+
+define <2 x i8> @f(<2 x i8> %x) {
+ %A = mul <2 x i8> %x, <i8 1, i8 1>
+ ret <2 x i8> %A
+}
+
+define <2 x i8> @g(<2 x i8> %x) {
+ %A = mul <2 x i8> %x, <i8 -1, i8 -1>
+ ret <2 x i8> %A
+}
diff --git a/test/Transforms/InstCombine/2008-11-27-UDivNegative.ll b/test/Transforms/InstCombine/2008-11-27-UDivNegative.ll
new file mode 100644
index 0000000..fc90bba
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-11-27-UDivNegative.ll
@@ -0,0 +1,6 @@
+; RUN: opt < %s -instcombine -S | not grep div
+
+define i8 @test(i8 %x) readnone nounwind {
+ %A = udiv i8 %x, 250
+ ret i8 %A
+}
diff --git a/test/Transforms/InstCombine/2008-12-17-SRemNegConstVec.ll b/test/Transforms/InstCombine/2008-12-17-SRemNegConstVec.ll
new file mode 100644
index 0000000..e4c7ebc
--- /dev/null
+++ b/test/Transforms/InstCombine/2008-12-17-SRemNegConstVec.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -S | grep {i8 2, i8 2}
+; PR2756
+
+define <2 x i8> @foo(<2 x i8> %x) {
+ %A = srem <2 x i8> %x, <i8 2, i8 -2>
+ ret <2 x i8> %A
+}
diff --git a/test/Transforms/InstCombine/2009-01-05-i128-crash.ll b/test/Transforms/InstCombine/2009-01-05-i128-crash.ll
new file mode 100644
index 0000000..d355e0a
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-01-05-i128-crash.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -instcombine | llvm-dis
+; PR3235
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define hidden i128 @"\01_gfortrani_max_value"(i32 %length, i32 %signed_flag) nounwind {
+entry:
+ switch i32 %length, label %bb13 [
+ i32 1, label %bb17
+ i32 4, label %bb9
+ i32 8, label %bb5
+ ]
+
+bb5: ; preds = %entry
+ %0 = icmp eq i32 %signed_flag, 0 ; <i1> [#uses=1]
+ %iftmp.28.0 = select i1 %0, i128 18446744073709551615, i128 9223372036854775807 ; <i128> [#uses=1]
+ ret i128 %iftmp.28.0
+
+bb9: ; preds = %entry
+ ret i128 0
+
+bb13: ; preds = %entry
+ ret i128 0
+
+bb17: ; preds = %entry
+ ret i128 0
+}
diff --git a/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll b/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
new file mode 100644
index 0000000..a61a94e
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
@@ -0,0 +1,28 @@
+; RUN: opt < %s -instcombine -S > %t
+; RUN: grep {, align 4} %t | count 3
+; RUN: grep {, align 8} %t | count 3
+; rdar://6480438
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.6"
+ %struct.Key = type { { i32, i32 } }
+ %struct.anon = type <{ i8, [3 x i8], i32 }>
+
+define i32 @bar(i64 %key_token2) nounwind {
+entry:
+ %iospec = alloca %struct.Key ; <%struct.Key*> [#uses=3]
+ %ret = alloca i32 ; <i32*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %0 = getelementptr %struct.Key* %iospec, i32 0, i32 0 ; <{ i32, i32 }*> [#uses=2]
+ %1 = getelementptr { i32, i32 }* %0, i32 0, i32 0 ; <i32*> [#uses=1]
+ store i32 0, i32* %1, align 4
+ %2 = getelementptr { i32, i32 }* %0, i32 0, i32 1 ; <i32*> [#uses=1]
+ store i32 0, i32* %2, align 4
+ %3 = getelementptr %struct.Key* %iospec, i32 0, i32 0 ; <{ i32, i32 }*> [#uses=1]
+ %4 = bitcast { i32, i32 }* %3 to i64* ; <i64*> [#uses=1]
+ store i64 %key_token2, i64* %4, align 4
+ %5 = call i32 (...)* @foo(%struct.Key* byval align 4 %iospec, i32* %ret) nounwind ; <i32> [#uses=0]
+ %6 = load i32* %ret, align 4 ; <i32> [#uses=1]
+ ret i32 %6
+}
+
+declare i32 @foo(...)
diff --git a/test/Transforms/InstCombine/2009-01-16-PointerAddrSpace.ll b/test/Transforms/InstCombine/2009-01-16-PointerAddrSpace.ll
new file mode 100644
index 0000000..ce62f35
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-01-16-PointerAddrSpace.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | grep {store.*addrspace(1)}
+; PR3335
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.6"
+
+define i32 @test(i32* %P) nounwind {
+entry:
+ %Q = bitcast i32* %P to i32 addrspace(1)*
+ store i32 0, i32 addrspace(1)* %Q, align 4
+ ret i32 0
+}
diff --git a/test/Transforms/InstCombine/2009-01-19-fmod-constant-float-specials.ll b/test/Transforms/InstCombine/2009-01-19-fmod-constant-float-specials.ll
new file mode 100644
index 0000000..79a2f1f
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-01-19-fmod-constant-float-specials.ll
@@ -0,0 +1,316 @@
+; RUN: opt < %s -simplifycfg -instcombine -S | grep 0x7FF8000000000000 | count 7
+; RUN: opt < %s -simplifycfg -instcombine -S | grep 0x7FF00000FFFFFFFF | count 5
+; RUN: opt < %s -simplifycfg -instcombine -S | grep {0\\.0} | count 3
+; RUN: opt < %s -simplifycfg -instcombine -S | grep {3\\.5} | count 1
+;
+
+; ModuleID = 'apf.c'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.6"
+@"\01LC" = internal constant [4 x i8] c"%f\0A\00" ; <[4 x i8]*> [#uses=1]
+
+define void @foo1() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF0000000000000, float* %x, align 4
+ store float 0x7FF8000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+declare i32 @printf(i8*, ...) nounwind
+
+define void @foo2() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF0000000000000, float* %x, align 4
+ store float 0.000000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo3() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF0000000000000, float* %x, align 4
+ store float 3.500000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo4() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF0000000000000, float* %x, align 4
+ store float 0x7FF0000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo5() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF8000000000000, float* %x, align 4
+ store float 0x7FF0000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo6() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF8000000000000, float* %x, align 4
+ store float 0.000000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo7() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF8000000000000, float* %x, align 4
+ store float 3.500000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo8() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF8000000000000, float* %x, align 4
+ store float 0x7FF8000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo9() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0.000000e+00, float* %x, align 4
+ store float 0x7FF8000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo10() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0.000000e+00, float* %x, align 4
+ store float 0x7FF0000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo11() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0.000000e+00, float* %x, align 4
+ store float 0.000000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo12() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0.000000e+00, float* %x, align 4
+ store float 3.500000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo13() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 3.500000e+00, float* %x, align 4
+ store float 0x7FF8000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo14() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 3.500000e+00, float* %x, align 4
+ store float 0x7FF0000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo15() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 3.500000e+00, float* %x, align 4
+ store float 0.000000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo16() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 3.500000e+00, float* %x, align 4
+ store float 3.500000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2009-01-19-fmod-constant-float.ll b/test/Transforms/InstCombine/2009-01-19-fmod-constant-float.ll
new file mode 100644
index 0000000..6bc7ce3
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-01-19-fmod-constant-float.ll
@@ -0,0 +1,75 @@
+; RUN: opt < %s -simplifycfg -instcombine -S | grep 0x3FB99999A0000000 | count 2
+; RUN: opt < %s -simplifycfg -instcombine -S | grep 0xBFB99999A0000000 | count 2
+; check constant folding for 'frem'. PR 3316.
+
+; ModuleID = 'tt.c'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.6"
+
+define float @test1() nounwind {
+entry:
+ %retval = alloca float ; <float*> [#uses=2]
+ %0 = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %1 = frem double 1.000000e-01, 1.000000e+00 ; <double> [#uses=1]
+ %2 = fptrunc double %1 to float ; <float> [#uses=1]
+ store float %2, float* %0, align 4
+ %3 = load float* %0, align 4 ; <float> [#uses=1]
+ store float %3, float* %retval, align 4
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load float* %retval ; <float> [#uses=1]
+ ret float %retval1
+}
+
+define float @test2() nounwind {
+entry:
+ %retval = alloca float ; <float*> [#uses=2]
+ %0 = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %1 = frem double -1.000000e-01, 1.000000e+00 ; <double> [#uses=1]
+ %2 = fptrunc double %1 to float ; <float> [#uses=1]
+ store float %2, float* %0, align 4
+ %3 = load float* %0, align 4 ; <float> [#uses=1]
+ store float %3, float* %retval, align 4
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load float* %retval ; <float> [#uses=1]
+ ret float %retval1
+}
+
+define float @test3() nounwind {
+entry:
+ %retval = alloca float ; <float*> [#uses=2]
+ %0 = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %1 = frem double 1.000000e-01, -1.000000e+00 ; <double> [#uses=1]
+ %2 = fptrunc double %1 to float ; <float> [#uses=1]
+ store float %2, float* %0, align 4
+ %3 = load float* %0, align 4 ; <float> [#uses=1]
+ store float %3, float* %retval, align 4
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load float* %retval ; <float> [#uses=1]
+ ret float %retval1
+}
+
+define float @test4() nounwind {
+entry:
+ %retval = alloca float ; <float*> [#uses=2]
+ %0 = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %1 = frem double -1.000000e-01, -1.000000e+00 ; <double> [#uses=1]
+ %2 = fptrunc double %1 to float ; <float> [#uses=1]
+ store float %2, float* %0, align 4
+ %3 = load float* %0, align 4 ; <float> [#uses=1]
+ store float %3, float* %retval, align 4
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load float* %retval ; <float> [#uses=1]
+ ret float %retval1
+}
diff --git a/test/Transforms/InstCombine/2009-01-24-EmptyStruct.ll b/test/Transforms/InstCombine/2009-01-24-EmptyStruct.ll
new file mode 100644
index 0000000..4b64b48
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-01-24-EmptyStruct.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine
+; PR3381
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+target triple = "x86_64-unknown-linux-gnu"
+ %struct.atomic_t = type { i32 }
+ %struct.inode = type { i32, %struct.mutex }
+ %struct.list_head = type { %struct.list_head*, %struct.list_head* }
+ %struct.lock_class_key = type { }
+ %struct.mutex = type { %struct.atomic_t, %struct.rwlock_t, %struct.list_head }
+ %struct.rwlock_t = type { %struct.lock_class_key }
+
+define void @handle_event(%struct.inode* %bar) nounwind {
+entry:
+ %0 = getelementptr %struct.inode* %bar, i64 -1, i32 1, i32 1 ; <%struct.rwlock_t*> [#uses=1]
+ %1 = bitcast %struct.rwlock_t* %0 to i32* ; <i32*> [#uses=1]
+ store i32 1, i32* %1, align 4
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2009-01-31-InfIterate.ll b/test/Transforms/InstCombine/2009-01-31-InfIterate.ll
new file mode 100644
index 0000000..815c1a9
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-01-31-InfIterate.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine | llvm-dis
+; PR3452
+define i128 @test(i64 %A, i64 %B, i1 %C, i128 %Z, i128 %Y, i64* %P, i64* %Q) {
+entry:
+ %tmp2 = trunc i128 %Z to i64
+ %tmp4 = trunc i128 %Y to i64
+ store i64 %tmp2, i64* %P
+ store i64 %tmp4, i64* %Q
+ %x = sub i64 %tmp2, %tmp4
+ %c = sub i64 %tmp2, %tmp4
+ %tmp137 = zext i1 %C to i64
+ %tmp138 = sub i64 %c, %tmp137
+ br label %T
+
+T:
+ %G = phi i64 [%tmp138, %entry], [%tmp2, %Fal]
+ %F = zext i64 %G to i128
+ ret i128 %F
+
+Fal:
+ br label %T
+}
diff --git a/test/Transforms/InstCombine/2009-01-31-Pressure.ll b/test/Transforms/InstCombine/2009-01-31-Pressure.ll
new file mode 100644
index 0000000..c3ee9a3
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-01-31-Pressure.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -S | grep {%B = add i8 %b, %x}
+; PR2698
+
+declare void @use1(i1)
+declare void @use8(i8)
+
+define void @test1(i8 %a, i8 %b, i8 %x) {
+ %A = add i8 %a, %x
+ %B = add i8 %b, %x
+ %C = icmp eq i8 %A, %B
+ call void @use1(i1 %C)
+ ret void
+}
+
+define void @test2(i8 %a, i8 %b, i8 %x) {
+ %A = add i8 %a, %x
+ %B = add i8 %b, %x
+ %C = icmp eq i8 %A, %B
+ call void @use1(i1 %C)
+ call void @use8(i8 %A)
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2009-02-04-FPBitcast.ll b/test/Transforms/InstCombine/2009-02-04-FPBitcast.ll
new file mode 100644
index 0000000..bc6a204
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-02-04-FPBitcast.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine
+; PR3468
+
+define x86_fp80 @cast() {
+ %tmp = bitcast i80 0 to x86_fp80 ; <x86_fp80> [#uses=1]
+ ret x86_fp80 %tmp
+}
+
+define i80 @invcast() {
+ %tmp = bitcast x86_fp80 0xK00000000000000000000 to i80 ; <i80> [#uses=1]
+ ret i80 %tmp
+}
diff --git a/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll b/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
new file mode 100644
index 0000000..b29d8d2
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
@@ -0,0 +1,278 @@
+; RUN: opt < %s -instcombine -scalarrepl -S | not grep { = alloca}
+; rdar://6417724
+; Instcombine shouldn't do anything to this function that prevents promoting the allocas inside it.
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.6"
+ %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >" = type { i32* }
+ %"struct.std::_Vector_base<int,std::allocator<int> >" = type { %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl" }
+ %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl" = type { i32*, i32*, i32* }
+ %"struct.std::bidirectional_iterator_tag" = type <{ i8 }>
+ %"struct.std::forward_iterator_tag" = type <{ i8 }>
+ %"struct.std::input_iterator_tag" = type <{ i8 }>
+ %"struct.std::random_access_iterator_tag" = type <{ i8 }>
+ %"struct.std::vector<int,std::allocator<int> >" = type { %"struct.std::_Vector_base<int,std::allocator<int> >" }
+
+define i32* @_Z3fooRSt6vectorIiSaIiEE(%"struct.std::vector<int,std::allocator<int> >"* %X) {
+entry:
+ %0 = alloca %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >" ; <%"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"*> [#uses=2]
+ %__first_addr.i.i = alloca %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >" ; <%"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"*> [#uses=31]
+ %__last_addr.i.i = alloca %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >" ; <%"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"*> [#uses=4]
+ %unnamed_arg.i = alloca %"struct.std::bidirectional_iterator_tag", align 8 ; <%"struct.std::bidirectional_iterator_tag"*> [#uses=1]
+ %1 = alloca %"struct.std::bidirectional_iterator_tag" ; <%"struct.std::bidirectional_iterator_tag"*> [#uses=1]
+ %__first_addr.i = alloca %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >" ; <%"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"*> [#uses=2]
+ %2 = alloca %"struct.std::bidirectional_iterator_tag" ; <%"struct.std::bidirectional_iterator_tag"*> [#uses=2]
+ %3 = alloca %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >" ; <%"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"*> [#uses=2]
+ %4 = alloca i32 ; <i32*> [#uses=8]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store i32 42, i32* %4, align 4
+ %5 = getelementptr %"struct.std::vector<int,std::allocator<int> >"* %X, i32 0, i32 0 ; <%"struct.std::_Vector_base<int,std::allocator<int> >"*> [#uses=1]
+ %6 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >"* %5, i32 0, i32 0 ; <%"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl"*> [#uses=1]
+ %7 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl"* %6, i32 0, i32 1 ; <i32**> [#uses=1]
+ %8 = load i32** %7, align 4 ; <i32*> [#uses=1]
+ %9 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %3, i32 0, i32 0 ; <i32**> [#uses=1]
+ store i32* %8, i32** %9, align 4
+ %10 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %3, i32 0, i32 0 ; <i32**> [#uses=1]
+ %11 = load i32** %10, align 4 ; <i32*> [#uses=1]
+ %tmp2.i = ptrtoint i32* %11 to i32 ; <i32> [#uses=1]
+ %tmp1.i = inttoptr i32 %tmp2.i to i32* ; <i32*> [#uses=1]
+ %tmp3 = ptrtoint i32* %tmp1.i to i32 ; <i32> [#uses=1]
+ %tmp2 = inttoptr i32 %tmp3 to i32* ; <i32*> [#uses=1]
+ %12 = getelementptr %"struct.std::vector<int,std::allocator<int> >"* %X, i32 0, i32 0 ; <%"struct.std::_Vector_base<int,std::allocator<int> >"*> [#uses=1]
+ %13 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >"* %12, i32 0, i32 0 ; <%"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl"*> [#uses=1]
+ %14 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl"* %13, i32 0, i32 0 ; <i32**> [#uses=1]
+ %15 = load i32** %14, align 4 ; <i32*> [#uses=1]
+ %16 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %0, i32 0, i32 0 ; <i32**> [#uses=1]
+ store i32* %15, i32** %16, align 4
+ %17 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %0, i32 0, i32 0 ; <i32**> [#uses=1]
+ %18 = load i32** %17, align 4 ; <i32*> [#uses=1]
+ %tmp2.i17 = ptrtoint i32* %18 to i32 ; <i32> [#uses=1]
+ %tmp1.i18 = inttoptr i32 %tmp2.i17 to i32* ; <i32*> [#uses=1]
+ %tmp8 = ptrtoint i32* %tmp1.i18 to i32 ; <i32> [#uses=1]
+ %tmp6 = inttoptr i32 %tmp8 to i32* ; <i32*> [#uses=1]
+ %19 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ store i32* %tmp6, i32** %19
+ %20 = getelementptr %"struct.std::bidirectional_iterator_tag"* %1, i32 0, i32 0 ; <i8*> [#uses=1]
+ %21 = load i8* %20, align 1 ; <i8> [#uses=1]
+ %22 = or i8 %21, 0 ; <i8> [#uses=1]
+ %23 = or i8 %22, 0 ; <i8> [#uses=1]
+ %24 = or i8 %23, 0 ; <i8> [#uses=0]
+ %25 = getelementptr %"struct.std::bidirectional_iterator_tag"* %2, i32 0, i32 0 ; <i8*> [#uses=1]
+ store i8 0, i8* %25, align 1
+ %elt.i = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %val.i = load i32** %elt.i ; <i32*> [#uses=1]
+ %tmp.i = bitcast %"struct.std::bidirectional_iterator_tag"* %unnamed_arg.i to i8* ; <i8*> [#uses=1]
+ %tmp9.i = bitcast %"struct.std::bidirectional_iterator_tag"* %2 to i8* ; <i8*> [#uses=1]
+ call void @llvm.memcpy.i64(i8* %tmp.i, i8* %tmp9.i, i64 1, i32 1)
+ %26 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ store i32* %val.i, i32** %26
+ %27 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ store i32* %tmp2, i32** %27
+ %28 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %29 = load i32** %28, align 4 ; <i32*> [#uses=1]
+ %30 = ptrtoint i32* %29 to i32 ; <i32> [#uses=1]
+ %31 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %32 = load i32** %31, align 4 ; <i32*> [#uses=1]
+ %33 = ptrtoint i32* %32 to i32 ; <i32> [#uses=1]
+ %34 = sub i32 %30, %33 ; <i32> [#uses=1]
+ %35 = ashr i32 %34, 2 ; <i32> [#uses=1]
+ %36 = ashr i32 %35, 2 ; <i32> [#uses=1]
+ br label %bb12.i.i
+
+bb.i.i: ; preds = %bb12.i.i
+ %37 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %38 = load i32** %37, align 4 ; <i32*> [#uses=1]
+ %39 = load i32* %38, align 4 ; <i32> [#uses=1]
+ %40 = load i32* %4, align 4 ; <i32> [#uses=1]
+ %41 = icmp eq i32 %39, %40 ; <i1> [#uses=1]
+ %42 = zext i1 %41 to i8 ; <i8> [#uses=1]
+ %toBool.i.i = icmp ne i8 %42, 0 ; <i1> [#uses=1]
+ br i1 %toBool.i.i, label %bb1.i.i, label %bb2.i.i
+
+bb1.i.i: ; preds = %bb.i.i
+ %43 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %44 = load i32** %43, align 4 ; <i32*> [#uses=1]
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+bb2.i.i: ; preds = %bb.i.i
+ %45 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %46 = load i32** %45, align 4 ; <i32*> [#uses=1]
+ %47 = getelementptr i32* %46, i64 1 ; <i32*> [#uses=1]
+ %48 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ store i32* %47, i32** %48, align 4
+ %49 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %50 = load i32** %49, align 4 ; <i32*> [#uses=1]
+ %51 = load i32* %50, align 4 ; <i32> [#uses=1]
+ %52 = load i32* %4, align 4 ; <i32> [#uses=1]
+ %53 = icmp eq i32 %51, %52 ; <i1> [#uses=1]
+ %54 = zext i1 %53 to i8 ; <i8> [#uses=1]
+ %toBool3.i.i = icmp ne i8 %54, 0 ; <i1> [#uses=1]
+ br i1 %toBool3.i.i, label %bb4.i.i, label %bb5.i.i
+
+bb4.i.i: ; preds = %bb2.i.i
+ %55 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %56 = load i32** %55, align 4 ; <i32*> [#uses=1]
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+bb5.i.i: ; preds = %bb2.i.i
+ %57 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %58 = load i32** %57, align 4 ; <i32*> [#uses=1]
+ %59 = getelementptr i32* %58, i64 1 ; <i32*> [#uses=1]
+ %60 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ store i32* %59, i32** %60, align 4
+ %61 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %62 = load i32** %61, align 4 ; <i32*> [#uses=1]
+ %63 = load i32* %62, align 4 ; <i32> [#uses=1]
+ %64 = load i32* %4, align 4 ; <i32> [#uses=1]
+ %65 = icmp eq i32 %63, %64 ; <i1> [#uses=1]
+ %66 = zext i1 %65 to i8 ; <i8> [#uses=1]
+ %toBool6.i.i = icmp ne i8 %66, 0 ; <i1> [#uses=1]
+ br i1 %toBool6.i.i, label %bb7.i.i, label %bb8.i.i
+
+bb7.i.i: ; preds = %bb5.i.i
+ %67 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %68 = load i32** %67, align 4 ; <i32*> [#uses=1]
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+bb8.i.i: ; preds = %bb5.i.i
+ %69 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %70 = load i32** %69, align 4 ; <i32*> [#uses=1]
+ %71 = getelementptr i32* %70, i64 1 ; <i32*> [#uses=1]
+ %72 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ store i32* %71, i32** %72, align 4
+ %73 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %74 = load i32** %73, align 4 ; <i32*> [#uses=1]
+ %75 = load i32* %74, align 4 ; <i32> [#uses=1]
+ %76 = load i32* %4, align 4 ; <i32> [#uses=1]
+ %77 = icmp eq i32 %75, %76 ; <i1> [#uses=1]
+ %78 = zext i1 %77 to i8 ; <i8> [#uses=1]
+ %toBool9.i.i = icmp ne i8 %78, 0 ; <i1> [#uses=1]
+ br i1 %toBool9.i.i, label %bb10.i.i, label %bb11.i.i
+
+bb10.i.i: ; preds = %bb8.i.i
+ %79 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %80 = load i32** %79, align 4 ; <i32*> [#uses=1]
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+bb11.i.i: ; preds = %bb8.i.i
+ %81 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %82 = load i32** %81, align 4 ; <i32*> [#uses=1]
+ %83 = getelementptr i32* %82, i64 1 ; <i32*> [#uses=1]
+ %84 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ store i32* %83, i32** %84, align 4
+ %85 = sub i32 %__trip_count.0.i.i, 1 ; <i32> [#uses=1]
+ br label %bb12.i.i
+
+bb12.i.i: ; preds = %bb11.i.i, %entry
+ %__trip_count.0.i.i = phi i32 [ %36, %entry ], [ %85, %bb11.i.i ] ; <i32> [#uses=2]
+ %86 = icmp sgt i32 %__trip_count.0.i.i, 0 ; <i1> [#uses=1]
+ br i1 %86, label %bb.i.i, label %bb13.i.i
+
+bb13.i.i: ; preds = %bb12.i.i
+ %87 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %88 = load i32** %87, align 4 ; <i32*> [#uses=1]
+ %89 = ptrtoint i32* %88 to i32 ; <i32> [#uses=1]
+ %90 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %91 = load i32** %90, align 4 ; <i32*> [#uses=1]
+ %92 = ptrtoint i32* %91 to i32 ; <i32> [#uses=1]
+ %93 = sub i32 %89, %92 ; <i32> [#uses=1]
+ %94 = ashr i32 %93, 2 ; <i32> [#uses=1]
+ switch i32 %94, label %bb26.i.i [
+ i32 1, label %bb22.i.i
+ i32 2, label %bb18.i.i
+ i32 3, label %bb14.i.i
+ ]
+
+bb14.i.i: ; preds = %bb13.i.i
+ %95 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %96 = load i32** %95, align 4 ; <i32*> [#uses=1]
+ %97 = load i32* %96, align 4 ; <i32> [#uses=1]
+ %98 = load i32* %4, align 4 ; <i32> [#uses=1]
+ %99 = icmp eq i32 %97, %98 ; <i1> [#uses=1]
+ %100 = zext i1 %99 to i8 ; <i8> [#uses=1]
+ %toBool15.i.i = icmp ne i8 %100, 0 ; <i1> [#uses=1]
+ br i1 %toBool15.i.i, label %bb16.i.i, label %bb17.i.i
+
+bb16.i.i: ; preds = %bb14.i.i
+ %101 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %102 = load i32** %101, align 4 ; <i32*> [#uses=1]
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+bb17.i.i: ; preds = %bb14.i.i
+ %103 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %104 = load i32** %103, align 4 ; <i32*> [#uses=1]
+ %105 = getelementptr i32* %104, i64 1 ; <i32*> [#uses=1]
+ %106 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ store i32* %105, i32** %106, align 4
+ br label %bb18.i.i
+
+bb18.i.i: ; preds = %bb17.i.i, %bb13.i.i
+ %107 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %108 = load i32** %107, align 4 ; <i32*> [#uses=1]
+ %109 = load i32* %108, align 4 ; <i32> [#uses=1]
+ %110 = load i32* %4, align 4 ; <i32> [#uses=1]
+ %111 = icmp eq i32 %109, %110 ; <i1> [#uses=1]
+ %112 = zext i1 %111 to i8 ; <i8> [#uses=1]
+ %toBool19.i.i = icmp ne i8 %112, 0 ; <i1> [#uses=1]
+ br i1 %toBool19.i.i, label %bb20.i.i, label %bb21.i.i
+
+bb20.i.i: ; preds = %bb18.i.i
+ %113 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %114 = load i32** %113, align 4 ; <i32*> [#uses=1]
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+bb21.i.i: ; preds = %bb18.i.i
+ %115 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %116 = load i32** %115, align 4 ; <i32*> [#uses=1]
+ %117 = getelementptr i32* %116, i64 1 ; <i32*> [#uses=1]
+ %118 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ store i32* %117, i32** %118, align 4
+ br label %bb22.i.i
+
+bb22.i.i: ; preds = %bb21.i.i, %bb13.i.i
+ %119 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %120 = load i32** %119, align 4 ; <i32*> [#uses=1]
+ %121 = load i32* %120, align 4 ; <i32> [#uses=1]
+ %122 = load i32* %4, align 4 ; <i32> [#uses=1]
+ %123 = icmp eq i32 %121, %122 ; <i1> [#uses=1]
+ %124 = zext i1 %123 to i8 ; <i8> [#uses=1]
+ %toBool23.i.i = icmp ne i8 %124, 0 ; <i1> [#uses=1]
+ br i1 %toBool23.i.i, label %bb24.i.i, label %bb25.i.i
+
+bb24.i.i: ; preds = %bb22.i.i
+ %125 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %126 = load i32** %125, align 4 ; <i32*> [#uses=1]
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+bb25.i.i: ; preds = %bb22.i.i
+ %127 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %128 = load i32** %127, align 4 ; <i32*> [#uses=1]
+ %129 = getelementptr i32* %128, i64 1 ; <i32*> [#uses=1]
+ %130 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ store i32* %129, i32** %130, align 4
+ br label %bb26.i.i
+
+bb26.i.i: ; preds = %bb25.i.i, %bb13.i.i
+ %131 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0 ; <i32**> [#uses=1]
+ %132 = load i32** %131, align 4 ; <i32*> [#uses=1]
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit: ; preds = %bb26.i.i, %bb24.i.i, %bb20.i.i, %bb16.i.i, %bb10.i.i, %bb7.i.i, %bb4.i.i, %bb1.i.i
+ %.0.0.i.i = phi i32* [ %132, %bb26.i.i ], [ %126, %bb24.i.i ], [ %114, %bb20.i.i ], [ %102, %bb16.i.i ], [ %80, %bb10.i.i ], [ %68, %bb7.i.i ], [ %56, %bb4.i.i ], [ %44, %bb1.i.i ] ; <i32*> [#uses=1]
+ %tmp2.i.i = ptrtoint i32* %.0.0.i.i to i32 ; <i32> [#uses=1]
+ %tmp1.i.i = inttoptr i32 %tmp2.i.i to i32* ; <i32*> [#uses=1]
+ %tmp4.i = ptrtoint i32* %tmp1.i.i to i32 ; <i32> [#uses=1]
+ %tmp3.i = inttoptr i32 %tmp4.i to i32* ; <i32*> [#uses=1]
+ %tmp8.i = ptrtoint i32* %tmp3.i to i32 ; <i32> [#uses=1]
+ %tmp6.i = inttoptr i32 %tmp8.i to i32* ; <i32*> [#uses=1]
+ %tmp12 = ptrtoint i32* %tmp6.i to i32 ; <i32> [#uses=1]
+ %tmp10 = inttoptr i32 %tmp12 to i32* ; <i32*> [#uses=1]
+ %tmp16 = ptrtoint i32* %tmp10 to i32 ; <i32> [#uses=1]
+ br label %return
+
+return: ; preds = %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+ %tmp14 = inttoptr i32 %tmp16 to i32* ; <i32*> [#uses=1]
+ ret i32* %tmp14
+}
+
+declare void @llvm.memcpy.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
diff --git a/test/Transforms/InstCombine/2009-02-21-LoadCST.ll b/test/Transforms/InstCombine/2009-02-21-LoadCST.ll
new file mode 100644
index 0000000..f56fc38
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-02-21-LoadCST.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | grep {ret i32 3679669}
+; PR3595
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+target triple = "i386-pc-linux-gnu"
+
+@.str1 = internal constant [4 x i8] c"\B5%8\00"
+
+define i32 @test() {
+ %rhsv = load i32* bitcast ([4 x i8]* @.str1 to i32*), align 1
+ ret i32 %rhsv
+}
diff --git a/test/Transforms/InstCombine/2009-02-25-CrashZeroSizeArray.ll b/test/Transforms/InstCombine/2009-02-25-CrashZeroSizeArray.ll
new file mode 100644
index 0000000..a8349f0
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-02-25-CrashZeroSizeArray.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -instcombine | llvm-dis
+; PR3667
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+target triple = "i386-pc-linux-gnu"
+
+define void @_ada_c32001b(i32 %tmp5) {
+entry:
+ %max289 = select i1 false, i32 %tmp5, i32 0 ; <i32> [#uses=1]
+ %tmp6 = mul i32 %max289, 4 ; <i32> [#uses=1]
+ %tmp7 = alloca i8, i32 0 ; <i8*> [#uses=1]
+ %tmp8 = bitcast i8* %tmp7 to [0 x [0 x i32]]* ; <[0 x [0 x i32]]*> [#uses=1]
+ %tmp11 = load i32* null, align 1 ; <i32> [#uses=1]
+ %tmp12 = icmp eq i32 %tmp11, 3 ; <i1> [#uses=1]
+ %tmp13 = zext i1 %tmp12 to i8 ; <i8> [#uses=1]
+ %tmp14 = ashr i32 %tmp6, 2 ; <i32> [#uses=1]
+ %tmp15 = bitcast [0 x [0 x i32]]* %tmp8 to i8* ; <i8*> [#uses=1]
+ %tmp16 = mul i32 %tmp14, 4 ; <i32> [#uses=1]
+ %tmp17 = mul i32 1, %tmp16 ; <i32> [#uses=1]
+ %tmp18 = getelementptr i8* %tmp15, i32 %tmp17 ; <i8*> [#uses=1]
+ %tmp19 = bitcast i8* %tmp18 to [0 x i32]* ; <[0 x i32]*> [#uses=1]
+ %tmp20 = bitcast [0 x i32]* %tmp19 to i32* ; <i32*> [#uses=1]
+ %tmp21 = getelementptr i32* %tmp20, i32 0 ; <i32*> [#uses=1]
+ %tmp22 = load i32* %tmp21, align 1 ; <i32> [#uses=1]
+ %tmp23 = icmp eq i32 %tmp22, 4 ; <i1> [#uses=1]
+ %tmp24 = zext i1 %tmp23 to i8 ; <i8> [#uses=1]
+ %toBool709 = icmp ne i8 %tmp13, 0 ; <i1> [#uses=1]
+ %toBool710 = icmp ne i8 %tmp24, 0 ; <i1> [#uses=1]
+ %tmp25 = and i1 %toBool709, %toBool710 ; <i1> [#uses=1]
+ %tmp26 = zext i1 %tmp25 to i8 ; <i8> [#uses=1]
+ %toBool711 = icmp ne i8 %tmp26, 0 ; <i1> [#uses=1]
+ br i1 %toBool711, label %a, label %b
+
+a: ; preds = %entry
+ ret void
+
+b: ; preds = %entry
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2009-03-18-vector-ashr-crash.ll b/test/Transforms/InstCombine/2009-03-18-vector-ashr-crash.ll
new file mode 100644
index 0000000..c617ca4
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-03-18-vector-ashr-crash.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine | llvm-dis
+; PR3826
+
+define void @0(<4 x i16>*, <4 x i16>*) {
+ %3 = alloca <4 x i16>* ; <<4 x i16>**> [#uses=1]
+ %4 = load <4 x i16>* null, align 1 ; <<4 x i16>> [#uses=1]
+ %5 = ashr <4 x i16> %4, <i16 5, i16 5, i16 5, i16 5> ; <<4 x i16>> [#uses=1]
+ %6 = load <4 x i16>** %3 ; <<4 x i16>*> [#uses=1]
+ store <4 x i16> %5, <4 x i16>* %6, align 1
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2009-03-20-AShrOverShift.ll b/test/Transforms/InstCombine/2009-03-20-AShrOverShift.ll
new file mode 100644
index 0000000..0a07bf3
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-03-20-AShrOverShift.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {ashr i32 %val, 31}
+; PR3851
+
+define i32 @foo2(i32 %val) nounwind {
+entry:
+ %shr = ashr i32 %val, 15 ; <i32> [#uses=3]
+ %shr4 = ashr i32 %shr, 17 ; <i32> [#uses=1]
+ ret i32 %shr4
+ }
diff --git a/test/Transforms/InstCombine/2009-03-24-InfLoop.ll b/test/Transforms/InstCombine/2009-03-24-InfLoop.ll
new file mode 100644
index 0000000..4ce04a1
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-03-24-InfLoop.ll
@@ -0,0 +1,9 @@
+; PR3874
+; RUN: opt < %s -instcombine | llvm-dis
+ define i1 @test(i32 %x) {
+ %A = lshr i32 3968, %x
+ %B = and i32 %A, 1
+ %C = icmp eq i32 %B, 0
+ ret i1 %C
+ }
+
diff --git a/test/Transforms/InstCombine/2009-04-07-MulPromoteToI96.ll b/test/Transforms/InstCombine/2009-04-07-MulPromoteToI96.ll
new file mode 100644
index 0000000..244b22a
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-04-07-MulPromoteToI96.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -S | grep {mul i64}
+; rdar://6762288
+
+; Instcombine should not promote the mul to i96 because it is definitely
+; not a legal type for the target, and we don't want a libcall.
+
+define i96 @test(i96 %a.4, i96 %b.2) {
+ %tmp1086 = trunc i96 %a.4 to i64 ; <i64> [#uses=1]
+ %tmp836 = trunc i96 %b.2 to i64 ; <i64> [#uses=1]
+ %mul185 = mul i64 %tmp1086, %tmp836 ; <i64> [#uses=1]
+ %tmp544 = zext i64 %mul185 to i96 ; <i96> [#uses=1]
+ ret i96 %tmp544
+}
diff --git a/test/Transforms/InstCombine/2009-05-23-FCmpToICmp.ll b/test/Transforms/InstCombine/2009-05-23-FCmpToICmp.ll
new file mode 100644
index 0000000..dd14c6b
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-05-23-FCmpToICmp.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | not grep cmp
+; rdar://6903175
+
+define i1 @f0(i32 *%a) nounwind {
+ %b = load i32* %a, align 4
+ %c = uitofp i32 %b to double
+ %d = fcmp ogt double %c, 0x41EFFFFFFFE00000
+ ret i1 %d
+}
diff --git a/test/Transforms/InstCombine/2009-06-11-StoreAddrSpace.ll b/test/Transforms/InstCombine/2009-06-11-StoreAddrSpace.ll
new file mode 100644
index 0000000..e5355b8
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-06-11-StoreAddrSpace.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -S | grep {store i32 0,}
+; PR4366
+
+define void @a() {
+ store i32 0, i32 addrspace(1)* null
+ ret void
+}
diff --git a/test/Transforms/InstCombine/2009-06-16-SRemDemandedBits.ll b/test/Transforms/InstCombine/2009-06-16-SRemDemandedBits.ll
new file mode 100644
index 0000000..6beedf8
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-06-16-SRemDemandedBits.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep srem
+; PR3439
+
+define i32 @a(i32 %x) nounwind {
+entry:
+ %rem = srem i32 %x, 2
+ %and = and i32 %rem, 2
+ ret i32 %and
+}
diff --git a/test/Transforms/InstCombine/2009-07-02-MaskedIntVector.ll b/test/Transforms/InstCombine/2009-07-02-MaskedIntVector.ll
new file mode 100644
index 0000000..41940fe
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-07-02-MaskedIntVector.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine | llvm-dis
+; PR4495
+
+define i32 @test(i64 %test) {
+entry:
+ %0 = bitcast <4 x i32> undef to <16 x i8> ; <<16 x i8>> [#uses=1]
+ %t12 = shufflevector <16 x i8> %0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0> ; <<16 x i8>> [#uses=1]
+ %t11 = bitcast <16 x i8> %t12 to <2 x i64> ; <<2 x i64>> [#uses=1]
+ %t9 = extractelement <2 x i64> %t11, i32 0 ; <i64> [#uses=1]
+ %t10 = bitcast i64 %t9 to <2 x i32> ; <<2 x i32>> [#uses=1]
+ %t7 = bitcast i64 %test to <2 x i32> ; <<2 x i32>> [#uses=1]
+ %t6 = xor <2 x i32> %t10, %t7 ; <<2 x i32>> [#uses=1]
+ %t1 = extractelement <2 x i32> %t6, i32 0 ; <i32> [#uses=1]
+ ret i32 %t1
+}
diff --git a/test/Transforms/InstCombine/2009-12-17-CmpSelectNull.ll b/test/Transforms/InstCombine/2009-12-17-CmpSelectNull.ll
new file mode 100644
index 0000000..fb7497b
--- /dev/null
+++ b/test/Transforms/InstCombine/2009-12-17-CmpSelectNull.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+@.str254 = internal constant [2 x i8] c".\00"
+@.str557 = internal constant [3 x i8] c"::\00"
+
+define i8* @demangle_qualified(i32 %isfuncname) nounwind {
+entry:
+ %tobool272 = icmp ne i32 %isfuncname, 0
+ %cond276 = select i1 %tobool272, i8* getelementptr inbounds ([2 x i8]* @.str254, i32 0, i32 0), i8* getelementptr inbounds ([3 x i8]* @.str557, i32 0, i32 0) ; <i8*> [#uses=4]
+ %cmp.i504 = icmp eq i8* %cond276, null
+ %rval = getelementptr i8* %cond276, i1 %cmp.i504
+ ret i8* %rval
+}
+
+; CHECK: %cond276 = select i1
+; CHECK: ret i8* %cond276
diff --git a/test/Transforms/InstCombine/2010-01-28-NegativeSRem.ll b/test/Transforms/InstCombine/2010-01-28-NegativeSRem.ll
new file mode 100644
index 0000000..4ab9bf0
--- /dev/null
+++ b/test/Transforms/InstCombine/2010-01-28-NegativeSRem.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; PR6165
+
+define i32 @f() {
+entry:
+ br label %BB1
+
+BB1: ; preds = %BB1, %entry
+; CHECK: BB1:
+ %x = phi i32 [ -29, %entry ], [ 0, %BB1 ] ; <i32> [#uses=2]
+ %rem = srem i32 %x, 2 ; <i32> [#uses=1]
+ %t = icmp eq i32 %rem, -1 ; <i1> [#uses=1]
+ br i1 %t, label %BB2, label %BB1
+; CHECK-NOT: br i1 false
+
+BB2: ; preds = %BB1
+; CHECK: BB2:
+ ret i32 %x
+}
diff --git a/test/Transforms/InstCombine/CPP_min_max.ll b/test/Transforms/InstCombine/CPP_min_max.ll
new file mode 100644
index 0000000..531ce2b
--- /dev/null
+++ b/test/Transforms/InstCombine/CPP_min_max.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep select | not grep {i32\\*}
+
+; This testcase corresponds to PR362, which notices that this horrible code
+; is generated by the C++ front-end and LLVM optimizers, which has lots of
+; loads and other stuff that are unneeded.
+;
+; Instcombine should propagate the load through the select instructions to
+; allow elimination of the extra stuff by the mem2reg pass.
+
+define void @_Z5test1RiS_(i32* %x, i32* %y) {
+entry:
+ %tmp.1.i = load i32* %y ; <i32> [#uses=1]
+ %tmp.3.i = load i32* %x ; <i32> [#uses=1]
+ %tmp.4.i = icmp slt i32 %tmp.1.i, %tmp.3.i ; <i1> [#uses=1]
+ %retval.i = select i1 %tmp.4.i, i32* %y, i32* %x ; <i32*> [#uses=1]
+ %tmp.4 = load i32* %retval.i ; <i32> [#uses=1]
+ store i32 %tmp.4, i32* %x
+ ret void
+}
+
+define void @_Z5test2RiS_(i32* %x, i32* %y) {
+entry:
+ %tmp.0 = alloca i32 ; <i32*> [#uses=2]
+ %tmp.2 = load i32* %x ; <i32> [#uses=2]
+ store i32 %tmp.2, i32* %tmp.0
+ %tmp.3.i = load i32* %y ; <i32> [#uses=1]
+ %tmp.4.i = icmp slt i32 %tmp.2, %tmp.3.i ; <i1> [#uses=1]
+ %retval.i = select i1 %tmp.4.i, i32* %y, i32* %tmp.0 ; <i32*> [#uses=1]
+ %tmp.6 = load i32* %retval.i ; <i32> [#uses=1]
+ store i32 %tmp.6, i32* %y
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/IntPtrCast.ll b/test/Transforms/InstCombine/IntPtrCast.ll
new file mode 100644
index 0000000..4ecbccd
--- /dev/null
+++ b/test/Transforms/InstCombine/IntPtrCast.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-p:32:32"
+
+define i32* @test(i32* %P) {
+ %V = ptrtoint i32* %P to i32 ; <i32> [#uses=1]
+ %P2 = inttoptr i32 %V to i32* ; <i32*> [#uses=1]
+ ret i32* %P2
+; CHECK: ret i32* %P
+}
+
diff --git a/test/Transforms/InstCombine/JavaCompare.ll b/test/Transforms/InstCombine/JavaCompare.ll
new file mode 100644
index 0000000..7d0edb8
--- /dev/null
+++ b/test/Transforms/InstCombine/JavaCompare.ll
@@ -0,0 +1,14 @@
+; This is the sequence of stuff that the Java front-end expands for a single
+; <= comparison. Check to make sure we turn it into a <= (only)
+
+; RUN: opt < %s -instcombine -S | grep {%c3 = icmp sle i32 %A, %B}
+
+define i1 @le(i32 %A, i32 %B) {
+ %c1 = icmp sgt i32 %A, %B ; <i1> [#uses=1]
+ %tmp = select i1 %c1, i32 1, i32 0 ; <i32> [#uses=1]
+ %c2 = icmp slt i32 %A, %B ; <i1> [#uses=1]
+ %result = select i1 %c2, i32 -1, i32 %tmp ; <i32> [#uses=1]
+ %c3 = icmp sle i32 %result, 0 ; <i1> [#uses=1]
+ ret i1 %c3
+}
+
diff --git a/test/Transforms/InstCombine/README.txt b/test/Transforms/InstCombine/README.txt
new file mode 100644
index 0000000..de043c7
--- /dev/null
+++ b/test/Transforms/InstCombine/README.txt
@@ -0,0 +1,4 @@
+This directory contains test cases for the instcombine transformation. The
+dated tests are actual bug tests, whereas the named tests are used to test
+for features that this pass should be capable of performing.
+
diff --git a/test/Transforms/InstCombine/add-shrink.ll b/test/Transforms/InstCombine/add-shrink.ll
new file mode 100644
index 0000000..cc57478
--- /dev/null
+++ b/test/Transforms/InstCombine/add-shrink.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep {add nsw i32}
+; RUN: opt < %s -instcombine -S | grep sext | count 1
+
+; Should only have one sext and the add should be i32 instead of i64.
+
+define i64 @test1(i32 %A) {
+ %B = ashr i32 %A, 7 ; <i32> [#uses=1]
+ %C = ashr i32 %A, 9 ; <i32> [#uses=1]
+ %D = sext i32 %B to i64 ; <i64> [#uses=1]
+ %E = sext i32 %C to i64 ; <i64> [#uses=1]
+ %F = add i64 %D, %E ; <i64> [#uses=1]
+ ret i64 %F
+}
+
diff --git a/test/Transforms/InstCombine/add-sitofp.ll b/test/Transforms/InstCombine/add-sitofp.ll
new file mode 100644
index 0000000..98a8cb4
--- /dev/null
+++ b/test/Transforms/InstCombine/add-sitofp.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {add nsw i32}
+
+define double @x(i32 %a, i32 %b) nounwind {
+ %m = lshr i32 %a, 24
+ %n = and i32 %m, %b
+ %o = sitofp i32 %n to double
+ %p = fadd double %o, 1.0
+ ret double %p
+}
diff --git a/test/Transforms/InstCombine/add.ll b/test/Transforms/InstCombine/add.ll
new file mode 100644
index 0000000..4719809
--- /dev/null
+++ b/test/Transforms/InstCombine/add.ll
@@ -0,0 +1,277 @@
+; This test makes sure that add instructions are properly eliminated.
+
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep -v OK | not grep add
+
+define i32 @test1(i32 %A) {
+ %B = add i32 %A, 0 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test2(i32 %A) {
+ %B = add i32 %A, 5 ; <i32> [#uses=1]
+ %C = add i32 %B, -5 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test3(i32 %A) {
+ %B = add i32 %A, 5 ; <i32> [#uses=1]
+ ;; This should get converted to an add
+ %C = sub i32 %B, 5 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test4(i32 %A, i32 %B) {
+ %C = sub i32 0, %A ; <i32> [#uses=1]
+ ; D = B + -A = B - A
+ %D = add i32 %B, %C ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test5(i32 %A, i32 %B) {
+ %C = sub i32 0, %A ; <i32> [#uses=1]
+ ; D = -A + B = B - A
+ %D = add i32 %C, %B ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test6(i32 %A) {
+ %B = mul i32 7, %A ; <i32> [#uses=1]
+ ; C = 7*A+A == 8*A == A << 3
+ %C = add i32 %B, %A ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test7(i32 %A) {
+ %B = mul i32 7, %A ; <i32> [#uses=1]
+ ; C = A+7*A == 8*A == A << 3
+ %C = add i32 %A, %B ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+; (A & C1)+(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
+define i32 @test8(i32 %A, i32 %B) {
+ %A1 = and i32 %A, 7 ; <i32> [#uses=1]
+ %B1 = and i32 %B, 128 ; <i32> [#uses=1]
+ %C = add i32 %A1, %B1 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test9(i32 %A) {
+ %B = shl i32 %A, 4 ; <i32> [#uses=2]
+ ; === shl int %A, 5
+ %C = add i32 %B, %B ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i1 @test10(i8 %A, i8 %b) {
+ %B = add i8 %A, %b ; <i8> [#uses=1]
+ ; === A != -b
+ %c = icmp ne i8 %B, 0 ; <i1> [#uses=1]
+ ret i1 %c
+}
+
+define i1 @test11(i8 %A) {
+ %B = add i8 %A, -1 ; <i8> [#uses=1]
+ ; === A != 1
+ %c = icmp ne i8 %B, 0 ; <i1> [#uses=1]
+ ret i1 %c
+}
+
+define i32 @test12(i32 %A, i32 %B) {
+ ; Should be transformed into shl A, 1
+ %C_OK = add i32 %B, %A ; <i32> [#uses=1]
+ br label %X
+
+X: ; preds = %0
+ %D = add i32 %C_OK, %A ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test13(i32 %A, i32 %B, i32 %C) {
+ %D_OK = add i32 %A, %B ; <i32> [#uses=1]
+ %E_OK = add i32 %D_OK, %C ; <i32> [#uses=1]
+ ;; shl A, 1
+ %F = add i32 %E_OK, %A ; <i32> [#uses=1]
+ ret i32 %F
+}
+
+define i32 @test14(i32 %offset, i32 %difference) {
+ %tmp.2 = and i32 %difference, 3 ; <i32> [#uses=1]
+ %tmp.3_OK = add i32 %tmp.2, %offset ; <i32> [#uses=1]
+ %tmp.5.mask = and i32 %difference, -4 ; <i32> [#uses=1]
+ ; == add %offset, %difference
+ %tmp.8 = add i32 %tmp.3_OK, %tmp.5.mask ; <i32> [#uses=1]
+ ret i32 %tmp.8
+}
+
+define i8 @test15(i8 %A) {
+	; Does not affect result
+ %B = add i8 %A, -64 ; <i8> [#uses=1]
+ ; Only one bit set
+ %C = and i8 %B, 16 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i8 @test16(i8 %A) {
+ ; Turn this into a XOR
+ %B = add i8 %A, 16 ; <i8> [#uses=1]
+ ; Only one bit set
+ %C = and i8 %B, 16 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i32 @test17(i32 %A) {
+ %B = xor i32 %A, -1 ; <i32> [#uses=1]
+ ; == sub int 0, %A
+ %C = add i32 %B, 1 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i8 @test18(i8 %A) {
+ %B = xor i8 %A, -1 ; <i8> [#uses=1]
+ ; == sub ubyte 16, %A
+ %C = add i8 %B, 17 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i32 @test19(i1 %C) {
+ %A = select i1 %C, i32 1000, i32 10 ; <i32> [#uses=1]
+ %V = add i32 %A, 123 ; <i32> [#uses=1]
+ ret i32 %V
+}
+
+define i32 @test20(i32 %x) {
+ %tmp.2 = xor i32 %x, -2147483648 ; <i32> [#uses=1]
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i32 %tmp.2, -2147483648 ; <i32> [#uses=1]
+ ret i32 %tmp.4
+}
+
+define i1 @test21(i32 %x) {
+ %t = add i32 %x, 4 ; <i32> [#uses=1]
+ %y = icmp eq i32 %t, 123 ; <i1> [#uses=1]
+ ret i1 %y
+}
+
+define i32 @test22(i32 %V) {
+ %V2 = add i32 %V, 10 ; <i32> [#uses=1]
+ switch i32 %V2, label %Default [
+ i32 20, label %Lab1
+ i32 30, label %Lab2
+ ]
+
+Default: ; preds = %0
+ ret i32 123
+
+Lab1: ; preds = %0
+ ret i32 12312
+
+Lab2: ; preds = %0
+ ret i32 1231231
+}
+
+define i32 @test23(i1 %C, i32 %a) {
+entry:
+ br i1 %C, label %endif, label %else
+
+else: ; preds = %entry
+ br label %endif
+
+endif: ; preds = %else, %entry
+ %b.0 = phi i32 [ 0, %entry ], [ 1, %else ] ; <i32> [#uses=1]
+ %tmp.4 = add i32 %b.0, 1 ; <i32> [#uses=1]
+ ret i32 %tmp.4
+}
+
+define i32 @test24(i32 %A) {
+ %B = add i32 %A, 1 ; <i32> [#uses=1]
+ %C = shl i32 %B, 1 ; <i32> [#uses=1]
+ %D = sub i32 %C, 2 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i64 @test25(i64 %Y) {
+ %tmp.4 = shl i64 %Y, 2 ; <i64> [#uses=1]
+ %tmp.12 = shl i64 %Y, 2 ; <i64> [#uses=1]
+ %tmp.8 = add i64 %tmp.4, %tmp.12 ; <i64> [#uses=1]
+ ret i64 %tmp.8
+}
+
+define i32 @test26(i32 %A, i32 %B) {
+ %C = add i32 %A, %B ; <i32> [#uses=1]
+ %D = sub i32 %C, %B ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test27(i1 %C, i32 %X, i32 %Y) {
+ %A = add i32 %X, %Y ; <i32> [#uses=1]
+ %B = add i32 %Y, 123 ; <i32> [#uses=1]
+ ;; Fold add through select.
+ %C.upgrd.1 = select i1 %C, i32 %A, i32 %B ; <i32> [#uses=1]
+ %D = sub i32 %C.upgrd.1, %Y ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test28(i32 %X) {
+ %Y = add i32 %X, 1234 ; <i32> [#uses=1]
+ %Z = sub i32 42, %Y ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i32 @test29(i32 %X, i32 %x) {
+ %tmp.2 = sub i32 %X, %x ; <i32> [#uses=2]
+ %tmp.2.mask = and i32 %tmp.2, 63 ; <i32> [#uses=1]
+ %tmp.6 = add i32 %tmp.2.mask, %x ; <i32> [#uses=1]
+ %tmp.7 = and i32 %tmp.6, 63 ; <i32> [#uses=1]
+ %tmp.9 = and i32 %tmp.2, -64 ; <i32> [#uses=1]
+ %tmp.10 = or i32 %tmp.7, %tmp.9 ; <i32> [#uses=1]
+ ret i32 %tmp.10
+}
+
+define i64 @test30(i64 %x) {
+ %tmp.2 = xor i64 %x, -9223372036854775808 ; <i64> [#uses=1]
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i64 %tmp.2, -9223372036854775808 ; <i64> [#uses=1]
+ ret i64 %tmp.4
+}
+
+define i32 @test31(i32 %A) {
+ %B = add i32 %A, 4 ; <i32> [#uses=1]
+ %C = mul i32 %B, 5 ; <i32> [#uses=1]
+ %D = sub i32 %C, 20 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test32(i32 %A) {
+ %B = add i32 %A, 4 ; <i32> [#uses=1]
+ %C = shl i32 %B, 2 ; <i32> [#uses=1]
+ %D = sub i32 %C, 16 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i8 @test33(i8 %A) {
+ %B = and i8 %A, -2 ; <i8> [#uses=1]
+ %C = add i8 %B, 1 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i8 @test34(i8 %A) {
+ %B = add i8 %A, 64 ; <i8> [#uses=1]
+ %C = and i8 %B, 12 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i32 @test35(i32 %a) {
+ %tmpnot = xor i32 %a, -1 ; <i32> [#uses=1]
+ %tmp2 = add i32 %tmpnot, %a ; <i32> [#uses=1]
+ ret i32 %tmp2
+}
+
+define i32 @test36(i32 %a) {
+ %x = and i32 %a, -2
+ %y = and i32 %a, -126
+ %z = add i32 %x, %y
+ %q = and i32 %z, 1 ; always zero
+ ret i32 %q
+}
diff --git a/test/Transforms/InstCombine/add2.ll b/test/Transforms/InstCombine/add2.ll
new file mode 100644
index 0000000..1cbdd3a
--- /dev/null
+++ b/test/Transforms/InstCombine/add2.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -instcombine -S | not grep add
+
+define i64 @test1(i64 %A, i32 %B) {
+ %tmp12 = zext i32 %B to i64
+ %tmp3 = shl i64 %tmp12, 32
+ %tmp5 = add i64 %tmp3, %A
+ %tmp6 = and i64 %tmp5, 123
+ ret i64 %tmp6
+}
+
+define i32 @test3(i32 %A) {
+ %B = and i32 %A, 7
+ %C = and i32 %A, 32
+ %F = add i32 %B, %C
+ ret i32 %F
+}
+
+define i32 @test4(i32 %A) {
+ %B = and i32 %A, 128
+ %C = lshr i32 %A, 30
+ %F = add i32 %B, %C
+ ret i32 %F
+}
+
diff --git a/test/Transforms/InstCombine/add3.ll b/test/Transforms/InstCombine/add3.ll
new file mode 100644
index 0000000..cde3e24
--- /dev/null
+++ b/test/Transforms/InstCombine/add3.ll
@@ -0,0 +1,21 @@
+; RUN: opt < %s -instcombine -S | grep inttoptr | count 2
+
+;; Target triple for gep raising case below.
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin8"
+
+; PR1795
+define void @test2(i32 %.val24) {
+EntryBlock:
+ add i32 %.val24, -12
+ inttoptr i32 %0 to i32*
+ store i32 1, i32* %1
+ add i32 %.val24, -16
+ inttoptr i32 %2 to i32*
+ getelementptr i32* %3, i32 1
+ load i32* %4
+ tail call i32 @callee( i32 %5 )
+ ret void
+}
+
+declare i32 @callee(i32)
diff --git a/test/Transforms/InstCombine/addnegneg.ll b/test/Transforms/InstCombine/addnegneg.ll
new file mode 100644
index 0000000..a3a09f2
--- /dev/null
+++ b/test/Transforms/InstCombine/addnegneg.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | grep { sub } | count 1
+; PR2047
+
+define i32 @l(i32 %a, i32 %b, i32 %c, i32 %d) {
+entry:
+ %b.neg = sub i32 0, %b ; <i32> [#uses=1]
+ %c.neg = sub i32 0, %c ; <i32> [#uses=1]
+ %sub4 = add i32 %c.neg, %b.neg ; <i32> [#uses=1]
+ %sub6 = add i32 %sub4, %d ; <i32> [#uses=1]
+ ret i32 %sub6
+}
+
diff --git a/test/Transforms/InstCombine/adjust-for-sminmax.ll b/test/Transforms/InstCombine/adjust-for-sminmax.ll
new file mode 100644
index 0000000..b9b6f70
--- /dev/null
+++ b/test/Transforms/InstCombine/adjust-for-sminmax.ll
@@ -0,0 +1,85 @@
+; RUN: opt < %s -instcombine -S | grep {icmp s\[lg\]t i32 %n, 0} | count 16
+
+; Instcombine should recognize that this code can be adjusted
+; to fit the canonical smax/smin pattern.
+
+define i32 @floor_a(i32 %n) {
+ %t = icmp sgt i32 %n, -1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_a(i32 %n) {
+ %t = icmp slt i32 %n, 1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @floor_b(i32 %n) {
+ %t = icmp sgt i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_b(i32 %n) {
+ %t = icmp slt i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @floor_c(i32 %n) {
+ %t = icmp sge i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_c(i32 %n) {
+ %t = icmp sle i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @floor_d(i32 %n) {
+ %t = icmp sge i32 %n, 1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_d(i32 %n) {
+ %t = icmp sle i32 %n, -1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @floor_e(i32 %n) {
+ %t = icmp sgt i32 %n, -1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_e(i32 %n) {
+ %t = icmp slt i32 %n, 1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @floor_f(i32 %n) {
+ %t = icmp sgt i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_f(i32 %n) {
+ %t = icmp slt i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @floor_g(i32 %n) {
+ %t = icmp sge i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_g(i32 %n) {
+ %t = icmp sle i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @floor_h(i32 %n) {
+ %t = icmp sge i32 %n, 1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_h(i32 %n) {
+ %t = icmp sle i32 %n, -1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
diff --git a/test/Transforms/InstCombine/align-2d-gep.ll b/test/Transforms/InstCombine/align-2d-gep.ll
new file mode 100644
index 0000000..eeca5c0
--- /dev/null
+++ b/test/Transforms/InstCombine/align-2d-gep.ll
@@ -0,0 +1,44 @@
+; RUN: opt < %s -instcombine -S | grep {align 16} | count 1
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+; A multi-dimensional array in a nested loop doing vector stores that
+; aren't yet aligned. Instcombine can understand the addressing in the
+; Nice case to prove 16 byte alignment. In the Awkward case, the inner
+; array dimension is not even, so the stores to it won't always be
+; aligned. Instcombine should prove alignment in exactly one of the two
+; stores.
+
+@Nice = global [1001 x [20000 x double]] zeroinitializer, align 32
+@Awkward = global [1001 x [20001 x double]] zeroinitializer, align 32
+
+define void @foo() nounwind {
+entry:
+ br label %bb7.outer
+
+bb7.outer:
+ %i = phi i64 [ 0, %entry ], [ %indvar.next26, %bb11 ]
+ br label %bb1
+
+bb1:
+ %j = phi i64 [ 0, %bb7.outer ], [ %indvar.next, %bb1 ]
+
+ %t4 = getelementptr [1001 x [20000 x double]]* @Nice, i64 0, i64 %i, i64 %j
+ %q = bitcast double* %t4 to <2 x double>*
+ store <2 x double><double 0.0, double 0.0>, <2 x double>* %q, align 8
+
+ %s4 = getelementptr [1001 x [20001 x double]]* @Awkward, i64 0, i64 %i, i64 %j
+ %r = bitcast double* %s4 to <2 x double>*
+ store <2 x double><double 0.0, double 0.0>, <2 x double>* %r, align 8
+
+ %indvar.next = add i64 %j, 2
+ %exitcond = icmp eq i64 %indvar.next, 557
+ br i1 %exitcond, label %bb11, label %bb1
+
+bb11:
+ %indvar.next26 = add i64 %i, 1
+ %exitcond27 = icmp eq i64 %indvar.next26, 991
+ br i1 %exitcond27, label %return.split, label %bb7.outer
+
+return.split:
+ ret void
+}
diff --git a/test/Transforms/InstCombine/align-addr.ll b/test/Transforms/InstCombine/align-addr.ll
new file mode 100644
index 0000000..d8ad5a9
--- /dev/null
+++ b/test/Transforms/InstCombine/align-addr.ll
@@ -0,0 +1,31 @@
+; RUN: opt < %s -instcombine -S | grep {align 16} | count 1
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+; Instcombine should be able to prove vector alignment in the
+; presence of a few mild address computation tricks.
+
+define void @foo(i8* %b, i64 %n, i64 %u, i64 %y) nounwind {
+entry:
+ %c = ptrtoint i8* %b to i64
+ %d = and i64 %c, -16
+ %e = inttoptr i64 %d to double*
+ %v = mul i64 %u, 2
+ %z = and i64 %y, -2
+ %t1421 = icmp eq i64 %n, 0
+ br i1 %t1421, label %return, label %bb
+
+bb:
+ %i = phi i64 [ %indvar.next, %bb ], [ 20, %entry ]
+ %j = mul i64 %i, %v
+ %h = add i64 %j, %z
+ %t8 = getelementptr double* %e, i64 %h
+ %p = bitcast double* %t8 to <2 x double>*
+ store <2 x double><double 0.0, double 0.0>, <2 x double>* %p, align 8
+ %indvar.next = add i64 %i, 1
+ %exitcond = icmp eq i64 %indvar.next, %n
+ br i1 %exitcond, label %return, label %bb
+
+return:
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/align-external.ll b/test/Transforms/InstCombine/align-external.ll
new file mode 100644
index 0000000..6e8ad87
--- /dev/null
+++ b/test/Transforms/InstCombine/align-external.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; Don't assume that external global variables have their preferred
+; alignment. They may only have the ABI minimum alignment.
+
+; CHECK: %s = shl i64 %a, 3
+; CHECK: %r = or i64 %s, ptrtoint (i32* @A to i64)
+; CHECK: %q = add i64 %r, 1
+; CHECK: ret i64 %q
+
+target datalayout = "-i32:8:32"
+
+@A = external global i32
+@B = external global i32
+
+define i64 @foo(i64 %a) {
+ %t = ptrtoint i32* @A to i64
+ %s = shl i64 %a, 3
+ %r = or i64 %t, %s
+ %q = add i64 %r, 1
+ ret i64 %q
+}
diff --git a/test/Transforms/InstCombine/align-inc.ll b/test/Transforms/InstCombine/align-inc.ll
new file mode 100644
index 0000000..71512b3
--- /dev/null
+++ b/test/Transforms/InstCombine/align-inc.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | grep {GLOBAL.*align 16}
+; RUN: opt < %s -instcombine -S | grep {tmp = load}
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+@GLOBAL = internal global [4 x i32] zeroinitializer
+
+define <16 x i8> @foo(<2 x i64> %x) {
+entry:
+ %tmp = load <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*), align 1
+ ret <16 x i8> %tmp
+}
+
diff --git a/test/Transforms/InstCombine/alloca.ll b/test/Transforms/InstCombine/alloca.ll
new file mode 100644
index 0000000..b9add4d
--- /dev/null
+++ b/test/Transforms/InstCombine/alloca.ll
@@ -0,0 +1,32 @@
+; Zero byte allocas should be deleted.
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep alloca
+; END.
+
+declare void @use(...)
+
+define void @test() {
+ %X = alloca [0 x i32] ; <[0 x i32]*> [#uses=1]
+ call void (...)* @use( [0 x i32]* %X )
+ %Y = alloca i32, i32 0 ; <i32*> [#uses=1]
+ call void (...)* @use( i32* %Y )
+ %Z = alloca { } ; <{ }*> [#uses=1]
+ call void (...)* @use( { }* %Z )
+ ret void
+}
+
+define void @test2() {
+ %A = alloca i32 ; <i32*> [#uses=1]
+ store i32 123, i32* %A
+ ret void
+}
+
+define void @test3() {
+ %A = alloca { i32 } ; <{ i32 }*> [#uses=1]
+ %B = getelementptr { i32 }* %A, i32 0, i32 0 ; <i32*> [#uses=1]
+ store i32 123, i32* %B
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/and-compare.ll b/test/Transforms/InstCombine/and-compare.ll
new file mode 100644
index 0000000..c30a245
--- /dev/null
+++ b/test/Transforms/InstCombine/and-compare.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep and | count 1
+
+; Should be optimized to one and.
+define i1 @test1(i32 %a, i32 %b) {
+ %tmp1 = and i32 %a, 65280 ; <i32> [#uses=1]
+ %tmp3 = and i32 %b, 65280 ; <i32> [#uses=1]
+ %tmp = icmp ne i32 %tmp1, %tmp3 ; <i1> [#uses=1]
+ ret i1 %tmp
+}
+
diff --git a/test/Transforms/InstCombine/and-fcmp.ll b/test/Transforms/InstCombine/and-fcmp.ll
new file mode 100644
index 0000000..91868d1
--- /dev/null
+++ b/test/Transforms/InstCombine/and-fcmp.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -instcombine -S | grep fcmp | count 3
+; RUN: opt < %s -instcombine -S | grep ret | grep 0
+
+define zeroext i8 @t1(float %x, float %y) nounwind {
+ %a = fcmp ueq float %x, %y
+ %b = fcmp ord float %x, %y
+ %c = and i1 %a, %b
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
+
+define zeroext i8 @t2(float %x, float %y) nounwind {
+ %a = fcmp olt float %x, %y
+ %b = fcmp ord float %x, %y
+ %c = and i1 %a, %b
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
+
+define zeroext i8 @t3(float %x, float %y) nounwind {
+ %a = fcmp oge float %x, %y
+ %b = fcmp uno float %x, %y
+ %c = and i1 %a, %b
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
+
+define zeroext i8 @t4(float %x, float %y) nounwind {
+ %a = fcmp one float %y, %x
+ %b = fcmp ord float %x, %y
+ %c = and i1 %a, %b
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
diff --git a/test/Transforms/InstCombine/and-not-or.ll b/test/Transforms/InstCombine/and-not-or.ll
new file mode 100644
index 0000000..9dce7b4
--- /dev/null
+++ b/test/Transforms/InstCombine/and-not-or.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -instcombine -S | grep {and i32 %x, %y} | count 4
+; RUN: opt < %s -instcombine -S | not grep {or}
+
+define i32 @func1(i32 %x, i32 %y) nounwind {
+entry:
+ %n = xor i32 %y, -1
+ %o = or i32 %n, %x
+ %a = and i32 %o, %y
+ ret i32 %a
+}
+
+define i32 @func2(i32 %x, i32 %y) nounwind {
+entry:
+ %n = xor i32 %y, -1
+ %o = or i32 %x, %n
+ %a = and i32 %o, %y
+ ret i32 %a
+}
+
+define i32 @func3(i32 %x, i32 %y) nounwind {
+entry:
+ %n = xor i32 %y, -1
+ %o = or i32 %n, %x
+ %a = and i32 %y, %o
+ ret i32 %a
+}
+
+define i32 @func4(i32 %x, i32 %y) nounwind {
+entry:
+ %n = xor i32 %y, -1
+ %o = or i32 %x, %n
+ %a = and i32 %y, %o
+ ret i32 %a
+}
diff --git a/test/Transforms/InstCombine/and-or-and.ll b/test/Transforms/InstCombine/and-or-and.ll
new file mode 100644
index 0000000..216cd46
--- /dev/null
+++ b/test/Transforms/InstCombine/and-or-and.ll
@@ -0,0 +1,61 @@
+; If we have an 'and' of the result of an 'or', and one of the 'or' operands
+; cannot have contributed any of the resultant bits, delete the or. This
+; occurs for very common C/C++ code like this:
+;
+; struct foo { int A : 16; int B : 16; };
+; void test(struct foo *F, int X, int Y) {
+; F->A = X; F->B = Y;
+; }
+;
+; Which corresponds to test1.
+
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep {or }
+
+define i32 @test1(i32 %X, i32 %Y) {
+ %A = and i32 %X, 7 ; <i32> [#uses=1]
+ %B = and i32 %Y, 8 ; <i32> [#uses=1]
+ %C = or i32 %A, %B ; <i32> [#uses=1]
+ ;; This cannot include any bits from %Y!
+ %D = and i32 %C, 7 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test2(i32 %X, i8 %Y) {
+ %B = zext i8 %Y to i32 ; <i32> [#uses=1]
+ %C = or i32 %X, %B ; <i32> [#uses=1]
+ ;; This cannot include any bits from %Y!
+ %D = and i32 %C, 65536 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test3(i32 %X, i32 %Y) {
+ %B = shl i32 %Y, 1 ; <i32> [#uses=1]
+ %C = or i32 %X, %B ; <i32> [#uses=1]
+ ;; This cannot include any bits from %Y!
+ %D = and i32 %C, 1 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test4(i32 %X, i32 %Y) {
+ %B = lshr i32 %Y, 31 ; <i32> [#uses=1]
+ %C = or i32 %X, %B ; <i32> [#uses=1]
+ ;; This cannot include any bits from %Y!
+ %D = and i32 %C, 2 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @or_test1(i32 %X, i32 %Y) {
+ %A = and i32 %X, 1 ; <i32> [#uses=1]
+ ;; This cannot include any bits from X!
+ %B = or i32 %A, 1 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i8 @or_test2(i8 %X, i8 %Y) {
+ %A = shl i8 %X, 7 ; <i8> [#uses=1]
+ ;; This cannot include any bits from X!
+ %B = or i8 %A, -128 ; <i8> [#uses=1]
+ ret i8 %B
+}
+
diff --git a/test/Transforms/InstCombine/and-or-not.ll b/test/Transforms/InstCombine/and-or-not.ll
new file mode 100644
index 0000000..37ec3bc
--- /dev/null
+++ b/test/Transforms/InstCombine/and-or-not.ll
@@ -0,0 +1,46 @@
+; RUN: opt < %s -instcombine -S | grep xor | count 4
+; RUN: opt < %s -instcombine -S | not grep and
+; RUN: opt < %s -instcombine -S | not grep { or}
+
+; PR1510
+
+; These are all equivalent to A^B
+
+define i32 @test1(i32 %a, i32 %b) {
+entry:
+ %tmp3 = or i32 %b, %a ; <i32> [#uses=1]
+ %tmp3not = xor i32 %tmp3, -1 ; <i32> [#uses=1]
+ %tmp6 = and i32 %b, %a ; <i32> [#uses=1]
+ %tmp7 = or i32 %tmp6, %tmp3not ; <i32> [#uses=1]
+ %tmp7not = xor i32 %tmp7, -1 ; <i32> [#uses=1]
+ ret i32 %tmp7not
+}
+
+define i32 @test2(i32 %a, i32 %b) {
+entry:
+ %tmp3 = or i32 %b, %a ; <i32> [#uses=1]
+ %tmp6 = and i32 %b, %a ; <i32> [#uses=1]
+ %tmp6not = xor i32 %tmp6, -1 ; <i32> [#uses=1]
+ %tmp7 = and i32 %tmp3, %tmp6not ; <i32> [#uses=1]
+ ret i32 %tmp7
+}
+
+define <4 x i32> @test3(<4 x i32> %a, <4 x i32> %b) {
+entry:
+ %tmp3 = or <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
+ %tmp3not = xor <4 x i32> %tmp3, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
+ %tmp6 = and <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
+ %tmp7 = or <4 x i32> %tmp6, %tmp3not ; <<4 x i32>> [#uses=1]
+ %tmp7not = xor <4 x i32> %tmp7, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
+ ret <4 x i32> %tmp7not
+}
+
+define <4 x i32> @test4(<4 x i32> %a, <4 x i32> %b) {
+entry:
+ %tmp3 = or <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
+ %tmp6 = and <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
+ %tmp6not = xor <4 x i32> %tmp6, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
+ %tmp7 = and <4 x i32> %tmp3, %tmp6not ; <<4 x i32>> [#uses=1]
+ ret <4 x i32> %tmp7
+}
+
diff --git a/test/Transforms/InstCombine/and-or.ll b/test/Transforms/InstCombine/and-or.ll
new file mode 100644
index 0000000..b4224b3
--- /dev/null
+++ b/test/Transforms/InstCombine/and-or.ll
@@ -0,0 +1,39 @@
+; RUN: opt < %s -instcombine -S | grep {and i32 %a, 1} | count 4
+; RUN: opt < %s -instcombine -S | grep {or i32 %0, %b} | count 4
+
+
+define i32 @func1(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %0 = or i32 %b, %a ; <i32> [#uses=1]
+ %1 = and i32 %0, 1 ; <i32> [#uses=1]
+ %2 = and i32 %b, -2 ; <i32> [#uses=1]
+ %3 = or i32 %1, %2 ; <i32> [#uses=1]
+ ret i32 %3
+}
+
+define i32 @func2(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %0 = or i32 %a, %b ; <i32> [#uses=1]
+ %1 = and i32 1, %0 ; <i32> [#uses=1]
+ %2 = and i32 -2, %b ; <i32> [#uses=1]
+ %3 = or i32 %1, %2 ; <i32> [#uses=1]
+ ret i32 %3
+}
+
+define i32 @func3(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %0 = or i32 %b, %a ; <i32> [#uses=1]
+ %1 = and i32 %0, 1 ; <i32> [#uses=1]
+ %2 = and i32 %b, -2 ; <i32> [#uses=1]
+ %3 = or i32 %2, %1 ; <i32> [#uses=1]
+ ret i32 %3
+}
+
+define i32 @func4(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %0 = or i32 %a, %b ; <i32> [#uses=1]
+ %1 = and i32 1, %0 ; <i32> [#uses=1]
+ %2 = and i32 -2, %b ; <i32> [#uses=1]
+ %3 = or i32 %2, %1 ; <i32> [#uses=1]
+ ret i32 %3
+}
diff --git a/test/Transforms/InstCombine/and-xor-merge.ll b/test/Transforms/InstCombine/and-xor-merge.ll
new file mode 100644
index 0000000..e432a9a
--- /dev/null
+++ b/test/Transforms/InstCombine/and-xor-merge.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine -S | grep and | count 1
+; RUN: opt < %s -instcombine -S | grep xor | count 2
+
+; (x&z) ^ (y&z) -> (x^y)&z
+define i32 @test1(i32 %x, i32 %y, i32 %z) {
+ %tmp3 = and i32 %z, %x
+ %tmp6 = and i32 %z, %y
+ %tmp7 = xor i32 %tmp3, %tmp6
+ ret i32 %tmp7
+}
+
+; (x & y) ^ (x|y) -> x^y
+define i32 @test2(i32 %x, i32 %y, i32 %z) {
+ %tmp3 = and i32 %y, %x
+ %tmp6 = or i32 %y, %x
+ %tmp7 = xor i32 %tmp3, %tmp6
+ ret i32 %tmp7
+}
+
diff --git a/test/Transforms/InstCombine/and.ll b/test/Transforms/InstCombine/and.ll
new file mode 100644
index 0000000..8492df9
--- /dev/null
+++ b/test/Transforms/InstCombine/and.ll
@@ -0,0 +1,255 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+
+; RUN: opt < %s -instcombine -S | not grep and
+
+define i32 @test1(i32 %A) {
+ ; zero result
+ %B = and i32 %A, 0 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test2(i32 %A) {
+ ; noop
+ %B = and i32 %A, -1 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i1 @test3(i1 %A) {
+ ; always = false
+ %B = and i1 %A, false ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test4(i1 %A) {
+ ; noop
+ %B = and i1 %A, true ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i32 @test5(i32 %A) {
+ %B = and i32 %A, %A ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i1 @test6(i1 %A) {
+ %B = and i1 %A, %A ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+; A & ~A == 0
+define i32 @test7(i32 %A) {
+ %NotA = xor i32 %A, -1 ; <i32> [#uses=1]
+ %B = and i32 %A, %NotA ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+; AND associates
+define i8 @test8(i8 %A) {
+ %B = and i8 %A, 3 ; <i8> [#uses=1]
+ %C = and i8 %B, 4 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i1 @test9(i32 %A) {
+ ; Test of sign bit, convert to setle %A, 0
+ %B = and i32 %A, -2147483648 ; <i32> [#uses=1]
+ %C = icmp ne i32 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test9a(i32 %A) {
+ ; Test of sign bit, convert to setle %A, 0
+ %B = and i32 %A, -2147483648 ; <i32> [#uses=1]
+ %C = icmp ne i32 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i32 @test10(i32 %A) {
+ %B = and i32 %A, 12 ; <i32> [#uses=1]
+ %C = xor i32 %B, 15 ; <i32> [#uses=1]
+ ; (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
+ %D = and i32 %C, 1 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test11(i32 %A, i32* %P) {
+ %B = or i32 %A, 3 ; <i32> [#uses=1]
+ %C = xor i32 %B, 12 ; <i32> [#uses=2]
+ ; additional use of C
+ store i32 %C, i32* %P
+ ; %C = and uint %B, 3 --> 3
+ %D = and i32 %C, 3 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i1 @test12(i32 %A, i32 %B) {
+ %C1 = icmp ult i32 %A, %B ; <i1> [#uses=1]
+ %C2 = icmp ule i32 %A, %B ; <i1> [#uses=1]
+ ; (A < B) & (A <= B) === (A < B)
+ %D = and i1 %C1, %C2 ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test13(i32 %A, i32 %B) {
+ %C1 = icmp ult i32 %A, %B ; <i1> [#uses=1]
+ %C2 = icmp ugt i32 %A, %B ; <i1> [#uses=1]
+ ; (A < B) & (A > B) === false
+ %D = and i1 %C1, %C2 ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test14(i8 %A) {
+ %B = and i8 %A, -128 ; <i8> [#uses=1]
+ %C = icmp ne i8 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i8 @test15(i8 %A) {
+ %B = lshr i8 %A, 7 ; <i8> [#uses=1]
+ ; Always equals zero
+ %C = and i8 %B, 2 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i8 @test16(i8 %A) {
+ %B = shl i8 %A, 2 ; <i8> [#uses=1]
+ %C = and i8 %B, 3 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+;; ~(~X & Y) --> (X | ~Y)
+define i8 @test17(i8 %X, i8 %Y) {
+ %B = xor i8 %X, -1 ; <i8> [#uses=1]
+ %C = and i8 %B, %Y ; <i8> [#uses=1]
+ %D = xor i8 %C, -1 ; <i8> [#uses=1]
+ ret i8 %D
+}
+
+define i1 @test18(i32 %A) {
+ %B = and i32 %A, -128 ; <i32> [#uses=1]
+ ;; C >= 128
+ %C = icmp ne i32 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test18a(i8 %A) {
+ %B = and i8 %A, -2 ; <i8> [#uses=1]
+ %C = icmp eq i8 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i32 @test19(i32 %A) {
+ %B = shl i32 %A, 3 ; <i32> [#uses=1]
+ ;; Clearing a zero bit
+ %C = and i32 %B, -2 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i8 @test20(i8 %A) {
+ %C = lshr i8 %A, 7 ; <i8> [#uses=1]
+ ;; Unneeded
+ %D = and i8 %C, 1 ; <i8> [#uses=1]
+ ret i8 %D
+}
+
+define i1 @test22(i32 %A) {
+ %B = icmp eq i32 %A, 1 ; <i1> [#uses=1]
+ %C = icmp sge i32 %A, 3 ; <i1> [#uses=1]
+ ;; false
+ %D = and i1 %B, %C ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test23(i32 %A) {
+ %B = icmp sgt i32 %A, 1 ; <i1> [#uses=1]
+ %C = icmp sle i32 %A, 2 ; <i1> [#uses=1]
+ ;; A == 2
+ %D = and i1 %B, %C ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test24(i32 %A) {
+ %B = icmp sgt i32 %A, 1 ; <i1> [#uses=1]
+ %C = icmp ne i32 %A, 2 ; <i1> [#uses=1]
+ ;; A > 2
+ %D = and i1 %B, %C ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test25(i32 %A) {
+ %B = icmp sge i32 %A, 50 ; <i1> [#uses=1]
+ %C = icmp slt i32 %A, 100 ; <i1> [#uses=1]
+ ;; (A-50) <u 50
+ %D = and i1 %B, %C ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test26(i32 %A) {
+ %B = icmp ne i32 %A, 50 ; <i1> [#uses=1]
+ %C = icmp ne i32 %A, 51 ; <i1> [#uses=1]
+ ;; (A-50) > 1
+ %D = and i1 %B, %C ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i8 @test27(i8 %A) {
+ %B = and i8 %A, 4 ; <i8> [#uses=1]
+ %C = sub i8 %B, 16 ; <i8> [#uses=1]
+ ;; 0xF0
+ %D = and i8 %C, -16 ; <i8> [#uses=1]
+ %E = add i8 %D, 16 ; <i8> [#uses=1]
+ ret i8 %E
+}
+
+;; This is just a zero extending shr.
+define i32 @test28(i32 %X) {
+ ;; Sign extend
+ %Y = ashr i32 %X, 24 ; <i32> [#uses=1]
+ ;; Mask out sign bits
+ %Z = and i32 %Y, 255 ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i32 @test29(i8 %X) {
+ %Y = zext i8 %X to i32 ; <i32> [#uses=1]
+ ;; Zero extend makes this unneeded.
+ %Z = and i32 %Y, 255 ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i32 @test30(i1 %X) {
+ %Y = zext i1 %X to i32 ; <i32> [#uses=1]
+ %Z = and i32 %Y, 1 ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i32 @test31(i1 %X) {
+ %Y = zext i1 %X to i32 ; <i32> [#uses=1]
+ %Z = shl i32 %Y, 4 ; <i32> [#uses=1]
+ %A = and i32 %Z, 16 ; <i32> [#uses=1]
+ ret i32 %A
+}
+
+define i32 @test32(i32 %In) {
+ %Y = and i32 %In, 16 ; <i32> [#uses=1]
+ %Z = lshr i32 %Y, 2 ; <i32> [#uses=1]
+ %A = and i32 %Z, 1 ; <i32> [#uses=1]
+ ret i32 %A
+}
+
+;; Code corresponding to one-bit bitfield ^1.
+define i32 @test33(i32 %b) {
+ %tmp.4.mask = and i32 %b, 1 ; <i32> [#uses=1]
+ %tmp.10 = xor i32 %tmp.4.mask, 1 ; <i32> [#uses=1]
+ %tmp.12 = and i32 %b, -2 ; <i32> [#uses=1]
+ %tmp.13 = or i32 %tmp.12, %tmp.10 ; <i32> [#uses=1]
+ ret i32 %tmp.13
+}
+
+define i32 @test34(i32 %A, i32 %B) {
+ %tmp.2 = or i32 %B, %A ; <i32> [#uses=1]
+ %tmp.4 = and i32 %tmp.2, %B ; <i32> [#uses=1]
+ ret i32 %tmp.4
+}
+
diff --git a/test/Transforms/InstCombine/and2.ll b/test/Transforms/InstCombine/and2.ll
new file mode 100644
index 0000000..0af9bfa
--- /dev/null
+++ b/test/Transforms/InstCombine/and2.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | not grep and
+
+
+; PR1738
+define i1 @test1(double %X, double %Y) {
+ %tmp9 = fcmp ord double %X, 0.000000e+00
+ %tmp13 = fcmp ord double %Y, 0.000000e+00
+ %bothcond = and i1 %tmp13, %tmp9
+ ret i1 %bothcond
+}
+
+
diff --git a/test/Transforms/InstCombine/apint-add1.ll b/test/Transforms/InstCombine/apint-add1.ll
new file mode 100644
index 0000000..02f1baf
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-add1.ll
@@ -0,0 +1,34 @@
+; This test makes sure that add instructions are properly eliminated.
+; This test is for Integer BitWidth <= 64 && BitWidth % 8 != 0.
+
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep -v OK | not grep add
+
+
+define i1 @test1(i1 %x) {
+ %tmp.2 = xor i1 %x, 1
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i1 %tmp.2, 1
+ ret i1 %tmp.4
+}
+
+define i47 @test2(i47 %x) {
+ %tmp.2 = xor i47 %x, 70368744177664
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i47 %tmp.2, 70368744177664
+ ret i47 %tmp.4
+}
+
+define i15 @test3(i15 %x) {
+ %tmp.2 = xor i15 %x, 16384
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i15 %tmp.2, 16384
+ ret i15 %tmp.4
+}
+
+define i49 @test6(i49 %x) {
+ ;; (x & 254)+1 -> (x & 254)|1
+ %tmp.2 = and i49 %x, 562949953421310
+ %tmp.4 = add i49 %tmp.2, 1
+ ret i49 %tmp.4
+}
diff --git a/test/Transforms/InstCombine/apint-add2.ll b/test/Transforms/InstCombine/apint-add2.ll
new file mode 100644
index 0000000..913a70f
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-add2.ll
@@ -0,0 +1,46 @@
+; This test makes sure that add instructions are properly eliminated.
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep -v OK | not grep add
+; END.
+
+define i111 @test1(i111 %x) {
+ %tmp.2 = shl i111 1, 110
+ %tmp.4 = xor i111 %x, %tmp.2
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.6 = add i111 %tmp.4, %tmp.2
+ ret i111 %tmp.6
+}
+
+define i65 @test2(i65 %x) {
+ %tmp.0 = shl i65 1, 64
+ %tmp.2 = xor i65 %x, %tmp.0
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i65 %tmp.2, %tmp.0
+ ret i65 %tmp.4
+}
+
+define i1024 @test3(i1024 %x) {
+ %tmp.0 = shl i1024 1, 1023
+ %tmp.2 = xor i1024 %x, %tmp.0
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i1024 %tmp.2, %tmp.0
+ ret i1024 %tmp.4
+}
+
+define i128 @test4(i128 %x) {
+ ;; If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
+ %tmp.5 = shl i128 1, 127
+ %tmp.1 = ashr i128 %tmp.5, 120
+ %tmp.2 = xor i128 %x, %tmp.1
+ %tmp.4 = add i128 %tmp.2, %tmp.5
+ ret i128 %tmp.4
+}
+
+define i77 @test6(i77 %x) {
+ ;; (x & 254)+1 -> (x & 254)|1
+ %tmp.2 = and i77 %x, 562949953421310
+ %tmp.4 = add i77 %tmp.2, 1
+ ret i77 %tmp.4
+}
diff --git a/test/Transforms/InstCombine/apint-and-compare.ll b/test/Transforms/InstCombine/apint-and-compare.ll
new file mode 100644
index 0000000..53e591e
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-and-compare.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine -S | grep and | count 2
+
+; Should be optimized to one and.
+define i1 @test1(i33 %a, i33 %b) {
+ %tmp1 = and i33 %a, 65280
+ %tmp3 = and i33 %b, 65280
+ %tmp = icmp ne i33 %tmp1, %tmp3
+ ret i1 %tmp
+}
+
+define i1 @test2(i999 %a, i999 %b) {
+ %tmp1 = and i999 %a, 65280
+ %tmp3 = and i999 %b, 65280
+ %tmp = icmp ne i999 %tmp1, %tmp3
+ ret i1 %tmp
+}
diff --git a/test/Transforms/InstCombine/apint-and-or-and.ll b/test/Transforms/InstCombine/apint-and-or-and.ll
new file mode 100644
index 0000000..17d29b6
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-and-or-and.ll
@@ -0,0 +1,50 @@
+; If we have an 'and' of the result of an 'or', and one of the 'or' operands
+; cannot have contributed any of the resultant bits, delete the or. This
+; occurs for very common C/C++ code like this:
+;
+; struct foo { int A : 16; int B : 16; };
+; void test(struct foo *F, int X, int Y) {
+; F->A = X; F->B = Y;
+; }
+;
+; Which corresponds to test1.
+;
+; This tests arbitrary precision integers.
+
+; RUN: opt < %s -instcombine -S | not grep {or }
+; END.
+
+define i17 @test1(i17 %X, i17 %Y) {
+ %A = and i17 %X, 7
+ %B = and i17 %Y, 8
+ %C = or i17 %A, %B
+ %D = and i17 %C, 7 ;; This cannot include any bits from %Y!
+ ret i17 %D
+}
+
+define i49 @test3(i49 %X, i49 %Y) {
+ %B = shl i49 %Y, 1
+ %C = or i49 %X, %B
+ %D = and i49 %C, 1 ;; This cannot include any bits from %Y!
+ ret i49 %D
+}
+
+define i67 @test4(i67 %X, i67 %Y) {
+ %B = lshr i67 %Y, 66
+ %C = or i67 %X, %B
+ %D = and i67 %C, 2 ;; This cannot include any bits from %Y!
+ ret i67 %D
+}
+
+define i231 @or_test1(i231 %X, i231 %Y) {
+ %A = and i231 %X, 1
+ %B = or i231 %A, 1 ;; This cannot include any bits from X!
+ ret i231 %B
+}
+
+define i7 @or_test2(i7 %X, i7 %Y) {
+ %A = shl i7 %X, 6
+ %B = or i7 %A, 64 ;; This cannot include any bits from X!
+ ret i7 %B
+}
+
diff --git a/test/Transforms/InstCombine/apint-and-xor-merge.ll b/test/Transforms/InstCombine/apint-and-xor-merge.ll
new file mode 100644
index 0000000..8adffde
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-and-xor-merge.ll
@@ -0,0 +1,22 @@
+; This test case checks that the merge of and/xor can work on arbitrary
+; precision integers.
+
+; RUN: opt < %s -instcombine -S | grep and | count 1
+; RUN: opt < %s -instcombine -S | grep xor | count 2
+
+; (x &z ) ^ (y & z) -> (x ^ y) & z
+define i57 @test1(i57 %x, i57 %y, i57 %z) {
+ %tmp3 = and i57 %z, %x
+ %tmp6 = and i57 %z, %y
+ %tmp7 = xor i57 %tmp3, %tmp6
+ ret i57 %tmp7
+}
+
+; (x & y) ^ (x | y) -> x ^ y
+define i23 @test2(i23 %x, i23 %y, i23 %z) {
+ %tmp3 = and i23 %y, %x
+ %tmp6 = or i23 %y, %x
+ %tmp7 = xor i23 %tmp3, %tmp6
+ ret i23 %tmp7
+}
+
diff --git a/test/Transforms/InstCombine/apint-and1.ll b/test/Transforms/InstCombine/apint-and1.ll
new file mode 100644
index 0000000..cd4cbb9
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-and1.ll
@@ -0,0 +1,57 @@
+; This test makes sure that and instructions are properly eliminated.
+; This test is for Integer BitWidth <= 64 && BitWidth % 8 != 0.
+
+; RUN: opt < %s -instcombine -S | not grep {and }
+; END.
+
+define i39 @test0(i39 %A) {
+ %B = and i39 %A, 0 ; zero result
+ ret i39 %B
+}
+
+define i47 @test1(i47 %A, i47 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i47 %A, -1
+ %NotB = xor i47 %B, -1
+ %C1 = and i47 %NotA, %NotB
+ ret i47 %C1
+}
+
+define i15 @test2(i15 %x) {
+ %tmp.2 = and i15 %x, -1 ; noop
+ ret i15 %tmp.2
+}
+
+define i23 @test3(i23 %x) {
+ %tmp.0 = and i23 %x, 127
+ %tmp.2 = and i23 %tmp.0, 128
+ ret i23 %tmp.2
+}
+
+define i1 @test4(i37 %x) {
+ %A = and i37 %x, -2147483648
+ %B = icmp ne i37 %A, 0
+ ret i1 %B
+}
+
+define i7 @test5(i7 %A, i7* %P) {
+ %B = or i7 %A, 3
+ %C = xor i7 %B, 12
+ store i7 %C, i7* %P
+ %r = and i7 %C, 3
+ ret i7 %r
+}
+
+define i7 @test6(i7 %A, i7 %B) {
+ ;; ~(~X & Y) --> (X | ~Y)
+ %t0 = xor i7 %A, -1
+ %t1 = and i7 %t0, %B
+ %r = xor i7 %t1, -1
+ ret i7 %r
+}
+
+define i47 @test7(i47 %A) {
+ %X = ashr i47 %A, 39 ;; sign extend
+ %C1 = and i47 %X, 255
+ ret i47 %C1
+}
diff --git a/test/Transforms/InstCombine/apint-and2.ll b/test/Transforms/InstCombine/apint-and2.ll
new file mode 100644
index 0000000..ae74472
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-and2.ll
@@ -0,0 +1,82 @@
+; This test makes sure that and instructions are properly eliminated.
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+
+; RUN: opt < %s -instcombine -S | not grep {and }
+; END.
+
+
+define i999 @test0(i999 %A) {
+ %B = and i999 %A, 0 ; zero result
+ ret i999 %B
+}
+
+define i477 @test1(i477 %A, i477 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i477 %A, -1
+ %NotB = xor i477 %B, -1
+ %C1 = and i477 %NotA, %NotB
+ ret i477 %C1
+}
+
+define i129 @tst(i129 %A, i129 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i129 %A, -1
+ %NotB = xor i129 %B, -1
+ %C1 = and i129 %NotA, %NotB
+ ret i129 %C1
+}
+
+define i65 @test(i65 %A, i65 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i65 %A, -1
+ %NotB = xor i65 -1, %B
+ %C1 = and i65 %NotA, %NotB
+ ret i65 %C1
+}
+
+define i66 @tes(i66 %A, i66 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i66 %A, -1
+ %NotB = xor i66 %B, -1
+ %C1 = and i66 %NotA, %NotB
+ ret i66 %C1
+}
+
+define i1005 @test2(i1005 %x) {
+ %tmp.2 = and i1005 %x, -1 ; noop
+ ret i1005 %tmp.2
+}
+
+define i123 @test3(i123 %x) {
+ %tmp.0 = and i123 %x, 127
+ %tmp.2 = and i123 %tmp.0, 128
+ ret i123 %tmp.2
+}
+
+define i1 @test4(i737 %x) {
+ %A = and i737 %x, -2147483648
+ %B = icmp ne i737 %A, 0
+ ret i1 %B
+}
+
+define i117 @test5(i117 %A, i117* %P) {
+ %B = or i117 %A, 3
+ %C = xor i117 %B, 12
+ store i117 %C, i117* %P
+ %r = and i117 %C, 3
+ ret i117 %r
+}
+
+define i117 @test6(i117 %A, i117 %B) {
+ ;; ~(~X & Y) --> (X | ~Y)
+ %t0 = xor i117 %A, -1
+ %t1 = and i117 %t0, %B
+ %r = xor i117 %t1, -1
+ ret i117 %r
+}
+
+define i1024 @test7(i1024 %A) {
+ %X = ashr i1024 %A, 1016 ;; sign extend
+ %C1 = and i1024 %X, 255
+ ret i1024 %C1
+}
diff --git a/test/Transforms/InstCombine/apint-call-cast-target.ll b/test/Transforms/InstCombine/apint-call-cast-target.ll
new file mode 100644
index 0000000..fe336de
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-call-cast-target.ll
@@ -0,0 +1,17 @@
+; RUN: opt < %s -instcombine -S | grep call | not grep bitcast
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+
+
+define i32 @main() {
+entry:
+ %tmp = call i32 bitcast (i7* (i999*)* @ctime to i32 (i99*)*)( i99* null )
+ ret i32 %tmp
+}
+
+define i7* @ctime(i999*) {
+entry:
+ %tmp = call i7* bitcast (i32 ()* @main to i7* ()*)( )
+ ret i7* %tmp
+}
diff --git a/test/Transforms/InstCombine/apint-cast-and-cast.ll b/test/Transforms/InstCombine/apint-cast-and-cast.ll
new file mode 100644
index 0000000..251d78f
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-cast-and-cast.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine -S | not grep bitcast
+
+define i19 @test1(i43 %val) {
+ %t1 = bitcast i43 %val to i43
+ %t2 = and i43 %t1, 1
+ %t3 = trunc i43 %t2 to i19
+ ret i19 %t3
+}
+
+define i73 @test2(i677 %val) {
+ %t1 = bitcast i677 %val to i677
+ %t2 = and i677 %t1, 1
+ %t3 = trunc i677 %t2 to i73
+ ret i73 %t3
+}
diff --git a/test/Transforms/InstCombine/apint-cast-cast-to-and.ll b/test/Transforms/InstCombine/apint-cast-cast-to-and.ll
new file mode 100644
index 0000000..b2069a9
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-cast-cast-to-and.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | not grep i41
+
+define i61 @test1(i61 %X) {
+	%Y = trunc i61 %X to i41 ;; Turns into an AND
+ %Z = zext i41 %Y to i61
+ ret i61 %Z
+}
+
diff --git a/test/Transforms/InstCombine/apint-cast.ll b/test/Transforms/InstCombine/apint-cast.ll
new file mode 100644
index 0000000..85e7a4f
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-cast.ll
@@ -0,0 +1,30 @@
+; Tests to make sure elimination of casts is working correctly
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+define i17 @test1(i17 %a) {
+ %tmp = zext i17 %a to i37 ; <i37> [#uses=2]
+ %tmp21 = lshr i37 %tmp, 8 ; <i37> [#uses=1]
+; CHECK: %tmp21 = lshr i17 %a, 8
+ %tmp5 = shl i37 %tmp, 8 ; <i37> [#uses=1]
+; CHECK: %tmp5 = shl i17 %a, 8
+ %tmp.upgrd.32 = or i37 %tmp21, %tmp5 ; <i37> [#uses=1]
+; CHECK: %tmp.upgrd.32 = or i17 %tmp21, %tmp5
+ %tmp.upgrd.3 = trunc i37 %tmp.upgrd.32 to i17 ; <i17> [#uses=1]
+ ret i17 %tmp.upgrd.3
+; CHECK: ret i17 %tmp.upgrd.32
+}
+
+define i167 @test2(i167 %a) {
+ %tmp = zext i167 %a to i577 ; <i577> [#uses=2]
+ %tmp21 = lshr i577 %tmp, 9 ; <i577> [#uses=1]
+; CHECK: %tmp21 = lshr i167 %a, 9
+ %tmp5 = shl i577 %tmp, 8 ; <i577> [#uses=1]
+; CHECK: %tmp5 = shl i167 %a, 8
+ %tmp.upgrd.32 = or i577 %tmp21, %tmp5 ; <i577> [#uses=1]
+; CHECK: %tmp.upgrd.32 = or i167 %tmp21, %tmp5
+ %tmp.upgrd.3 = trunc i577 %tmp.upgrd.32 to i167 ; <i167> [#uses=1]
+ ret i167 %tmp.upgrd.3
+; CHECK: ret i167 %tmp.upgrd.32
+}
diff --git a/test/Transforms/InstCombine/apint-div1.ll b/test/Transforms/InstCombine/apint-div1.ll
new file mode 100644
index 0000000..68aadac
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-div1.ll
@@ -0,0 +1,22 @@
+; This test makes sure that div instructions are properly eliminated.
+; This test is for Integer BitWidth < 64 && BitWidth % 2 != 0.
+;
+; RUN: opt < %s -instcombine -S | not grep div
+
+
+define i33 @test1(i33 %X) {
+ %Y = udiv i33 %X, 4096
+ ret i33 %Y
+}
+
+define i49 @test2(i49 %X) {
+ %tmp.0 = shl i49 4096, 17
+ %Y = udiv i49 %X, %tmp.0
+ ret i49 %Y
+}
+
+define i59 @test3(i59 %X, i1 %C) {
+ %V = select i1 %C, i59 1024, i59 4096
+ %R = udiv i59 %X, %V
+ ret i59 %R
+}
diff --git a/test/Transforms/InstCombine/apint-div2.ll b/test/Transforms/InstCombine/apint-div2.ll
new file mode 100644
index 0000000..2d7ac78
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-div2.ll
@@ -0,0 +1,22 @@
+; This test makes sure that div instructions are properly eliminated.
+; This test is for Integer BitWidth >= 64 && BitWidth <= 1024.
+;
+; RUN: opt < %s -instcombine -S | not grep div
+
+
+define i333 @test1(i333 %X) {
+ %Y = udiv i333 %X, 70368744177664
+ ret i333 %Y
+}
+
+define i499 @test2(i499 %X) {
+ %tmp.0 = shl i499 4096, 197
+ %Y = udiv i499 %X, %tmp.0
+ ret i499 %Y
+}
+
+define i599 @test3(i599 %X, i1 %C) {
+ %V = select i1 %C, i599 70368744177664, i599 4096
+ %R = udiv i599 %X, %V
+ ret i599 %R
+}
diff --git a/test/Transforms/InstCombine/apint-mul1.ll b/test/Transforms/InstCombine/apint-mul1.ll
new file mode 100644
index 0000000..6a5b3e7
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-mul1.ll
@@ -0,0 +1,11 @@
+; This test makes sure that mul instructions are properly eliminated.
+; This test is for Integer BitWidth < 64 && BitWidth % 2 != 0.
+;
+
+; RUN: opt < %s -instcombine -S | not grep mul
+
+
+define i17 @test1(i17 %X) {
+ %Y = mul i17 %X, 1024
+ ret i17 %Y
+}
diff --git a/test/Transforms/InstCombine/apint-mul2.ll b/test/Transforms/InstCombine/apint-mul2.ll
new file mode 100644
index 0000000..558d2fb
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-mul2.ll
@@ -0,0 +1,12 @@
+; This test makes sure that mul instructions are properly eliminated.
+; This test is for Integer BitWidth >= 64 && BitWidth <= 1024.
+;
+
+; RUN: opt < %s -instcombine -S | not grep mul
+
+
+define i177 @test1(i177 %X) {
+ %C = shl i177 1, 155
+ %Y = mul i177 %X, %C
+ ret i177 %Y
+}
diff --git a/test/Transforms/InstCombine/apint-not.ll b/test/Transforms/InstCombine/apint-not.ll
new file mode 100644
index 0000000..488b7f2
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-not.ll
@@ -0,0 +1,42 @@
+; This test makes sure that the xor instructions are properly eliminated
+; when arbitrary precision integers are used.
+
+; RUN: opt < %s -instcombine -S | not grep xor
+
+define i33 @test1(i33 %A) {
+ %B = xor i33 %A, -1
+ %C = xor i33 %B, -1
+ ret i33 %C
+}
+
+define i1 @test2(i52 %A, i52 %B) {
+ %cond = icmp ule i52 %A, %B ; Can change into ugt
+ %Ret = xor i1 %cond, true
+ ret i1 %Ret
+}
+
+; Test that demorgans law can be instcombined
+define i47 @test3(i47 %A, i47 %B) {
+ %a = xor i47 %A, -1
+ %b = xor i47 %B, -1
+ %c = and i47 %a, %b
+ %d = xor i47 %c, -1
+ ret i47 %d
+}
+
+; Test that De Morgan's law can work with constants
+define i61 @test4(i61 %A, i61 %B) {
+ %a = xor i61 %A, -1
+ %c = and i61 %a, 5 ; 5 = ~c2
+ %d = xor i61 %c, -1
+ ret i61 %d
+}
+
+; test the mirror of demorgans law...
+define i71 @test5(i71 %A, i71 %B) {
+ %a = xor i71 %A, -1
+ %b = xor i71 %B, -1
+ %c = or i71 %a, %b
+ %d = xor i71 %c, -1
+ ret i71 %d
+}
diff --git a/test/Transforms/InstCombine/apint-or1.ll b/test/Transforms/InstCombine/apint-or1.ll
new file mode 100644
index 0000000..d4f87ac
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-or1.ll
@@ -0,0 +1,36 @@
+; This test makes sure that or instructions are properly eliminated.
+; This test is for Integer BitWidth <= 64 && BitWidth % 2 != 0.
+;
+
+; RUN: opt < %s -instcombine -S | not grep or
+
+
+define i7 @test0(i7 %X) {
+ %Y = or i7 %X, 0
+ ret i7 %Y
+}
+
+define i17 @test1(i17 %X) {
+ %Y = or i17 %X, -1
+ ret i17 %Y
+}
+
+define i23 @test2(i23 %A) {
+ ;; A | ~A == -1
+ %NotA = xor i23 -1, %A
+ %B = or i23 %A, %NotA
+ ret i23 %B
+}
+
+define i39 @test3(i39 %V, i39 %M) {
+ ;; If we have: ((V + N) & C1) | (V & C2)
+ ;; .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
+ ;; replace with V+N.
+ %C1 = xor i39 274877906943, -1 ;; C2 = 274877906943
+ %N = and i39 %M, 274877906944
+ %A = add i39 %V, %N
+ %B = and i39 %A, %C1
+ %D = and i39 %V, 274877906943
+ %R = or i39 %B, %D
+ ret i39 %R
+}
diff --git a/test/Transforms/InstCombine/apint-or2.ll b/test/Transforms/InstCombine/apint-or2.ll
new file mode 100644
index 0000000..d7de255
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-or2.ll
@@ -0,0 +1,35 @@
+; This test makes sure that or instructions are properly eliminated.
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+;
+; RUN: opt < %s -instcombine -S | not grep or
+
+
+define i777 @test0(i777 %X) {
+ %Y = or i777 %X, 0
+ ret i777 %Y
+}
+
+define i117 @test1(i117 %X) {
+ %Y = or i117 %X, -1
+ ret i117 %Y
+}
+
+define i1023 @test2(i1023 %A) {
+ ;; A | ~A == -1
+ %NotA = xor i1023 -1, %A
+ %B = or i1023 %A, %NotA
+ ret i1023 %B
+}
+
+define i399 @test3(i399 %V, i399 %M) {
+ ;; If we have: ((V + N) & C1) | (V & C2)
+ ;; .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
+ ;; replace with V+N.
+ %C1 = xor i399 274877906943, -1 ;; C2 = 274877906943
+ %N = and i399 %M, 18446742974197923840
+ %A = add i399 %V, %N
+ %B = and i399 %A, %C1
+ %D = and i399 %V, 274877906943
+ %R = or i399 %B, %D
+ ret i399 %R
+}
diff --git a/test/Transforms/InstCombine/apint-rem1.ll b/test/Transforms/InstCombine/apint-rem1.ll
new file mode 100644
index 0000000..030facc
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-rem1.ll
@@ -0,0 +1,22 @@
+; This test makes sure that these instructions are properly eliminated.
+; This test is for Integer BitWidth < 64 && BitWidth % 2 != 0.
+;
+; RUN: opt < %s -instcombine -S | not grep rem
+
+
+define i33 @test1(i33 %A) {
+ %B = urem i33 %A, 4096
+ ret i33 %B
+}
+
+define i49 @test2(i49 %A) {
+ %B = shl i49 4096, 11
+ %Y = urem i49 %A, %B
+ ret i49 %Y
+}
+
+define i59 @test3(i59 %X, i1 %C) {
+ %V = select i1 %C, i59 70368744177664, i59 4096
+ %R = urem i59 %X, %V
+ ret i59 %R
+}
diff --git a/test/Transforms/InstCombine/apint-rem2.ll b/test/Transforms/InstCombine/apint-rem2.ll
new file mode 100644
index 0000000..9bfc4cd
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-rem2.ll
@@ -0,0 +1,22 @@
+; This test makes sure that these instructions are properly eliminated.
+; This test is for Integer BitWidth >= 64 && BitWidth <= 1024.
+;
+; RUN: opt < %s -instcombine -S | not grep rem
+
+
+define i333 @test1(i333 %A) {
+ %B = urem i333 %A, 70368744177664
+ ret i333 %B
+}
+
+define i499 @test2(i499 %A) {
+ %B = shl i499 4096, 111
+ %Y = urem i499 %A, %B
+ ret i499 %Y
+}
+
+define i599 @test3(i599 %X, i1 %C) {
+ %V = select i1 %C, i599 70368744177664, i599 4096
+ %R = urem i599 %X, %V
+ ret i599 %R
+}
diff --git a/test/Transforms/InstCombine/apint-select.ll b/test/Transforms/InstCombine/apint-select.ll
new file mode 100644
index 0000000..f2ea601
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-select.ll
@@ -0,0 +1,44 @@
+; This test makes sure that these instructions are properly eliminated.
+
+; RUN: opt < %s -instcombine -S | not grep select
+
+
+define i41 @test1(i1 %C) {
+ %V = select i1 %C, i41 1, i41 0 ; V = C
+ ret i41 %V
+}
+
+define i999 @test2(i1 %C) {
+ %V = select i1 %C, i999 0, i999 1 ; V = C
+ ret i999 %V
+}
+
+define i41 @test3(i41 %X) {
+ ;; (x <s 0) ? -1 : 0 -> ashr x, 40
+ %t = icmp slt i41 %X, 0
+ %V = select i1 %t, i41 -1, i41 0
+ ret i41 %V
+}
+
+define i1023 @test4(i1023 %X) {
+ ;; (x <s 0) ? -1 : 0 -> ashr x, 1022
+ %t = icmp slt i1023 %X, 0
+ %V = select i1 %t, i1023 -1, i1023 0
+ ret i1023 %V
+}
+
+define i41 @test5(i41 %X) {
+ ;; ((X & 32) ? 32 : 0)
+ %Y = and i41 %X, 32
+ %t = icmp ne i41 %Y, 0
+ %V = select i1 %t, i41 32, i41 0
+ ret i41 %V
+}
+
+define i1023 @test6(i1023 %X) {
+ ;; ((X & 64) ? 64 : 0)
+ %Y = and i1023 %X, 64
+ %t = icmp ne i1023 %Y, 0
+ %V = select i1 %t, i1023 64, i1023 0
+ ret i1023 %V
+}
diff --git a/test/Transforms/InstCombine/apint-shift-simplify.ll b/test/Transforms/InstCombine/apint-shift-simplify.ll
new file mode 100644
index 0000000..1a3340a
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-shift-simplify.ll
@@ -0,0 +1,23 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: egrep {shl|lshr|ashr} | count 3
+
+define i41 @test0(i41 %A, i41 %B, i41 %C) {
+ %X = shl i41 %A, %C
+ %Y = shl i41 %B, %C
+ %Z = and i41 %X, %Y
+ ret i41 %Z
+}
+
+define i57 @test1(i57 %A, i57 %B, i57 %C) {
+ %X = lshr i57 %A, %C
+ %Y = lshr i57 %B, %C
+ %Z = or i57 %X, %Y
+ ret i57 %Z
+}
+
+define i49 @test2(i49 %A, i49 %B, i49 %C) {
+ %X = ashr i49 %A, %C
+ %Y = ashr i49 %B, %C
+ %Z = xor i49 %X, %Y
+ ret i49 %Z
+}
diff --git a/test/Transforms/InstCombine/apint-shift.ll b/test/Transforms/InstCombine/apint-shift.ll
new file mode 100644
index 0000000..55243a6
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-shift.ll
@@ -0,0 +1,184 @@
+; This test makes sure that shift instructions are properly eliminated
+; even with arbitrary precision integers.
+; RUN: opt < %s -instcombine -S | not grep sh
+; END.
+
+define i47 @test1(i47 %A) {
+ %B = shl i47 %A, 0 ; <i47> [#uses=1]
+ ret i47 %B
+}
+
+define i41 @test2(i7 %X) {
+ %A = zext i7 %X to i41 ; <i41> [#uses=1]
+ %B = shl i41 0, %A ; <i41> [#uses=1]
+ ret i41 %B
+}
+
+define i41 @test3(i41 %A) {
+ %B = ashr i41 %A, 0 ; <i41> [#uses=1]
+ ret i41 %B
+}
+
+define i39 @test4(i7 %X) {
+ %A = zext i7 %X to i39 ; <i39> [#uses=1]
+ %B = ashr i39 0, %A ; <i39> [#uses=1]
+ ret i39 %B
+}
+
+define i55 @test5(i55 %A) {
+ %B = lshr i55 %A, 55 ; <i55> [#uses=1]
+ ret i55 %B
+}
+
+define i32 @test5a(i32 %A) {
+ %B = shl i32 %A, 32 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i55 @test6(i55 %A) {
+ %B = shl i55 %A, 1 ; <i55> [#uses=1]
+ %C = mul i55 %B, 3 ; <i55> [#uses=1]
+ ret i55 %C
+}
+
+define i29 @test7(i8 %X) {
+ %A = zext i8 %X to i29 ; <i29> [#uses=1]
+ %B = ashr i29 -1, %A ; <i29> [#uses=1]
+ ret i29 %B
+}
+
+define i7 @test8(i7 %A) {
+ %B = shl i7 %A, 4 ; <i7> [#uses=1]
+ %C = shl i7 %B, 3 ; <i7> [#uses=1]
+ ret i7 %C
+}
+
+define i17 @test9(i17 %A) {
+ %B = shl i17 %A, 16 ; <i17> [#uses=1]
+ %C = lshr i17 %B, 16 ; <i17> [#uses=1]
+ ret i17 %C
+}
+
+define i19 @test10(i19 %A) {
+ %B = lshr i19 %A, 18 ; <i19> [#uses=1]
+ %C = shl i19 %B, 18 ; <i19> [#uses=1]
+ ret i19 %C
+}
+
+define i23 @test11(i23 %A) {
+ %a = mul i23 %A, 3 ; <i23> [#uses=1]
+ %B = lshr i23 %a, 11 ; <i23> [#uses=1]
+ %C = shl i23 %B, 12 ; <i23> [#uses=1]
+ ret i23 %C
+}
+
+define i47 @test12(i47 %A) {
+ %B = ashr i47 %A, 8 ; <i47> [#uses=1]
+ %C = shl i47 %B, 8 ; <i47> [#uses=1]
+ ret i47 %C
+}
+
+define i18 @test13(i18 %A) {
+ %a = mul i18 %A, 3 ; <i18> [#uses=1]
+ %B = ashr i18 %a, 8 ; <i18> [#uses=1]
+ %C = shl i18 %B, 9 ; <i18> [#uses=1]
+ ret i18 %C
+}
+
+define i35 @test14(i35 %A) {
+ %B = lshr i35 %A, 4 ; <i35> [#uses=1]
+ %C = or i35 %B, 1234 ; <i35> [#uses=1]
+ %D = shl i35 %C, 4 ; <i35> [#uses=1]
+ ret i35 %D
+}
+
+define i79 @test14a(i79 %A) {
+ %B = shl i79 %A, 4 ; <i79> [#uses=1]
+ %C = and i79 %B, 1234 ; <i79> [#uses=1]
+ %D = lshr i79 %C, 4 ; <i79> [#uses=1]
+ ret i79 %D
+}
+
+define i45 @test15(i1 %C) {
+ %A = select i1 %C, i45 3, i45 1 ; <i45> [#uses=1]
+ %V = shl i45 %A, 2 ; <i45> [#uses=1]
+ ret i45 %V
+}
+
+define i53 @test15a(i1 %X) {
+ %A = select i1 %X, i8 3, i8 1 ; <i8> [#uses=1]
+ %B = zext i8 %A to i53 ; <i53> [#uses=1]
+ %V = shl i53 64, %B ; <i53> [#uses=1]
+ ret i53 %V
+}
+
+define i1 @test16(i84 %X) {
+ %tmp.3 = ashr i84 %X, 4 ; <i84> [#uses=1]
+ %tmp.6 = and i84 %tmp.3, 1 ; <i84> [#uses=1]
+ %tmp.7 = icmp ne i84 %tmp.6, 0 ; <i1> [#uses=1]
+ ret i1 %tmp.7
+}
+
+define i1 @test17(i106 %A) {
+ %B = lshr i106 %A, 3 ; <i106> [#uses=1]
+ %C = icmp eq i106 %B, 1234 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test18(i11 %A) {
+ %B = lshr i11 %A, 10 ; <i11> [#uses=1]
+ %C = icmp eq i11 %B, 123 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test19(i37 %A) {
+ %B = ashr i37 %A, 2 ; <i37> [#uses=1]
+ %C = icmp eq i37 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test19a(i39 %A) {
+ %B = ashr i39 %A, 2 ; <i39> [#uses=1]
+ %C = icmp eq i39 %B, -1 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test20(i13 %A) {
+ %B = ashr i13 %A, 12 ; <i13> [#uses=1]
+ %C = icmp eq i13 %B, 123 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test21(i12 %A) {
+ %B = shl i12 %A, 6 ; <i12> [#uses=1]
+ %C = icmp eq i12 %B, -128 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test22(i14 %A) {
+ %B = shl i14 %A, 7 ; <i14> [#uses=1]
+ %C = icmp eq i14 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i11 @test23(i44 %A) {
+ %B = shl i44 %A, 33 ; <i44> [#uses=1]
+ %C = ashr i44 %B, 33 ; <i44> [#uses=1]
+ %D = trunc i44 %C to i11 ; <i11> [#uses=1]
+ ret i11 %D
+}
+
+define i37 @test25(i37 %tmp.2, i37 %AA) {
+ %x = lshr i37 %AA, 17 ; <i37> [#uses=1]
+ %tmp.3 = lshr i37 %tmp.2, 17 ; <i37> [#uses=1]
+ %tmp.5 = add i37 %tmp.3, %x ; <i37> [#uses=1]
+ %tmp.6 = shl i37 %tmp.5, 17 ; <i37> [#uses=1]
+ ret i37 %tmp.6
+}
+
+define i40 @test26(i40 %A) {
+ %B = lshr i40 %A, 1 ; <i40> [#uses=1]
+ %C = bitcast i40 %B to i40 ; <i40> [#uses=1]
+ %D = shl i40 %C, 1 ; <i40> [#uses=1]
+ ret i40 %D
+}
diff --git a/test/Transforms/InstCombine/apint-shl-trunc.ll b/test/Transforms/InstCombine/apint-shl-trunc.ll
new file mode 100644
index 0000000..8163e6d
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-shl-trunc.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep shl
+; END.
+
+define i1 @test0(i39 %X, i39 %A) {
+ %B = lshr i39 %X, %A
+ %D = trunc i39 %B to i1
+ ret i1 %D
+}
+
+define i1 @test1(i799 %X, i799 %A) {
+ %B = lshr i799 %X, %A
+ %D = trunc i799 %B to i1
+ ret i1 %D
+}
diff --git a/test/Transforms/InstCombine/apint-sub.ll b/test/Transforms/InstCombine/apint-sub.ll
new file mode 100644
index 0000000..8b9ff14
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-sub.ll
@@ -0,0 +1,141 @@
+; This test makes sure that sub instructions are properly eliminated
+; even with arbitrary precision integers.
+;
+
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep -v {sub i19 %Cok, %Bok} | grep -v {sub i25 0, %Aok} | not grep sub
+; END.
+
+define i23 @test1(i23 %A) {
+ %B = sub i23 %A, %A ; <i23> [#uses=1]
+ ret i23 %B
+}
+
+define i47 @test2(i47 %A) {
+ %B = sub i47 %A, 0 ; <i47> [#uses=1]
+ ret i47 %B
+}
+
+define i97 @test3(i97 %A) {
+ %B = sub i97 0, %A ; <i97> [#uses=1]
+ %C = sub i97 0, %B ; <i97> [#uses=1]
+ ret i97 %C
+}
+
+define i108 @test4(i108 %A, i108 %x) {
+ %B = sub i108 0, %A ; <i108> [#uses=1]
+ %C = sub i108 %x, %B ; <i108> [#uses=1]
+ ret i108 %C
+}
+
+define i19 @test5(i19 %A, i19 %Bok, i19 %Cok) {
+ %D = sub i19 %Bok, %Cok ; <i19> [#uses=1]
+ %E = sub i19 %A, %D ; <i19> [#uses=1]
+ ret i19 %E
+}
+
+define i57 @test6(i57 %A, i57 %B) {
+ %C = and i57 %A, %B ; <i57> [#uses=1]
+ %D = sub i57 %A, %C ; <i57> [#uses=1]
+ ret i57 %D
+}
+
+define i77 @test7(i77 %A) {
+ %B = sub i77 -1, %A ; <i77> [#uses=1]
+ ret i77 %B
+}
+
+define i27 @test8(i27 %A) {
+ %B = mul i27 9, %A ; <i27> [#uses=1]
+ %C = sub i27 %B, %A ; <i27> [#uses=1]
+ ret i27 %C
+}
+
+define i42 @test9(i42 %A) {
+ %B = mul i42 3, %A ; <i42> [#uses=1]
+ %C = sub i42 %A, %B ; <i42> [#uses=1]
+ ret i42 %C
+}
+
+define i124 @test10(i124 %A, i124 %B) {
+ %C = sub i124 0, %A ; <i124> [#uses=1]
+ %D = sub i124 0, %B ; <i124> [#uses=1]
+ %E = mul i124 %C, %D ; <i124> [#uses=1]
+ ret i124 %E
+}
+
+define i55 @test10a(i55 %A) {
+ %C = sub i55 0, %A ; <i55> [#uses=1]
+ %E = mul i55 %C, 7 ; <i55> [#uses=1]
+ ret i55 %E
+}
+
+define i1 @test11(i9 %A, i9 %B) {
+ %C = sub i9 %A, %B ; <i9> [#uses=1]
+ %cD = icmp ne i9 %C, 0 ; <i1> [#uses=1]
+ ret i1 %cD
+}
+
+define i43 @test12(i43 %A) {
+ %B = ashr i43 %A, 42 ; <i43> [#uses=1]
+ %C = sub i43 0, %B ; <i43> [#uses=1]
+ ret i43 %C
+}
+
+define i79 @test13(i79 %A) {
+ %B = lshr i79 %A, 78 ; <i79> [#uses=1]
+ %C = sub i79 0, %B ; <i79> [#uses=1]
+ ret i79 %C
+}
+
+define i1024 @test14(i1024 %A) {
+ %B = lshr i1024 %A, 1023 ; <i1024> [#uses=1]
+ %C = bitcast i1024 %B to i1024 ; <i1024> [#uses=1]
+ %D = sub i1024 0, %C ; <i1024> [#uses=1]
+ ret i1024 %D
+}
+
+define i14 @test15(i14 %A, i14 %B) {
+ %C = sub i14 0, %A ; <i14> [#uses=1]
+ %D = srem i14 %B, %C ; <i14> [#uses=1]
+ ret i14 %D
+}
+
+define i51 @test16(i51 %A) {
+ %X = sdiv i51 %A, 1123 ; <i51> [#uses=1]
+ %Y = sub i51 0, %X ; <i51> [#uses=1]
+ ret i51 %Y
+}
+
+; Can't fold the subtract here because the negation might overflow.
+; PR3142
+define i25 @test17(i25 %Aok) {
+ %B = sub i25 0, %Aok ; <i25> [#uses=1]
+ %C = sdiv i25 %B, 1234 ; <i25> [#uses=1]
+ ret i25 %C
+}
+
+define i128 @test18(i128 %Y) {
+ %tmp.4 = shl i128 %Y, 2 ; <i128> [#uses=1]
+ %tmp.12 = shl i128 %Y, 2 ; <i128> [#uses=1]
+ %tmp.8 = sub i128 %tmp.4, %tmp.12 ; <i128> [#uses=1]
+ ret i128 %tmp.8
+}
+
+define i39 @test19(i39 %X, i39 %Y) {
+ %Z = sub i39 %X, %Y ; <i39> [#uses=1]
+ %Q = add i39 %Z, %Y ; <i39> [#uses=1]
+ ret i39 %Q
+}
+
+define i1 @test20(i33 %g, i33 %h) {
+ %tmp.2 = sub i33 %g, %h ; <i33> [#uses=1]
+ %tmp.4 = icmp ne i33 %tmp.2, %g ; <i1> [#uses=1]
+ ret i1 %tmp.4
+}
+
+define i1 @test21(i256 %g, i256 %h) {
+ %tmp.2 = sub i256 %g, %h ; <i256> [#uses=1]
+ %tmp.4 = icmp ne i256 %tmp.2, %g; <i1> [#uses=1]
+ ret i1 %tmp.4
+}
diff --git a/test/Transforms/InstCombine/apint-xor1.ll b/test/Transforms/InstCombine/apint-xor1.ll
new file mode 100644
index 0000000..849c659
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-xor1.ll
@@ -0,0 +1,50 @@
+; This test makes sure that xor instructions are properly eliminated.
+; This test is for Integer BitWidth <= 64 && BitWidth % 8 != 0.
+
+; RUN: opt < %s -instcombine -S | not grep {xor }
+
+
+define i47 @test1(i47 %A, i47 %B) {
+ ;; (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
+ %A1 = and i47 %A, 70368744177664
+ %B1 = and i47 %B, 70368744177661
+ %C1 = xor i47 %A1, %B1
+ ret i47 %C1
+}
+
+define i15 @test2(i15 %x) {
+ %tmp.2 = xor i15 %x, 0
+ ret i15 %tmp.2
+}
+
+define i23 @test3(i23 %x) {
+ %tmp.2 = xor i23 %x, %x
+ ret i23 %tmp.2
+}
+
+define i37 @test4(i37 %x) {
+ ; x ^ ~x == -1
+ %NotX = xor i37 -1, %x
+ %B = xor i37 %x, %NotX
+ ret i37 %B
+}
+
+define i7 @test5(i7 %A) {
+ ;; (A|B)^B == A & (~B)
+ %t1 = or i7 %A, 23
+ %r = xor i7 %t1, 23
+ ret i7 %r
+}
+
+define i7 @test6(i7 %A) {
+ %t1 = xor i7 %A, 23
+ %r = xor i7 %t1, 23
+ ret i7 %r
+}
+
+define i47 @test7(i47 %A) {
+ ;; (A | C1) ^ C2 -> (A | C1) & ~C2 iff (C1&C2) == C2
+ %B1 = or i47 %A, 70368744177663
+ %C1 = xor i47 %B1, 703687463
+ ret i47 %C1
+}
diff --git a/test/Transforms/InstCombine/apint-xor2.ll b/test/Transforms/InstCombine/apint-xor2.ll
new file mode 100644
index 0000000..cacc179
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-xor2.ll
@@ -0,0 +1,51 @@
+; This test makes sure that xor instructions are properly eliminated.
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+
+; RUN: opt < %s -instcombine -S | not grep {xor }
+; END.
+
+
+define i447 @test1(i447 %A, i447 %B) {
+ ;; (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
+ %A1 = and i447 %A, 70368744177664
+ %B1 = and i447 %B, 70368744177663
+ %C1 = xor i447 %A1, %B1
+ ret i447 %C1
+}
+
+define i1005 @test2(i1005 %x) {
+ %tmp.2 = xor i1005 %x, 0
+ ret i1005 %tmp.2
+}
+
+define i123 @test3(i123 %x) {
+ %tmp.2 = xor i123 %x, %x
+ ret i123 %tmp.2
+}
+
+define i737 @test4(i737 %x) {
+ ; x ^ ~x == -1
+ %NotX = xor i737 -1, %x
+ %B = xor i737 %x, %NotX
+ ret i737 %B
+}
+
+define i700 @test5(i700 %A) {
+ ;; (A|B)^B == A & (~B)
+ %t1 = or i700 %A, 288230376151711743
+ %r = xor i700 %t1, 288230376151711743
+ ret i700 %r
+}
+
+define i77 @test6(i77 %A) {
+ %t1 = xor i77 %A, 23
+ %r = xor i77 %t1, 23
+ ret i77 %r
+}
+
+define i1023 @test7(i1023 %A) {
+ ;; (A | C1) ^ C2 -> (A | C1) & ~C2 iff (C1&C2) == C2
+ %B1 = or i1023 %A, 70368744177663
+ %C1 = xor i1023 %B1, 703687463
+ ret i1023 %C1
+}
diff --git a/test/Transforms/InstCombine/apint-zext1.ll b/test/Transforms/InstCombine/apint-zext1.ll
new file mode 100644
index 0000000..40de360
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-zext1.ll
@@ -0,0 +1,11 @@
+; Tests to make sure elimination of casts is working correctly
+; This test is for Integer BitWidth <= 64 && BitWidth % 2 != 0.
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i47 @test_sext_zext(i11 %A) {
+ %c1 = zext i11 %A to i39
+ %c2 = sext i39 %c1 to i47
+ ret i47 %c2
+; CHECK: %c2 = zext i11 %A to i47
+; CHECK: ret i47 %c2
+}
diff --git a/test/Transforms/InstCombine/apint-zext2.ll b/test/Transforms/InstCombine/apint-zext2.ll
new file mode 100644
index 0000000..886dcf2
--- /dev/null
+++ b/test/Transforms/InstCombine/apint-zext2.ll
@@ -0,0 +1,11 @@
+; Tests to make sure elimination of casts is working correctly
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i1024 @test_sext_zext(i77 %A) {
+ %c1 = zext i77 %A to i533
+ %c2 = sext i533 %c1 to i1024
+ ret i1024 %c2
+; CHECK: %c2 = zext i77 %A to i1024
+; CHECK: ret i1024 %c2
+}
diff --git a/test/Transforms/InstCombine/ashr-nop.ll b/test/Transforms/InstCombine/ashr-nop.ll
new file mode 100644
index 0000000..870ede3
--- /dev/null
+++ b/test/Transforms/InstCombine/ashr-nop.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | not grep ashr
+
+define i32 @foo(i32 %x) {
+ %o = and i32 %x, 1
+ %n = add i32 %o, -1
+ %t = ashr i32 %n, 17
+ ret i32 %t
+}
diff --git a/test/Transforms/InstCombine/badmalloc.ll b/test/Transforms/InstCombine/badmalloc.ll
new file mode 100644
index 0000000..cab23b5
--- /dev/null
+++ b/test/Transforms/InstCombine/badmalloc.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+target triple = "x86_64-apple-darwin10.0"
+
+declare noalias i8* @malloc(i64) nounwind
+declare void @free(i8*)
+
+; PR5130
+define i1 @test1() {
+ %A = call noalias i8* @malloc(i64 4) nounwind
+ %B = icmp eq i8* %A, null
+
+ call void @free(i8* %A)
+ ret i1 %B
+
+; CHECK: @test1
+; CHECK: ret i1 %B
+}
diff --git a/test/Transforms/InstCombine/binop-cast.ll b/test/Transforms/InstCombine/binop-cast.ll
new file mode 100644
index 0000000..3dbca7e
--- /dev/null
+++ b/test/Transforms/InstCombine/binop-cast.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @testAdd(i32 %X, i32 %Y) {
+ %tmp = add i32 %X, %Y
+; CHECK: %tmp = add i32 %X, %Y
+ %tmp.l = bitcast i32 %tmp to i32
+ ret i32 %tmp.l
+; CHECK: ret i32 %tmp
+}
diff --git a/test/Transforms/InstCombine/bit-tracking.ll b/test/Transforms/InstCombine/bit-tracking.ll
new file mode 100644
index 0000000..51bbc08
--- /dev/null
+++ b/test/Transforms/InstCombine/bit-tracking.ll
@@ -0,0 +1,26 @@
+; This file contains various testcases that require tracking whether bits are
+; set or cleared by various instructions.
+; RUN: opt < %s -instcombine -instcombine -S |\
+; RUN: not grep %ELIM
+
+; Reduce down to a single XOR
+define i32 @test3(i32 %B) {
+ %ELIMinc = and i32 %B, 1 ; <i32> [#uses=1]
+ %tmp.5 = xor i32 %ELIMinc, 1 ; <i32> [#uses=1]
+ %ELIM7 = and i32 %B, -2 ; <i32> [#uses=1]
+ %tmp.8 = or i32 %tmp.5, %ELIM7 ; <i32> [#uses=1]
+ ret i32 %tmp.8
+}
+
+; Finally, a bigger case where we chain things together. This corresponds to
+; incrementing a single-bit bitfield, which should become just an xor.
+define i32 @test4(i32 %B) {
+ %ELIM3 = shl i32 %B, 31 ; <i32> [#uses=1]
+ %ELIM4 = ashr i32 %ELIM3, 31 ; <i32> [#uses=1]
+ %inc = add i32 %ELIM4, 1 ; <i32> [#uses=1]
+ %ELIM5 = and i32 %inc, 1 ; <i32> [#uses=1]
+ %ELIM7 = and i32 %B, -2 ; <i32> [#uses=1]
+ %tmp.8 = or i32 %ELIM5, %ELIM7 ; <i32> [#uses=1]
+ ret i32 %tmp.8
+}
+
diff --git a/test/Transforms/InstCombine/bitcast-scalar-to-vector.ll b/test/Transforms/InstCombine/bitcast-scalar-to-vector.ll
new file mode 100644
index 0000000..4e9dfbb
--- /dev/null
+++ b/test/Transforms/InstCombine/bitcast-scalar-to-vector.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep {ret i32 0}
+; PR4487
+
+; Bitcasts between vectors and scalars are valid, despite being ill-advised.
+
+define i32 @test(i64 %a) {
+bb20:
+ %t1 = bitcast i64 %a to <2 x i32>
+ %t2 = bitcast i64 %a to <2 x i32>
+ %t3 = xor <2 x i32> %t1, %t2
+ %t4 = extractelement <2 x i32> %t3, i32 0
+ ret i32 %t4
+}
+
diff --git a/test/Transforms/InstCombine/bitcast-sext-vector.ll b/test/Transforms/InstCombine/bitcast-sext-vector.ll
new file mode 100644
index 0000000..d70bdba
--- /dev/null
+++ b/test/Transforms/InstCombine/bitcast-sext-vector.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; CHECK: sext
+; Don't fold zero/sign extensions with a bitcast between a vector and scalar.
+
+define i32 @t(<4 x i8> %src1, <4 x i8> %src2) nounwind readonly {
+entry:
+ %cmp = icmp eq <4 x i8> %src1, %src2; <<4 x i1>> [#uses=1]
+ %sext = sext <4 x i1> %cmp to <4 x i8>
+ %val = bitcast <4 x i8> %sext to i32
+ ret i32 %val
+}
diff --git a/test/Transforms/InstCombine/bitcast-vec-canon.ll b/test/Transforms/InstCombine/bitcast-vec-canon.ll
new file mode 100644
index 0000000..d27765e
--- /dev/null
+++ b/test/Transforms/InstCombine/bitcast-vec-canon.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -S | grep element | count 4
+
+define double @a(<1 x i64> %y) {
+ %c = bitcast <1 x i64> %y to double
+ ret double %c
+}
+
+define i64 @b(<1 x i64> %y) {
+ %c = bitcast <1 x i64> %y to i64
+ ret i64 %c
+}
+
+define <1 x i64> @c(double %y) {
+ %c = bitcast double %y to <1 x i64>
+ ret <1 x i64> %c
+}
+
+define <1 x i64> @d(i64 %y) {
+ %c = bitcast i64 %y to <1 x i64>
+ ret <1 x i64> %c
+}
+
diff --git a/test/Transforms/InstCombine/bitcast-vector-fold.ll b/test/Transforms/InstCombine/bitcast-vector-fold.ll
new file mode 100644
index 0000000..8feec22
--- /dev/null
+++ b/test/Transforms/InstCombine/bitcast-vector-fold.ll
@@ -0,0 +1,33 @@
+; RUN: opt < %s -instcombine -S | not grep bitcast
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin8"
+
+define <2 x i64> @test1() {
+ %tmp3 = bitcast <4 x i32> < i32 0, i32 1, i32 2, i32 3 > to <2 x i64>
+ ret <2 x i64> %tmp3
+}
+
+define <4 x i32> @test2() {
+ %tmp3 = bitcast <2 x i64> < i64 0, i64 1 > to <4 x i32>
+ ret <4 x i32> %tmp3
+}
+
+define <2 x double> @test3() {
+ %tmp3 = bitcast <4 x i32> < i32 0, i32 1, i32 2, i32 3 > to <2 x double>
+ ret <2 x double> %tmp3
+}
+
+define <4 x float> @test4() {
+ %tmp3 = bitcast <2 x i64> < i64 0, i64 1 > to <4 x float>
+ ret <4 x float> %tmp3
+}
+
+define <2 x i64> @test5() {
+ %tmp3 = bitcast <4 x float> <float 0.0, float 1.0, float 2.0, float 3.0> to <2 x i64>
+ ret <2 x i64> %tmp3
+}
+
+define <4 x i32> @test6() {
+ %tmp3 = bitcast <2 x double> <double 0.5, double 1.0> to <4 x i32>
+ ret <4 x i32> %tmp3
+}
diff --git a/test/Transforms/InstCombine/bitcount.ll b/test/Transforms/InstCombine/bitcount.ll
new file mode 100644
index 0000000..f75ca2d
--- /dev/null
+++ b/test/Transforms/InstCombine/bitcount.ll
@@ -0,0 +1,19 @@
+; Tests to make sure bit counts of constants are folded
+; RUN: opt < %s -instcombine -S | grep {ret i32 19}
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep -v declare | not grep llvm.ct
+
+declare i31 @llvm.ctpop.i31(i31 %val)
+declare i32 @llvm.cttz.i32(i32 %val)
+declare i33 @llvm.ctlz.i33(i33 %val)
+
+define i32 @test(i32 %A) {
+ %c1 = call i31 @llvm.ctpop.i31(i31 12415124)
+ %c2 = call i32 @llvm.cttz.i32(i32 87359874)
+ %c3 = call i33 @llvm.ctlz.i33(i33 87359874)
+ %t1 = zext i31 %c1 to i32
+ %t3 = trunc i33 %c3 to i32
+ %r1 = add i32 %t1, %c2
+ %r2 = add i32 %r1, %t3
+ ret i32 %r2
+}
diff --git a/test/Transforms/InstCombine/bittest.ll b/test/Transforms/InstCombine/bittest.ll
new file mode 100644
index 0000000..92863d5
--- /dev/null
+++ b/test/Transforms/InstCombine/bittest.ll
@@ -0,0 +1,30 @@
+; RUN: opt < %s -instcombine -simplifycfg -S |\
+; RUN: not grep {call void @abort}
+
+@b_rec.0 = external global i32 ; <i32*> [#uses=2]
+
+define void @_Z12h000007_testv(i32* %P) {
+entry:
+ %tmp.2 = load i32* @b_rec.0 ; <i32> [#uses=1]
+ %tmp.9 = or i32 %tmp.2, -989855744 ; <i32> [#uses=2]
+ %tmp.16 = and i32 %tmp.9, -805306369 ; <i32> [#uses=2]
+ %tmp.17 = and i32 %tmp.9, -973078529 ; <i32> [#uses=1]
+ store i32 %tmp.17, i32* @b_rec.0
+ %tmp.17.shrunk = bitcast i32 %tmp.16 to i32 ; <i32> [#uses=1]
+ %tmp.22 = and i32 %tmp.17.shrunk, -1073741824 ; <i32> [#uses=1]
+ %tmp.23 = icmp eq i32 %tmp.22, -1073741824 ; <i1> [#uses=1]
+ br i1 %tmp.23, label %endif.0, label %then.0
+
+then.0: ; preds = %entry
+ tail call void @abort( )
+ unreachable
+
+endif.0: ; preds = %entry
+ %tmp.17.shrunk2 = bitcast i32 %tmp.16 to i32 ; <i32> [#uses=1]
+ %tmp.27.mask = and i32 %tmp.17.shrunk2, 100663295 ; <i32> [#uses=1]
+ store i32 %tmp.27.mask, i32* %P
+ ret void
+}
+
+declare void @abort()
+
diff --git a/test/Transforms/InstCombine/bswap-fold.ll b/test/Transforms/InstCombine/bswap-fold.ll
new file mode 100644
index 0000000..034c70e
--- /dev/null
+++ b/test/Transforms/InstCombine/bswap-fold.ll
@@ -0,0 +1,69 @@
+; RUN: opt < %s -instcombine -S | not grep call.*bswap
+
+define i1 @test1(i16 %tmp2) {
+ %tmp10 = call i16 @llvm.bswap.i16( i16 %tmp2 )
+ %tmp = icmp eq i16 %tmp10, 1
+ ret i1 %tmp
+}
+
+define i1 @test2(i32 %tmp) {
+ %tmp34 = tail call i32 @llvm.bswap.i32( i32 %tmp )
+ %tmp.upgrd.1 = icmp eq i32 %tmp34, 1
+ ret i1 %tmp.upgrd.1
+}
+
+declare i32 @llvm.bswap.i32(i32)
+
+define i1 @test3(i64 %tmp) {
+ %tmp34 = tail call i64 @llvm.bswap.i64( i64 %tmp )
+ %tmp.upgrd.2 = icmp eq i64 %tmp34, 1
+ ret i1 %tmp.upgrd.2
+}
+
+declare i64 @llvm.bswap.i64(i64)
+
+declare i16 @llvm.bswap.i16(i16)
+
+; rdar://5992453
+; A & 255
+define i32 @test4(i32 %a) nounwind {
+entry:
+ %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
+ %tmp4 = lshr i32 %tmp2, 24
+ ret i32 %tmp4
+}
+
+; A
+define i32 @test5(i32 %a) nounwind {
+entry:
+ %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
+ %tmp4 = tail call i32 @llvm.bswap.i32( i32 %tmp2 )
+ ret i32 %tmp4
+}
+
+; a >> 24
+define i32 @test6(i32 %a) nounwind {
+entry:
+ %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
+ %tmp4 = and i32 %tmp2, 255
+ ret i32 %tmp4
+}
+
+; PR5284
+declare i64 @llvm.bswap.i64(i64)
+declare i32 @llvm.bswap.i32(i32)
+declare i16 @llvm.bswap.i16(i16)
+
+define i16 @test7(i32 %A) {
+ %B = tail call i32 @llvm.bswap.i32(i32 %A) nounwind
+ %C = trunc i32 %B to i16
+ %D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
+ ret i16 %D
+}
+
+define i16 @test8(i64 %A) {
+ %B = tail call i64 @llvm.bswap.i64(i64 %A) nounwind
+ %C = trunc i64 %B to i16
+ %D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
+ ret i16 %D
+}
diff --git a/test/Transforms/InstCombine/bswap.ll b/test/Transforms/InstCombine/bswap.ll
new file mode 100644
index 0000000..168b3e8
--- /dev/null
+++ b/test/Transforms/InstCombine/bswap.ll
@@ -0,0 +1,74 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
+
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {call.*llvm.bswap} | count 6
+
+define i32 @test1(i32 %i) {
+ %tmp1 = lshr i32 %i, 24 ; <i32> [#uses=1]
+ %tmp3 = lshr i32 %i, 8 ; <i32> [#uses=1]
+ %tmp4 = and i32 %tmp3, 65280 ; <i32> [#uses=1]
+ %tmp5 = or i32 %tmp1, %tmp4 ; <i32> [#uses=1]
+ %tmp7 = shl i32 %i, 8 ; <i32> [#uses=1]
+ %tmp8 = and i32 %tmp7, 16711680 ; <i32> [#uses=1]
+ %tmp9 = or i32 %tmp5, %tmp8 ; <i32> [#uses=1]
+ %tmp11 = shl i32 %i, 24 ; <i32> [#uses=1]
+ %tmp12 = or i32 %tmp9, %tmp11 ; <i32> [#uses=1]
+ ret i32 %tmp12
+}
+
+define i32 @test2(i32 %arg) {
+ %tmp2 = shl i32 %arg, 24 ; <i32> [#uses=1]
+ %tmp4 = shl i32 %arg, 8 ; <i32> [#uses=1]
+ %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
+ %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
+ %tmp8 = lshr i32 %arg, 8 ; <i32> [#uses=1]
+ %tmp9 = and i32 %tmp8, 65280 ; <i32> [#uses=1]
+ %tmp10 = or i32 %tmp6, %tmp9 ; <i32> [#uses=1]
+ %tmp12 = lshr i32 %arg, 24 ; <i32> [#uses=1]
+ %tmp14 = or i32 %tmp10, %tmp12 ; <i32> [#uses=1]
+ ret i32 %tmp14
+}
+
+define i16 @test3(i16 %s) {
+ %tmp2 = lshr i16 %s, 8 ; <i16> [#uses=1]
+ %tmp4 = shl i16 %s, 8 ; <i16> [#uses=1]
+ %tmp5 = or i16 %tmp2, %tmp4 ; <i16> [#uses=1]
+ ret i16 %tmp5
+}
+
+define i16 @test4(i16 %s) {
+ %tmp2 = lshr i16 %s, 8 ; <i16> [#uses=1]
+ %tmp4 = shl i16 %s, 8 ; <i16> [#uses=1]
+ %tmp5 = or i16 %tmp4, %tmp2 ; <i16> [#uses=1]
+ ret i16 %tmp5
+}
+
+define i16 @test5(i16 %a) {
+ %tmp = zext i16 %a to i32 ; <i32> [#uses=2]
+ %tmp1 = and i32 %tmp, 65280 ; <i32> [#uses=1]
+ %tmp2 = ashr i32 %tmp1, 8 ; <i32> [#uses=1]
+ %tmp2.upgrd.1 = trunc i32 %tmp2 to i16 ; <i16> [#uses=1]
+ %tmp4 = and i32 %tmp, 255 ; <i32> [#uses=1]
+ %tmp5 = shl i32 %tmp4, 8 ; <i32> [#uses=1]
+ %tmp5.upgrd.2 = trunc i32 %tmp5 to i16 ; <i16> [#uses=1]
+ %tmp.upgrd.3 = or i16 %tmp2.upgrd.1, %tmp5.upgrd.2 ; <i16> [#uses=1]
+ %tmp6 = bitcast i16 %tmp.upgrd.3 to i16 ; <i16> [#uses=1]
+ %tmp6.upgrd.4 = zext i16 %tmp6 to i32 ; <i32> [#uses=1]
+ %retval = trunc i32 %tmp6.upgrd.4 to i16 ; <i16> [#uses=1]
+ ret i16 %retval
+}
+
+; PR2842
+define i32 @test6(i32 %x) nounwind readnone {
+ %tmp = shl i32 %x, 16 ; <i32> [#uses=1]
+ %x.mask = and i32 %x, 65280 ; <i32> [#uses=1]
+ %tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
+ %tmp2 = and i32 %tmp1, 255 ; <i32> [#uses=1]
+ %tmp3 = or i32 %x.mask, %tmp ; <i32> [#uses=1]
+ %tmp4 = or i32 %tmp3, %tmp2 ; <i32> [#uses=1]
+ %tmp5 = shl i32 %tmp4, 8 ; <i32> [#uses=1]
+ %tmp6 = lshr i32 %x, 24 ; <i32> [#uses=1]
+ %tmp7 = or i32 %tmp5, %tmp6 ; <i32> [#uses=1]
+ ret i32 %tmp7
+}
+
diff --git a/test/Transforms/InstCombine/call-cast-target.ll b/test/Transforms/InstCombine/call-cast-target.ll
new file mode 100644
index 0000000..7addc8a
--- /dev/null
+++ b/test/Transforms/InstCombine/call-cast-target.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep call | not grep bitcast
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+
+define i32 @main() {
+entry:
+ %tmp = call i32 bitcast (i8* (i32*)* @ctime to i32 (i32*)*)( i32* null ) ; <i32> [#uses=1]
+ ret i32 %tmp
+}
+
+declare i8* @ctime(i32*)
+
diff --git a/test/Transforms/InstCombine/call-intrinsics.ll b/test/Transforms/InstCombine/call-intrinsics.ll
new file mode 100644
index 0000000..f9d1080
--- /dev/null
+++ b/test/Transforms/InstCombine/call-intrinsics.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine | llvm-dis
+
+@X = global i8 0 ; <i8*> [#uses=3]
+@Y = global i8 12 ; <i8*> [#uses=2]
+
+declare void @llvm.memmove.i32(i8*, i8*, i32, i32)
+
+declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
+
+declare void @llvm.memset.i32(i8*, i8, i32, i32)
+
+define void @zero_byte_test() {
+ ; These process zero bytes, so they are a noop.
+ call void @llvm.memmove.i32( i8* @X, i8* @Y, i32 0, i32 100 )
+ call void @llvm.memcpy.i32( i8* @X, i8* @Y, i32 0, i32 100 )
+ call void @llvm.memset.i32( i8* @X, i8 123, i32 0, i32 100 )
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/call.ll b/test/Transforms/InstCombine/call.ll
new file mode 100644
index 0000000..dd65b96
--- /dev/null
+++ b/test/Transforms/InstCombine/call.ll
@@ -0,0 +1,118 @@
+; Ignore stderr, we expect warnings there
+; RUN: opt < %s -instcombine 2> /dev/null -S | FileCheck %s
+
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+; Simple case, argument translatable without changing the value
+declare void @test1a(i8*)
+
+define void @test1(i32* %A) {
+ call void bitcast (void (i8*)* @test1a to void (i32*)*)( i32* %A )
+ ret void
+; CHECK: %tmp = bitcast i32* %A to i8*
+; CHECK: call void @test1a(i8* %tmp)
+; CHECK: ret void
+}
+
+; More complex case, translate argument because of resolution. This is safe
+; because we have the body of the function
+define void @test2a(i8 %A) {
+ ret void
+; CHECK: ret void
+}
+
+define i32 @test2(i32 %A) {
+ call void bitcast (void (i8)* @test2a to void (i32)*)( i32 %A )
+ ret i32 %A
+; CHECK: %tmp = trunc i32 %A to i8
+; CHECK: call void @test2a(i8 %tmp)
+; CHECK: ret i32 %A
+}
+
+
+; Resolving this should insert a cast from sbyte to int, following the C
+; promotion rules.
+declare void @test3a(i8, ...)
+
+define void @test3(i8 %A, i8 %B) {
+ call void bitcast (void (i8, ...)* @test3a to void (i8, i8)*)( i8 %A, i8 %B
+)
+ ret void
+; CHECK: %tmp = zext i8 %B to i32
+; CHECK: call void (i8, ...)* @test3a(i8 %A, i32 %tmp)
+; CHECK: ret void
+}
+
+
+; test conversion of return value...
+define i8 @test4a() {
+ ret i8 0
+; CHECK: ret i8 0
+}
+
+define i32 @test4() {
+ %X = call i32 bitcast (i8 ()* @test4a to i32 ()*)( ) ; <i32> [#uses=1]
+ ret i32 %X
+; CHECK: %X1 = call i8 @test4a()
+; CHECK: %tmp = zext i8 %X1 to i32
+; CHECK: ret i32 %tmp
+}
+
+
+; test conversion of return value... no value conversion occurs so we can do
+; this with just a prototype...
+declare i32 @test5a()
+
+define i32 @test5() {
+ %X = call i32 @test5a( ) ; <i32> [#uses=1]
+ ret i32 %X
+; CHECK: %X = call i32 @test5a()
+; CHECK: ret i32 %X
+}
+
+
+; test addition of new arguments...
+declare i32 @test6a(i32)
+
+define i32 @test6() {
+ %X = call i32 bitcast (i32 (i32)* @test6a to i32 ()*)( )
+ ret i32 %X
+; CHECK: %X1 = call i32 @test6a(i32 0)
+; CHECK: ret i32 %X1
+}
+
+
+; test removal of arguments, only can happen with a function body
+define void @test7a() {
+ ret void
+; CHECK: ret void
+}
+
+define void @test7() {
+ call void bitcast (void ()* @test7a to void (i32)*)( i32 5 )
+ ret void
+; CHECK: call void @test7a()
+; CHECK: ret void
+}
+
+
+; rdar://7590304
+declare void @test8a()
+
+define i8* @test8() {
+ invoke arm_apcscc void @test8a()
+ to label %invoke.cont unwind label %try.handler
+
+invoke.cont: ; preds = %entry
+ unreachable
+
+try.handler: ; preds = %entry
+ ret i8* null
+}
+
+; Don't turn this into "unreachable": the callee and caller don't agree in
+; calling conv, but the implementation of test8a may actually end up using the
+; right calling conv.
+; CHECK: @test8() {
+; CHECK-NEXT: invoke arm_apcscc void @test8a()
+
diff --git a/test/Transforms/InstCombine/call2.ll b/test/Transforms/InstCombine/call2.ll
new file mode 100644
index 0000000..3a6bd67
--- /dev/null
+++ b/test/Transforms/InstCombine/call2.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -instcombine | llvm-dis
+
+; This used to crash trying to do a double-to-pointer conversion
+define i32 @bar() {
+entry:
+ %retval = alloca i32, align 4 ; <i32*> [#uses=1]
+ "alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp = call i32 (...)* bitcast (i32 (i8*)* @f to i32 (...)*)( double 3.000000e+00 ) ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load i32* %retval ; <i32> [#uses=1]
+ ret i32 %retval1
+}
+
+define i32 @f(i8* %p) {
+entry:
+ %p_addr = alloca i8* ; <i8**> [#uses=1]
+ %retval = alloca i32, align 4 ; <i32*> [#uses=1]
+ "alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store i8* %p, i8** %p_addr
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load i32* %retval ; <i32> [#uses=1]
+ ret i32 %retval1
+}
diff --git a/test/Transforms/InstCombine/canonicalize_branch.ll b/test/Transforms/InstCombine/canonicalize_branch.ll
new file mode 100644
index 0000000..24090ab
--- /dev/null
+++ b/test/Transforms/InstCombine/canonicalize_branch.ll
@@ -0,0 +1,44 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @test1(i32 %X, i32 %Y) {
+ %C = icmp ne i32 %X, %Y
+ br i1 %C, label %T, label %F
+
+; CHECK: @test1
+; CHECK: %C = icmp eq i32 %X, %Y
+; CHECK: br i1 %C, label %F, label %T
+
+T:
+ ret i32 12
+F:
+ ret i32 123
+}
+
+define i32 @test2(i32 %X, i32 %Y) {
+ %C = icmp ule i32 %X, %Y
+ br i1 %C, label %T, label %F
+
+; CHECK: @test2
+; CHECK: %C = icmp ugt i32 %X, %Y
+; CHECK: br i1 %C, label %F, label %T
+
+T:
+ ret i32 12
+F:
+ ret i32 123
+}
+
+define i32 @test3(i32 %X, i32 %Y) {
+ %C = icmp uge i32 %X, %Y
+ br i1 %C, label %T, label %F
+
+; CHECK: @test3
+; CHECK: %C = icmp ult i32 %X, %Y
+; CHECK: br i1 %C, label %F, label %T
+
+T:
+ ret i32 12
+F:
+ ret i32 123
+}
+
diff --git a/test/Transforms/InstCombine/cast-mul-select.ll b/test/Transforms/InstCombine/cast-mul-select.ll
new file mode 100644
index 0000000..f55423c
--- /dev/null
+++ b/test/Transforms/InstCombine/cast-mul-select.ll
@@ -0,0 +1,41 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32"
+
+define i32 @mul(i32 %x, i32 %y) {
+ %A = trunc i32 %x to i8
+ %B = trunc i32 %y to i8
+ %C = mul i8 %A, %B
+ %D = zext i8 %C to i32
+ ret i32 %D
+; CHECK: %C = mul i32 %x, %y
+; CHECK: %D = and i32 %C, 255
+; CHECK: ret i32 %D
+}
+
+define i32 @select1(i1 %cond, i32 %x, i32 %y, i32 %z) {
+ %A = trunc i32 %x to i8
+ %B = trunc i32 %y to i8
+ %C = trunc i32 %z to i8
+ %D = add i8 %A, %B
+ %E = select i1 %cond, i8 %C, i8 %D
+ %F = zext i8 %E to i32
+ ret i32 %F
+; CHECK: %D = add i32 %x, %y
+; CHECK: %E = select i1 %cond, i32 %z, i32 %D
+; CHECK: %F = and i32 %E, 255
+; CHECK: ret i32 %F
+}
+
+define i8 @select2(i1 %cond, i8 %x, i8 %y, i8 %z) {
+ %A = zext i8 %x to i32
+ %B = zext i8 %y to i32
+ %C = zext i8 %z to i32
+ %D = add i32 %A, %B
+ %E = select i1 %cond, i32 %C, i32 %D
+ %F = trunc i32 %E to i8
+ ret i8 %F
+; CHECK: %D = add i8 %x, %y
+; CHECK: %E = select i1 %cond, i8 %z, i8 %D
+; CHECK: ret i8 %E
+}
diff --git a/test/Transforms/InstCombine/cast-set.ll b/test/Transforms/InstCombine/cast-set.ll
new file mode 100644
index 0000000..8934404
--- /dev/null
+++ b/test/Transforms/InstCombine/cast-set.ll
@@ -0,0 +1,65 @@
+; This tests for various complex cast elimination cases instcombine should
+; handle.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i1 @test1(i32 %X) {
+ %A = bitcast i32 %X to i32 ; <i32> [#uses=1]
+ ; Convert to setne int %X, 12
+ %c = icmp ne i32 %A, 12 ; <i1> [#uses=1]
+ ret i1 %c
+; CHECK: %c = icmp ne i32 %X, 12
+; CHECK: ret i1 %c
+}
+
+define i1 @test2(i32 %X, i32 %Y) {
+ %A = bitcast i32 %X to i32 ; <i32> [#uses=1]
+ %B = bitcast i32 %Y to i32 ; <i32> [#uses=1]
+ ; Convert to setne int %X, %Y
+ %c = icmp ne i32 %A, %B ; <i1> [#uses=1]
+ ret i1 %c
+; CHECK: %c = icmp ne i32 %X, %Y
+; CHECK: ret i1 %c
+}
+
+define i32 @test4(i32 %A) {
+ %B = bitcast i32 %A to i32 ; <i32> [#uses=1]
+ %C = shl i32 %B, 2 ; <i32> [#uses=1]
+ %D = bitcast i32 %C to i32 ; <i32> [#uses=1]
+ ret i32 %D
+; CHECK: %C = shl i32 %A, 2
+; CHECK: ret i32 %C
+}
+
+define i16 @test5(i16 %A) {
+ %B = sext i16 %A to i32 ; <i32> [#uses=1]
+ %C = and i32 %B, 15 ; <i32> [#uses=1]
+ %D = trunc i32 %C to i16 ; <i16> [#uses=1]
+ ret i16 %D
+; CHECK: %C = and i16 %A, 15
+; CHECK: ret i16 %C
+}
+
+define i1 @test6(i1 %A) {
+ %B = zext i1 %A to i32 ; <i32> [#uses=1]
+ %C = icmp ne i32 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 %A
+}
+
+define i1 @test6a(i1 %A) {
+ %B = zext i1 %A to i32 ; <i32> [#uses=1]
+ %C = icmp ne i32 %B, -1 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 true
+}
+
+define i1 @test7(i8* %A) {
+ %B = bitcast i8* %A to i32* ; <i32*> [#uses=1]
+ %C = icmp eq i32* %B, null ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp eq i8* %A, null
+; CHECK: ret i1 %C
+}
diff --git a/test/Transforms/InstCombine/cast.ll b/test/Transforms/InstCombine/cast.ll
new file mode 100644
index 0000000..878da68
--- /dev/null
+++ b/test/Transforms/InstCombine/cast.ll
@@ -0,0 +1,607 @@
+; Tests to make sure elimination of casts is working correctly
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128-n8:16:32:64"
+
+@inbuf = external global [32832 x i8] ; <[32832 x i8]*> [#uses=1]
+
+define i32 @test1(i32 %A) {
+ %c1 = bitcast i32 %A to i32 ; <i32> [#uses=1]
+ %c2 = bitcast i32 %c1 to i32 ; <i32> [#uses=1]
+ ret i32 %c2
+; CHECK: ret i32 %A
+}
+
+define i64 @test2(i8 %A) {
+ %c1 = zext i8 %A to i16 ; <i16> [#uses=1]
+ %c2 = zext i16 %c1 to i32 ; <i32> [#uses=1]
+ %Ret = zext i32 %c2 to i64 ; <i64> [#uses=1]
+ ret i64 %Ret
+; CHECK: %Ret = zext i8 %A to i64
+; CHECK: ret i64 %Ret
+}
+
+; This function should just use bitwise AND
+define i64 @test3(i64 %A) {
+ %c1 = trunc i64 %A to i8 ; <i8> [#uses=1]
+ %c2 = zext i8 %c1 to i64 ; <i64> [#uses=1]
+ ret i64 %c2
+; CHECK: %c2 = and i64 %A, 255
+; CHECK: ret i64 %c2
+}
+
+define i32 @test4(i32 %A, i32 %B) {
+ %COND = icmp slt i32 %A, %B ; <i1> [#uses=1]
+ ; Booleans are unsigned integrals
+ %c = zext i1 %COND to i8 ; <i8> [#uses=1]
+ ; for the cast elim purpose
+ %result = zext i8 %c to i32 ; <i32> [#uses=1]
+ ret i32 %result
+; CHECK: %COND = icmp slt i32 %A, %B
+; CHECK: %result = zext i1 %COND to i32
+; CHECK: ret i32 %result
+}
+
+define i32 @test5(i1 %B) {
+ ; This cast should get folded into
+ %c = zext i1 %B to i8 ; <i8> [#uses=1]
+ ; this cast
+ %result = zext i8 %c to i32 ; <i32> [#uses=1]
+ ret i32 %result
+; CHECK: %result = zext i1 %B to i32
+; CHECK: ret i32 %result
+}
+
+define i32 @test6(i64 %A) {
+ %c1 = trunc i64 %A to i32 ; <i32> [#uses=1]
+ %res = bitcast i32 %c1 to i32 ; <i32> [#uses=1]
+ ret i32 %res
+; CHECK: trunc i64 %A to i32
+; CHECK-NEXT: ret i32
+}
+
+define i64 @test7(i1 %A) {
+ %c1 = zext i1 %A to i32 ; <i32> [#uses=1]
+ %res = sext i32 %c1 to i64 ; <i64> [#uses=1]
+ ret i64 %res
+; CHECK: %res = zext i1 %A to i64
+; CHECK: ret i64 %res
+}
+
+define i64 @test8(i8 %A) {
+ %c1 = sext i8 %A to i64 ; <i64> [#uses=1]
+ %res = bitcast i64 %c1 to i64 ; <i64> [#uses=1]
+ ret i64 %res
+; CHECK: = sext i8 %A to i64
+; CHECK-NEXT: ret i64
+}
+
+define i16 @test9(i16 %A) {
+ %c1 = sext i16 %A to i32 ; <i32> [#uses=1]
+ %c2 = trunc i32 %c1 to i16 ; <i16> [#uses=1]
+ ret i16 %c2
+; CHECK: ret i16 %A
+}
+
+define i16 @test10(i16 %A) {
+ %c1 = sext i16 %A to i32 ; <i32> [#uses=1]
+ %c2 = trunc i32 %c1 to i16 ; <i16> [#uses=1]
+ ret i16 %c2
+; CHECK: ret i16 %A
+}
+
+declare void @varargs(i32, ...)
+
+define void @test11(i32* %P) {
+ %c = bitcast i32* %P to i16* ; <i16*> [#uses=1]
+ call void (i32, ...)* @varargs( i32 5, i16* %c )
+ ret void
+; CHECK: call void (i32, ...)* @varargs(i32 5, i32* %P)
+; CHECK: ret void
+}
+
+define i32* @test12() {
+ %p = malloc [4 x i8] ; <[4 x i8]*> [#uses=1]
+ %c = bitcast [4 x i8]* %p to i32* ; <i32*> [#uses=1]
+ ret i32* %c
+; CHECK: %malloccall = tail call i8* @malloc(i32 4)
+; CHECK: ret i32* %c
+}
+
+define i8* @test13(i64 %A) {
+ %c = getelementptr [0 x i8]* bitcast ([32832 x i8]* @inbuf to [0 x i8]*), i64 0, i64 %A ; <i8*> [#uses=1]
+ ret i8* %c
+; CHECK: %c = getelementptr [32832 x i8]* @inbuf, i64 0, i64 %A
+; CHECK: ret i8* %c
+}
+
+define i1 @test14(i8 %A) {
+ %c = bitcast i8 %A to i8 ; <i8> [#uses=1]
+ %X = icmp ult i8 %c, -128 ; <i1> [#uses=1]
+ ret i1 %X
+; CHECK: %X = icmp sgt i8 %A, -1
+; CHECK: ret i1 %X
+}
+
+
+; This just won't occur when there's no difference between ubyte and sbyte
+;bool %test15(ubyte %A) {
+; %c = cast ubyte %A to sbyte
+; %X = setlt sbyte %c, 0 ; setgt %A, 127
+; ret bool %X
+;}
+
+define i1 @test16(i32* %P) {
+ %c = icmp ne i32* %P, null ; <i1> [#uses=1]
+ ret i1 %c
+; CHECK: %c = icmp ne i32* %P, null
+; CHECK: ret i1 %c
+}
+
+define i16 @test17(i1 %tmp3) {
+ %c = zext i1 %tmp3 to i32 ; <i32> [#uses=1]
+ %t86 = trunc i32 %c to i16 ; <i16> [#uses=1]
+ ret i16 %t86
+; CHECK: %t86 = zext i1 %tmp3 to i16
+; CHECK: ret i16 %t86
+}
+
+define i16 @test18(i8 %tmp3) {
+ %c = sext i8 %tmp3 to i32 ; <i32> [#uses=1]
+ %t86 = trunc i32 %c to i16 ; <i16> [#uses=1]
+ ret i16 %t86
+; CHECK: %t86 = sext i8 %tmp3 to i16
+; CHECK: ret i16 %t86
+}
+
+define i1 @test19(i32 %X) {
+ %c = sext i32 %X to i64 ; <i64> [#uses=1]
+ %Z = icmp slt i64 %c, 12345 ; <i1> [#uses=1]
+ ret i1 %Z
+; CHECK: %Z = icmp slt i32 %X, 12345
+; CHECK: ret i1 %Z
+}
+
+define i1 @test20(i1 %B) {
+ %c = zext i1 %B to i32 ; <i32> [#uses=1]
+ %D = icmp slt i32 %c, -1 ; <i1> [#uses=1]
+ ;; false
+ ret i1 %D
+; CHECK: ret i1 false
+}
+
+define i32 @test21(i32 %X) {
+ %c1 = trunc i32 %X to i8 ; <i8> [#uses=1]
+ ;; sext -> zext -> and -> nop
+ %c2 = sext i8 %c1 to i32 ; <i32> [#uses=1]
+ %RV = and i32 %c2, 255 ; <i32> [#uses=1]
+ ret i32 %RV
+; CHECK: %c21 = and i32 %X, 255
+; CHECK: ret i32 %c21
+}
+
+define i32 @test22(i32 %X) {
+ %c1 = trunc i32 %X to i8 ; <i8> [#uses=1]
+ ;; sext -> zext -> and -> nop
+ %c2 = sext i8 %c1 to i32 ; <i32> [#uses=1]
+ %RV = shl i32 %c2, 24 ; <i32> [#uses=1]
+ ret i32 %RV
+; CHECK: shl i32 %X, 24
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test23(i32 %X) {
+ ;; Turn into an AND even though X
+ %c1 = trunc i32 %X to i16 ; <i16> [#uses=1]
+ ;; and Z are signed.
+ %c2 = zext i16 %c1 to i32 ; <i32> [#uses=1]
+ ret i32 %c2
+; CHECK: %c2 = and i32 %X, 65535
+; CHECK: ret i32 %c2
+}
+
+define i1 @test24(i1 %C) {
+ %X = select i1 %C, i32 14, i32 1234 ; <i32> [#uses=1]
+ ;; Fold cast into select
+ %c = icmp ne i32 %X, 0 ; <i1> [#uses=1]
+ ret i1 %c
+; CHECK: ret i1 true
+}
+
+define void @test25(i32** %P) {
+ %c = bitcast i32** %P to float** ; <float**> [#uses=1]
+ ;; Fold cast into null
+ store float* null, float** %c
+ ret void
+; CHECK: store i32* null, i32** %P
+; CHECK: ret void
+}
+
+define i32 @test26(float %F) {
+ ;; no need to cast from float->double.
+ %c = fpext float %F to double ; <double> [#uses=1]
+ %D = fptosi double %c to i32 ; <i32> [#uses=1]
+ ret i32 %D
+; CHECK: %D = fptosi float %F to i32
+; CHECK: ret i32 %D
+}
+
+define [4 x float]* @test27([9 x [4 x float]]* %A) {
+ %c = bitcast [9 x [4 x float]]* %A to [4 x float]* ; <[4 x float]*> [#uses=1]
+ ret [4 x float]* %c
+; CHECK: %c = getelementptr inbounds [9 x [4 x float]]* %A, i64 0, i64 0
+; CHECK: ret [4 x float]* %c
+}
+
+define float* @test28([4 x float]* %A) {
+ %c = bitcast [4 x float]* %A to float* ; <float*> [#uses=1]
+ ret float* %c
+; CHECK: %c = getelementptr inbounds [4 x float]* %A, i64 0, i64 0
+; CHECK: ret float* %c
+}
+
+define i32 @test29(i32 %c1, i32 %c2) {
+ %tmp1 = trunc i32 %c1 to i8 ; <i8> [#uses=1]
+ %tmp4.mask = trunc i32 %c2 to i8 ; <i8> [#uses=1]
+ %tmp = or i8 %tmp4.mask, %tmp1 ; <i8> [#uses=1]
+ %tmp10 = zext i8 %tmp to i32 ; <i32> [#uses=1]
+ ret i32 %tmp10
+; CHECK: %tmp2 = or i32 %c2, %c1
+; CHECK: %tmp10 = and i32 %tmp2, 255
+; CHECK: ret i32 %tmp10
+}
+
+define i32 @test30(i32 %c1) {
+ %c2 = trunc i32 %c1 to i8 ; <i8> [#uses=1]
+ %c3 = xor i8 %c2, 1 ; <i8> [#uses=1]
+ %c4 = zext i8 %c3 to i32 ; <i32> [#uses=1]
+ ret i32 %c4
+; CHECK: %c3 = and i32 %c1, 255
+; CHECK: %c4 = xor i32 %c3, 1
+; CHECK: ret i32 %c4
+}
+
+define i1 @test31(i64 %A) {
+ %B = trunc i64 %A to i32 ; <i32> [#uses=1]
+ %C = and i32 %B, 42 ; <i32> [#uses=1]
+ %D = icmp eq i32 %C, 10 ; <i1> [#uses=1]
+ ret i1 %D
+; CHECK: %C1 = and i64 %A, 42
+; CHECK: %D = icmp eq i64 %C1, 10
+; CHECK: ret i1 %D
+}
+
+define void @test32(double** %tmp) {
+ %tmp8 = malloc [16 x i8] ; <[16 x i8]*> [#uses=1]
+ %tmp8.upgrd.1 = bitcast [16 x i8]* %tmp8 to double* ; <double*> [#uses=1]
+ store double* %tmp8.upgrd.1, double** %tmp
+ ret void
+; CHECK: %malloccall = tail call i8* @malloc(i32 16)
+; CHECK: %tmp8.upgrd.1 = bitcast i8* %malloccall to double*
+; CHECK: store double* %tmp8.upgrd.1, double** %tmp
+; CHECK: ret void
+}
+
+define i32 @test33(i32 %c1) {
+ %x = bitcast i32 %c1 to float ; <float> [#uses=1]
+ %y = bitcast float %x to i32 ; <i32> [#uses=1]
+ ret i32 %y
+; CHECK: ret i32 %c1
+}
+
+define i16 @test34(i16 %a) {
+ %c1 = zext i16 %a to i32 ; <i32> [#uses=1]
+ %tmp21 = lshr i32 %c1, 8 ; <i32> [#uses=1]
+ %c2 = trunc i32 %tmp21 to i16 ; <i16> [#uses=1]
+ ret i16 %c2
+; CHECK: %tmp21 = lshr i16 %a, 8
+; CHECK: ret i16 %tmp21
+}
+
+define i16 @test35(i16 %a) {
+ %c1 = bitcast i16 %a to i16 ; <i16> [#uses=1]
+ %tmp2 = lshr i16 %c1, 8 ; <i16> [#uses=1]
+ %c2 = bitcast i16 %tmp2 to i16 ; <i16> [#uses=1]
+ ret i16 %c2
+; CHECK: %tmp2 = lshr i16 %a, 8
+; CHECK: ret i16 %tmp2
+}
+
+; icmp sgt i32 %a, -1
+; rdar://6480391
+define i1 @test36(i32 %a) {
+ %b = lshr i32 %a, 31
+ %c = trunc i32 %b to i8
+ %d = icmp eq i8 %c, 0
+ ret i1 %d
+; CHECK: %d = icmp sgt i32 %a, -1
+; CHECK: ret i1 %d
+}
+
+; ret i1 false
+define i1 @test37(i32 %a) {
+ %b = lshr i32 %a, 31
+ %c = or i32 %b, 512
+ %d = trunc i32 %c to i8
+ %e = icmp eq i8 %d, 11
+ ret i1 %e
+; CHECK: ret i1 false
+}
+
+define i64 @test38(i32 %a) {
+ %1 = icmp eq i32 %a, -2
+ %2 = zext i1 %1 to i8
+ %3 = xor i8 %2, 1
+ %4 = zext i8 %3 to i64
+ ret i64 %4
+; CHECK: %1 = icmp ne i32 %a, -2
+; CHECK: %2 = zext i1 %1 to i64
+; CHECK: ret i64 %2
+}
+
+define i16 @test39(i16 %a) {
+ %tmp = zext i16 %a to i32
+ %tmp21 = lshr i32 %tmp, 8
+ %tmp5 = shl i32 %tmp, 8
+ %tmp.upgrd.32 = or i32 %tmp21, %tmp5
+ %tmp.upgrd.3 = trunc i32 %tmp.upgrd.32 to i16
+ ret i16 %tmp.upgrd.3
+; CHECK: @test39
+; CHECK: %tmp.upgrd.32 = call i16 @llvm.bswap.i16(i16 %a)
+; CHECK: ret i16 %tmp.upgrd.32
+}
+
+define i16 @test40(i16 %a) {
+ %tmp = zext i16 %a to i32
+ %tmp21 = lshr i32 %tmp, 9
+ %tmp5 = shl i32 %tmp, 8
+ %tmp.upgrd.32 = or i32 %tmp21, %tmp5
+ %tmp.upgrd.3 = trunc i32 %tmp.upgrd.32 to i16
+ ret i16 %tmp.upgrd.3
+; CHECK: @test40
+; CHECK: %tmp21 = lshr i16 %a, 9
+; CHECK: %tmp5 = shl i16 %a, 8
+; CHECK: %tmp.upgrd.32 = or i16 %tmp21, %tmp5
+; CHECK: ret i16 %tmp.upgrd.32
+}
+
+; PR1263
+define i32* @test41(i32* %tmp1) {
+ %tmp64 = bitcast i32* %tmp1 to { i32 }*
+ %tmp65 = getelementptr { i32 }* %tmp64, i32 0, i32 0
+ ret i32* %tmp65
+; CHECK: @test41
+; CHECK: ret i32* %tmp1
+}
+
+define i32 @test42(i32 %X) {
+ %Y = trunc i32 %X to i8 ; <i8> [#uses=1]
+ %Z = zext i8 %Y to i32 ; <i32> [#uses=1]
+ ret i32 %Z
+; CHECK: @test42
+; CHECK: %Z = and i32 %X, 255
+}
+
+; rdar://6598839
+define zeroext i64 @test43(i8 zeroext %on_off) nounwind readonly {
+ %A = zext i8 %on_off to i32
+ %B = add i32 %A, -1
+ %C = sext i32 %B to i64
+ ret i64 %C ;; Should be (add (zext i8 -> i64), -1)
+; CHECK: @test43
+; CHECK-NEXT: %A = zext i8 %on_off to i64
+; CHECK-NEXT: %B = add i64 %A, -1
+; CHECK-NEXT: ret i64 %B
+}
+
+define i64 @test44(i8 %T) {
+ %A = zext i8 %T to i16
+ %B = or i16 %A, 1234
+ %C = zext i16 %B to i64
+ ret i64 %C
+; CHECK: @test44
+; CHECK-NEXT: %A = zext i8 %T to i64
+; CHECK-NEXT: %B = or i64 %A, 1234
+; CHECK-NEXT: ret i64 %B
+}
+
+define i64 @test45(i8 %A, i64 %Q) {
+ %D = trunc i64 %Q to i32 ;; should be removed
+ %B = sext i8 %A to i32
+ %C = or i32 %B, %D
+ %E = zext i32 %C to i64
+ ret i64 %E
+; CHECK: @test45
+; CHECK-NEXT: %B = sext i8 %A to i64
+; CHECK-NEXT: %C = or i64 %B, %Q
+; CHECK-NEXT: %E = and i64 %C, 4294967295
+; CHECK-NEXT: ret i64 %E
+}
+
+
+define i64 @test46(i64 %A) {
+ %B = trunc i64 %A to i32
+ %C = and i32 %B, 42
+ %D = shl i32 %C, 8
+ %E = zext i32 %D to i64
+ ret i64 %E
+; CHECK: @test46
+; CHECK-NEXT: %C = shl i64 %A, 8
+; CHECK-NEXT: %D = and i64 %C, 10752
+; CHECK-NEXT: ret i64 %D
+}
+
+define i64 @test47(i8 %A) {
+ %B = sext i8 %A to i32
+ %C = or i32 %B, 42
+ %E = zext i32 %C to i64
+ ret i64 %E
+; CHECK: @test47
+; CHECK-NEXT: %B = sext i8 %A to i64
+; CHECK-NEXT: %C = or i64 %B, 42
+; CHECK-NEXT: %E = and i64 %C, 4294967295
+; CHECK-NEXT: ret i64 %E
+}
+
+define i64 @test48(i8 %A, i8 %a) {
+ %b = zext i8 %a to i32
+ %B = zext i8 %A to i32
+ %C = shl i32 %B, 8
+ %D = or i32 %C, %b
+ %E = zext i32 %D to i64
+ ret i64 %E
+; CHECK: @test48
+; CHECK-NEXT: %b = zext i8 %a to i64
+; CHECK-NEXT: %B = zext i8 %A to i64
+; CHECK-NEXT: %C = shl i64 %B, 8
+; CHECK-NEXT: %D = or i64 %C, %b
+; CHECK-NEXT: ret i64 %D
+}
+
+define i64 @test49(i64 %A) {
+ %B = trunc i64 %A to i32
+ %C = or i32 %B, 1
+ %D = sext i32 %C to i64
+ ret i64 %D
+; CHECK: @test49
+; CHECK-NEXT: %C = shl i64 %A, 32
+; CHECK-NEXT: ashr i64 %C, 32
+; CHECK-NEXT: %D = or i64 {{.*}}, 1
+; CHECK-NEXT: ret i64 %D
+}
+
+define i64 @test50(i64 %A) {
+ %a = lshr i64 %A, 2
+ %B = trunc i64 %a to i32
+ %D = add i32 %B, -1
+ %E = sext i32 %D to i64
+ ret i64 %E
+; CHECK: @test50
+; CHECK-NEXT: shl i64 %A, 30
+; CHECK-NEXT: add i64 {{.*}}, -4294967296
+; CHECK-NEXT: %E = ashr i64 {{.*}}, 32
+; CHECK-NEXT: ret i64 %E
+}
+
+define i64 @test51(i64 %A, i1 %cond) {
+ %B = trunc i64 %A to i32
+ %C = and i32 %B, -2
+ %D = or i32 %B, 1
+ %E = select i1 %cond, i32 %C, i32 %D
+ %F = sext i32 %E to i64
+ ret i64 %F
+; CHECK: @test51
+
+; FIXME: disabled, see PR5997
+; HECK-NEXT: %C = and i64 %A, 4294967294
+; HECK-NEXT: %D = or i64 %A, 1
+; HECK-NEXT: %E = select i1 %cond, i64 %C, i64 %D
+; HECK-NEXT: %sext = shl i64 %E, 32
+; HECK-NEXT: %F = ashr i64 %sext, 32
+; HECK-NEXT: ret i64 %F
+}
+
+define i32 @test52(i64 %A) {
+ %B = trunc i64 %A to i16
+ %C = or i16 %B, -32574
+ %D = and i16 %C, -25350
+ %E = zext i16 %D to i32
+ ret i32 %E
+; CHECK: @test52
+; CHECK-NEXT: %B = trunc i64 %A to i32
+; CHECK-NEXT: %C = or i32 %B, 32962
+; CHECK-NEXT: %D = and i32 %C, 40186
+; CHECK-NEXT: ret i32 %D
+}
+
+define i64 @test53(i32 %A) {
+ %B = trunc i32 %A to i16
+ %C = or i16 %B, -32574
+ %D = and i16 %C, -25350
+ %E = zext i16 %D to i64
+ ret i64 %E
+; CHECK: @test53
+; CHECK-NEXT: %B = zext i32 %A to i64
+; CHECK-NEXT: %C = or i64 %B, 32962
+; CHECK-NEXT: %D = and i64 %C, 40186
+; CHECK-NEXT: ret i64 %D
+}
+
+define i32 @test54(i64 %A) {
+ %B = trunc i64 %A to i16
+ %C = or i16 %B, -32574
+ %D = and i16 %C, -25350
+ %E = sext i16 %D to i32
+ ret i32 %E
+; CHECK: @test54
+; CHECK-NEXT: %B = trunc i64 %A to i32
+; CHECK-NEXT: %C = or i32 %B, -32574
+; CHECK-NEXT: %D = and i32 %C, -25350
+; CHECK-NEXT: ret i32 %D
+}
+
+define i64 @test55(i32 %A) {
+ %B = trunc i32 %A to i16
+ %C = or i16 %B, -32574
+ %D = and i16 %C, -25350
+ %E = sext i16 %D to i64
+ ret i64 %E
+; CHECK: @test55
+; CHECK-NEXT: %B = zext i32 %A to i64
+; CHECK-NEXT: %C = or i64 %B, -32574
+; CHECK-NEXT: %D = and i64 %C, -25350
+; CHECK-NEXT: ret i64 %D
+}
+
+define i64 @test56(i16 %A) nounwind {
+ %tmp353 = sext i16 %A to i32
+ %tmp354 = lshr i32 %tmp353, 5
+ %tmp355 = zext i32 %tmp354 to i64
+ ret i64 %tmp355
+; CHECK: @test56
+; CHECK-NEXT: %tmp353 = sext i16 %A to i64
+; CHECK-NEXT: %tmp354 = lshr i64 %tmp353, 5
+; CHECK-NEXT: %tmp355 = and i64 %tmp354, 134217727
+; CHECK-NEXT: ret i64 %tmp355
+}
+
+define i64 @test57(i64 %A) nounwind {
+ %B = trunc i64 %A to i32
+ %C = lshr i32 %B, 8
+ %E = zext i32 %C to i64
+ ret i64 %E
+; CHECK: @test57
+; CHECK-NEXT: %C = lshr i64 %A, 8
+; CHECK-NEXT: %E = and i64 %C, 16777215
+; CHECK-NEXT: ret i64 %E
+}
+
+define i64 @test58(i64 %A) nounwind {
+ %B = trunc i64 %A to i32
+ %C = lshr i32 %B, 8
+ %D = or i32 %C, 128
+ %E = zext i32 %D to i64
+ ret i64 %E
+
+; CHECK: @test58
+; CHECK-NEXT: %C = lshr i64 %A, 8
+; CHECK-NEXT: %D = or i64 %C, 128
+; CHECK-NEXT: %E = and i64 %D, 16777215
+; CHECK-NEXT: ret i64 %E
+}
+
+define i64 @test59(i8 %A, i8 %B) nounwind {
+ %C = zext i8 %A to i32
+ %D = shl i32 %C, 4
+ %E = and i32 %D, 48
+ %F = zext i8 %B to i32
+ %G = lshr i32 %F, 4
+ %H = or i32 %G, %E
+ %I = zext i32 %H to i64
+ ret i64 %I
+; CHECK: @test59
+; CHECK-NEXT: %C = zext i8 %A to i64
+; CHECK-NOT: i32
+; CHECK: %F = zext i8 %B to i64
+; CHECK-NOT: i32
+; CHECK: ret i64 %H
+}
diff --git a/test/Transforms/InstCombine/cast_ptr.ll b/test/Transforms/InstCombine/cast_ptr.ll
new file mode 100644
index 0000000..09910fb
--- /dev/null
+++ b/test/Transforms/InstCombine/cast_ptr.ll
@@ -0,0 +1,79 @@
+; Tests to make sure elimination of casts is working correctly
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "p:32:32"
+
+; This shouldn't convert to getelementptr because the relationship
+; between the arithmetic and the layout of allocated memory is
+; entirely unknown.
+; CHECK: @test1
+; CHECK: ptrtoint
+; CHECK: add
+; CHECK: inttoptr
+define i8* @test1(i8* %t) {
+ %tmpc = ptrtoint i8* %t to i32 ; <i32> [#uses=1]
+ %tmpa = add i32 %tmpc, 32 ; <i32> [#uses=1]
+ %tv = inttoptr i32 %tmpa to i8* ; <i8*> [#uses=1]
+ ret i8* %tv
+}
+
+; These casts should be folded away.
+; CHECK: @test2
+; CHECK: icmp eq i8* %a, %b
+define i1 @test2(i8* %a, i8* %b) {
+ %tmpa = ptrtoint i8* %a to i32 ; <i32> [#uses=1]
+ %tmpb = ptrtoint i8* %b to i32 ; <i32> [#uses=1]
+ %r = icmp eq i32 %tmpa, %tmpb ; <i1> [#uses=1]
+ ret i1 %r
+}
+
+; These casts should also be folded away.
+; CHECK: @test3
+; CHECK: icmp eq i8* %a, @global
+@global = global i8 0
+define i1 @test3(i8* %a) {
+ %tmpa = ptrtoint i8* %a to i32
+ %r = icmp eq i32 %tmpa, ptrtoint (i8* @global to i32)
+ ret i1 %r
+}
+
+define i1 @test4(i32 %A) {
+ %B = inttoptr i32 %A to i8*
+ %C = icmp eq i8* %B, null
+ ret i1 %C
+; CHECK: @test4
+; CHECK-NEXT: %C = icmp eq i32 %A, 0
+; CHECK-NEXT: ret i1 %C
+}
+
+
+; Pulling the cast out of the load allows us to eliminate the load, and then
+; the whole array.
+
+ %op = type { float }
+ %unop = type { i32 }
+@Array = internal constant [1 x %op* (%op*)*] [ %op* (%op*)* @foo ] ; <[1 x %op* (%op*)*]*> [#uses=1]
+
+declare %op* @foo(%op* %X)
+
+define %unop* @test5(%op* %O) {
+ %tmp = load %unop* (%op*)** bitcast ([1 x %op* (%op*)*]* @Array to %unop* (%op*)**); <%unop* (%op*)*> [#uses=1]
+ %tmp.2 = call %unop* %tmp( %op* %O ) ; <%unop*> [#uses=1]
+ ret %unop* %tmp.2
+; CHECK: @test5
+; CHECK: call %op* @foo(%op* %O)
+}
+
+
+
+; InstCombine can not 'load (cast P)' -> cast (load P)' if the cast changes
+; the address space.
+
+define i8 @test6(i8 addrspace(1)* %source) {
+entry:
+ %arrayidx223 = bitcast i8 addrspace(1)* %source to i8*
+ %tmp4 = load i8* %arrayidx223
+ ret i8 %tmp4
+; CHECK: @test6
+; CHECK: load i8* %arrayidx223
+}
diff --git a/test/Transforms/InstCombine/compare-signs.ll b/test/Transforms/InstCombine/compare-signs.ll
new file mode 100644
index 0000000..f8e4911
--- /dev/null
+++ b/test/Transforms/InstCombine/compare-signs.ll
@@ -0,0 +1,58 @@
+; RUN: opt %s -instcombine -S | FileCheck %s
+; PR5438
+
+; TODO: This should also optimize down.
+;define i32 @test1(i32 %a, i32 %b) nounwind readnone {
+;entry:
+; %0 = icmp sgt i32 %a, -1 ; <i1> [#uses=1]
+; %1 = icmp slt i32 %b, 0 ; <i1> [#uses=1]
+; %2 = xor i1 %1, %0 ; <i1> [#uses=1]
+; %3 = zext i1 %2 to i32 ; <i32> [#uses=1]
+; ret i32 %3
+;}
+
+; TODO: This optimizes partially but not all the way.
+;define i32 @test2(i32 %a, i32 %b) nounwind readnone {
+;entry:
+; %0 = and i32 %a, 8 ;<i32> [#uses=1]
+; %1 = and i32 %b, 8 ;<i32> [#uses=1]
+; %2 = icmp eq i32 %0, %1 ;<i1> [#uses=1]
+; %3 = zext i1 %2 to i32 ;<i32> [#uses=1]
+; ret i32 %3
+;}
+
+define i32 @test3(i32 %a, i32 %b) nounwind readnone {
+; CHECK: @test3
+entry:
+; CHECK: xor i32 %a, %b
+; CHECK: lshr i32 %0, 31
+; CHECK: xor i32 %1, 1
+ %0 = lshr i32 %a, 31 ; <i32> [#uses=1]
+ %1 = lshr i32 %b, 31 ; <i32> [#uses=1]
+ %2 = icmp eq i32 %0, %1 ; <i1> [#uses=1]
+ %3 = zext i1 %2 to i32 ; <i32> [#uses=1]
+ ret i32 %3
+; CHECK-NOT: icmp
+; CHECK-NOT: zext
+; CHECK: ret i32 %2
+}
+
+; Variation on @test3: checking the 2nd bit in a situation where the 5th bit
+; is one, not zero.
+define i32 @test3i(i32 %a, i32 %b) nounwind readnone {
+; CHECK: @test3i
+entry:
+; CHECK: xor i32 %a, %b
+; CHECK: lshr i32 %0, 31
+; CHECK: xor i32 %1, 1
+ %0 = lshr i32 %a, 29 ; <i32> [#uses=1]
+ %1 = lshr i32 %b, 29 ; <i32> [#uses=1]
+ %2 = or i32 %0, 35
+ %3 = or i32 %1, 35
+ %4 = icmp eq i32 %2, %3 ; <i1> [#uses=1]
+ %5 = zext i1 %4 to i32 ; <i32> [#uses=1]
+ ret i32 %5
+; CHECK-NOT: icmp
+; CHECK-NOT: zext
+; CHECK: ret i32 %2
+}
diff --git a/test/Transforms/InstCombine/constant-fold-compare.ll b/test/Transforms/InstCombine/constant-fold-compare.ll
new file mode 100644
index 0000000..6e41e2f
--- /dev/null
+++ b/test/Transforms/InstCombine/constant-fold-compare.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
+
+define i32 @a() nounwind readnone {
+entry:
+ ret i32 zext (i1 icmp eq (i32 0, i32 ptrtoint (i32 ()* @a to i32)) to i32)
+}
+; CHECK: ret i32 0
diff --git a/test/Transforms/InstCombine/constant-fold-gep.ll b/test/Transforms/InstCombine/constant-fold-gep.ll
new file mode 100644
index 0000000..4be1a9c
--- /dev/null
+++ b/test/Transforms/InstCombine/constant-fold-gep.ll
@@ -0,0 +1,55 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
+
+; Constant folding should fix notionally out-of-bounds indices
+; and add inbounds keywords.
+
+%struct.X = type { [3 x i32], [3 x i32] }
+
+@Y = internal global [3 x %struct.X] zeroinitializer
+
+define void @frob() {
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 0), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 0), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 1), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 1), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 2), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 2), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 0), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 3), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 1), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 4), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 2), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 5), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 0), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 6), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 1), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 7), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 2), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 8), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 0), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 9), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 1), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 10), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 2), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 11), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 2, i32 0, i64 0), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 12), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 2, i32 0, i64 1), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 13), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 2, i32 0, i64 2), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 14), align 8
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 0), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 15), align 8
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 1), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 16), align 8
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 2), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 17), align 8
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 1, i64 0, i32 0, i64 0), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 18), align 8
+; CHECK: store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 2, i64 0, i32 0, i64 0), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 36), align 8
+; CHECK: store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 1, i64 0, i32 0, i64 1), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 19), align 8
+ ret void
+}
diff --git a/test/Transforms/InstCombine/constant-fold-ptr-casts.ll b/test/Transforms/InstCombine/constant-fold-ptr-casts.ll
new file mode 100644
index 0000000..9b6c6c3
--- /dev/null
+++ b/test/Transforms/InstCombine/constant-fold-ptr-casts.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -S | grep {ret i32 2143034560}
+
+; Instcombine should be able to completely fold this code.
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin8"
+
+@bar = constant [3 x i64] [i64 9220983451228067448, i64 9220983451228067449, i64 9220983450959631991], align 8
+
+define i32 @foo() nounwind {
+entry:
+ %tmp87.2 = load i64* inttoptr (i32 add (i32 16, i32 ptrtoint ([3 x i64]* @bar to i32)) to i64*), align 8
+ %t0 = bitcast i64 %tmp87.2 to double
+ %tmp9192.2 = fptrunc double %t0 to float
+ %t1 = bitcast float %tmp9192.2 to i32
+ ret i32 %t1
+}
+
diff --git a/test/Transforms/InstCombine/crash.ll b/test/Transforms/InstCombine/crash.ll
new file mode 100644
index 0000000..2faa539
--- /dev/null
+++ b/test/Transforms/InstCombine/crash.ll
@@ -0,0 +1,239 @@
+; RUN: opt < %s -instcombine -S
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128:n8:16:32"
+target triple = "i386-apple-darwin10.0"
+
+define i32 @test0(i8 %tmp2) ssp {
+entry:
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp8 = lshr i32 %tmp3, 6
+ %tmp9 = lshr i32 %tmp3, 7
+ %tmp10 = xor i32 %tmp9, 67108858
+ %tmp11 = xor i32 %tmp10, %tmp8
+ %tmp12 = xor i32 %tmp11, 0
+ ret i32 %tmp12
+}
+
+; PR4905
+define <2 x i64> @test1(<2 x i64> %x, <2 x i64> %y) nounwind {
+entry:
+ %conv.i94 = bitcast <2 x i64> %y to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %sub.i97 = sub <4 x i32> %conv.i94, undef ; <<4 x i32>> [#uses=1]
+ %conv3.i98 = bitcast <4 x i32> %sub.i97 to <2 x i64> ; <<2 x i64>> [#uses=2]
+ %conv2.i86 = bitcast <2 x i64> %conv3.i98 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %cmp.i87 = icmp sgt <4 x i32> undef, %conv2.i86 ; <<4 x i1>> [#uses=1]
+ %sext.i88 = sext <4 x i1> %cmp.i87 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %conv3.i89 = bitcast <4 x i32> %sext.i88 to <2 x i64> ; <<2 x i64>> [#uses=1]
+ %and.i = and <2 x i64> %conv3.i89, %conv3.i98 ; <<2 x i64>> [#uses=1]
+ %or.i = or <2 x i64> zeroinitializer, %and.i ; <<2 x i64>> [#uses=1]
+ %conv2.i43 = bitcast <2 x i64> %or.i to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %sub.i = sub <4 x i32> zeroinitializer, %conv2.i43 ; <<4 x i32>> [#uses=1]
+ %conv3.i44 = bitcast <4 x i32> %sub.i to <2 x i64> ; <<2 x i64>> [#uses=1]
+ ret <2 x i64> %conv3.i44
+}
+
+
+; PR4908
+define void @test2(<1 x i16>* nocapture %b, i32* nocapture %c) nounwind ssp {
+entry:
+ %arrayidx = getelementptr inbounds <1 x i16>* %b, i64 undef ; <<1 x i16>*>
+ %tmp2 = load <1 x i16>* %arrayidx ; <<1 x i16>> [#uses=1]
+ %tmp6 = bitcast <1 x i16> %tmp2 to i16 ; <i16> [#uses=1]
+ %tmp7 = zext i16 %tmp6 to i32 ; <i32> [#uses=1]
+ %ins = or i32 0, %tmp7 ; <i32> [#uses=1]
+ %arrayidx20 = getelementptr inbounds i32* %c, i64 undef ; <i32*> [#uses=1]
+ store i32 %ins, i32* %arrayidx20
+ ret void
+}
+
+; PR5262
+@tmp2 = global i64 0 ; <i64*> [#uses=1]
+
+declare void @use(i64) nounwind
+
+define void @foo(i1) nounwind align 2 {
+; <label>:1
+ br i1 %0, label %2, label %3
+
+; <label>:2 ; preds = %1
+ br label %3
+
+; <label>:3 ; preds = %2, %1
+ %4 = phi i8 [ 1, %2 ], [ 0, %1 ] ; <i8> [#uses=1]
+ %5 = icmp eq i8 %4, 0 ; <i1> [#uses=1]
+ %6 = load i64* @tmp2, align 8 ; <i64> [#uses=1]
+ %7 = select i1 %5, i64 0, i64 %6 ; <i64> [#uses=1]
+ br label %8
+
+; <label>:8 ; preds = %3
+ call void @use(i64 %7)
+ ret void
+}
+
+%t0 = type { i32, i32 }
+%t1 = type { i32, i32, i32, i32, i32* }
+
+declare %t0* @bar2(i64)
+
+define void @bar3(i1, i1) nounwind align 2 {
+; <label>:2
+ br i1 %1, label %10, label %3
+
+; <label>:3 ; preds = %2
+ %4 = getelementptr inbounds %t0* null, i64 0, i32 1 ; <i32*> [#uses=0]
+ %5 = getelementptr inbounds %t1* null, i64 0, i32 4 ; <i32**> [#uses=1]
+ %6 = load i32** %5, align 8 ; <i32*> [#uses=1]
+ %7 = icmp ne i32* %6, null ; <i1> [#uses=1]
+ %8 = zext i1 %7 to i32 ; <i32> [#uses=1]
+ %9 = add i32 %8, 0 ; <i32> [#uses=1]
+ br label %10
+
+; <label>:10 ; preds = %3, %2
+ %11 = phi i32 [ %9, %3 ], [ 0, %2 ] ; <i32> [#uses=1]
+ br i1 %1, label %12, label %13
+
+; <label>:12 ; preds = %10
+ br label %13
+
+; <label>:13 ; preds = %12, %10
+ %14 = zext i32 %11 to i64 ; <i64> [#uses=1]
+ %15 = tail call %t0* @bar2(i64 %14) nounwind ; <%0*> [#uses=0]
+ ret void
+}
+
+
+
+
+; PR5262
+; Make sure the PHI node gets put in a place where all of its operands dominate
+; it.
+define i64 @test4(i1 %c, i64* %P) nounwind align 2 {
+BB0:
+ br i1 %c, label %BB1, label %BB2
+
+BB1:
+ br label %BB2
+
+BB2:
+ %v5_ = phi i1 [ true, %BB0], [false, %BB1]
+ %v6 = load i64* %P
+ br label %l8
+
+l8:
+ br label %l10
+
+l10:
+ %v11 = select i1 %v5_, i64 0, i64 %v6
+ ret i64 %v11
+}
+
+; PR5471
+define arm_apcscc i32 @test5a() {
+ ret i32 0
+}
+
+define arm_apcscc void @test5() {
+ store i1 true, i1* undef
+ %1 = invoke i32 @test5a() to label %exit unwind label %exit
+exit:
+ ret void
+}
+
+
+; PR5673
+
+@test6g = external global i32*
+
+define arm_aapcs_vfpcc i32 @test6(i32 %argc, i8** %argv) nounwind {
+entry:
+ store i32* getelementptr (i32* bitcast (i32 (i32, i8**)* @test6 to i32*), i32 -2048), i32** @test6g, align 4
+ unreachable
+}
+
+
+; PR5827
+
+%class.RuleBasedBreakIterator = type { i64 ()* }
+%class.UStack = type { i8** }
+
+define i32 @_ZN22RuleBasedBreakIterator15checkDictionaryEi(%class.RuleBasedBreakIterator* %this, i32 %x) align 2 {
+entry:
+ %breaks = alloca %class.UStack, align 4 ; <%class.UStack*> [#uses=3]
+ call void @_ZN6UStackC1Ei(%class.UStack* %breaks, i32 0)
+ %tobool = icmp ne i32 %x, 0 ; <i1> [#uses=1]
+ br i1 %tobool, label %cond.end, label %cond.false
+
+terminate.handler: ; preds = %ehcleanup
+ %exc = call i8* @llvm.eh.exception() ; <i8*> [#uses=1]
+ %0 = call i32 (i8*, i8*, ...)* @llvm.eh.selector(i8* %exc, i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*), i32 1) ; <i32> [#uses=0]
+ call void @_ZSt9terminatev() noreturn nounwind
+ unreachable
+
+ehcleanup: ; preds = %cond.false
+ %exc1 = call i8* @llvm.eh.exception() ; <i8*> [#uses=2]
+ %1 = call i32 (i8*, i8*, ...)* @llvm.eh.selector(i8* %exc1, i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*), i8* null) ; <i32> [#uses=0]
+ invoke void @_ZN6UStackD1Ev(%class.UStack* %breaks)
+ to label %cont unwind label %terminate.handler
+
+cont: ; preds = %ehcleanup
+ call void @_Unwind_Resume_or_Rethrow(i8* %exc1)
+ unreachable
+
+cond.false: ; preds = %entry
+ %tmp4 = getelementptr inbounds %class.RuleBasedBreakIterator* %this, i32 0, i32 0 ; <i64 ()**> [#uses=1]
+ %tmp5 = load i64 ()** %tmp4 ; <i64 ()*> [#uses=1]
+ %call = invoke i64 %tmp5()
+ to label %cond.end unwind label %ehcleanup ; <i64> [#uses=1]
+
+cond.end: ; preds = %cond.false, %entry
+ %cond = phi i64 [ 0, %entry ], [ %call, %cond.false ] ; <i64> [#uses=1]
+ %conv = trunc i64 %cond to i32 ; <i32> [#uses=1]
+ call void @_ZN6UStackD1Ev(%class.UStack* %breaks)
+ ret i32 %conv
+}
+
+declare void @_ZN6UStackC1Ei(%class.UStack*, i32)
+
+declare void @_ZN6UStackD1Ev(%class.UStack*)
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i8* @llvm.eh.exception() nounwind readonly
+
+declare i32 @llvm.eh.selector(i8*, i8*, ...) nounwind
+
+declare void @_ZSt9terminatev()
+
+declare void @_Unwind_Resume_or_Rethrow(i8*)
+
+
+
+; rdar://7590304
+define i8* @test10(i8* %self, i8* %tmp3) {
+entry:
+ store i1 true, i1* undef
+ store i1 true, i1* undef
+ invoke arm_apcscc void @test10a()
+ to label %invoke.cont unwind label %try.handler ; <i8*> [#uses=0]
+
+invoke.cont: ; preds = %entry
+ unreachable
+
+try.handler: ; preds = %entry
+ ret i8* %self
+}
+
+define void @test10a() {
+ ret void
+}
+
+
+; PR6193
+define i32 @test11(i32 %aMaskWidth, i8 %aStride) nounwind {
+entry:
+ %conv41 = sext i8 %aStride to i32
+ %neg = xor i32 %conv41, -1
+ %and42 = and i32 %aMaskWidth, %neg
+ %and47 = and i32 130, %conv41
+ %or = or i32 %and42, %and47
+ ret i32 %or
+}
diff --git a/test/Transforms/InstCombine/dce-iterate.ll b/test/Transforms/InstCombine/dce-iterate.ll
new file mode 100644
index 0000000..1d2cc53
--- /dev/null
+++ b/test/Transforms/InstCombine/dce-iterate.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -instcombine -S | grep {ret double .sy}
+
+define internal double @ScaleObjectAdd(double %sx, double %sy, double %sz) nounwind {
+entry:
+ %sx34 = bitcast double %sx to i64 ; <i64> [#uses=1]
+ %sx3435 = zext i64 %sx34 to i960 ; <i960> [#uses=1]
+ %sy22 = bitcast double %sy to i64 ; <i64> [#uses=1]
+ %sy2223 = zext i64 %sy22 to i960 ; <i960> [#uses=1]
+ %sy222324 = shl i960 %sy2223, 320 ; <i960> [#uses=1]
+ %sy222324.ins = or i960 %sx3435, %sy222324 ; <i960> [#uses=1]
+ %sz10 = bitcast double %sz to i64 ; <i64> [#uses=1]
+ %sz1011 = zext i64 %sz10 to i960 ; <i960> [#uses=1]
+ %sz101112 = shl i960 %sz1011, 640 ; <i960> [#uses=1]
+ %sz101112.ins = or i960 %sy222324.ins, %sz101112
+
+ %a = trunc i960 %sz101112.ins to i64 ; <i64> [#uses=1]
+ %b = bitcast i64 %a to double ; <double> [#uses=1]
+ %c = lshr i960 %sz101112.ins, 320 ; <i960> [#uses=1]
+ %d = trunc i960 %c to i64 ; <i64> [#uses=1]
+ %e = bitcast i64 %d to double ; <double> [#uses=1]
+ %f = fadd double %b, %e
+
+ ret double %e
+}
diff --git a/test/Transforms/InstCombine/deadcode.ll b/test/Transforms/InstCombine/deadcode.ll
new file mode 100644
index 0000000..52af0ef
--- /dev/null
+++ b/test/Transforms/InstCombine/deadcode.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -instcombine -S | grep {ret i32 %A}
+; RUN: opt < %s -die -S | not grep call.*llvm.stacksave
+
+define i32 @test(i32 %A) {
+ %X = or i1 false, false
+ br i1 %X, label %T, label %C
+
+T: ; preds = %0
+ %B = add i32 %A, 1
+ br label %C
+
+C: ; preds = %T, %0
+ %C.upgrd.1 = phi i32 [ %B, %T ], [ %A, %0 ]
+ ret i32 %C.upgrd.1
+}
+
+define i32* @test2(i32 %width) {
+ %tmp = call i8* @llvm.stacksave( )
+ %tmp14 = alloca i32, i32 %width
+ ret i32* %tmp14
+}
+
+declare i8* @llvm.stacksave()
+
diff --git a/test/Transforms/InstCombine/dg.exp b/test/Transforms/InstCombine/dg.exp
new file mode 100644
index 0000000..f200589
--- /dev/null
+++ b/test/Transforms/InstCombine/dg.exp
@@ -0,0 +1,3 @@
+load_lib llvm.exp
+
+RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
diff --git a/test/Transforms/InstCombine/div-cmp-overflow.ll b/test/Transforms/InstCombine/div-cmp-overflow.ll
new file mode 100644
index 0000000..6f63adc
--- /dev/null
+++ b/test/Transforms/InstCombine/div-cmp-overflow.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | not grep sdiv
+; PR2740
+
+define i1 @func_75(i32 %i2) nounwind {
+ %i3 = sdiv i32 %i2, -1328634635
+ %i4 = icmp eq i32 %i3, -1
+ ret i1 %i4
+}
diff --git a/test/Transforms/InstCombine/div.ll b/test/Transforms/InstCombine/div.ll
new file mode 100644
index 0000000..0d13980
--- /dev/null
+++ b/test/Transforms/InstCombine/div.ll
@@ -0,0 +1,84 @@
+; This test makes sure that div instructions are properly eliminated.
+
+; RUN: opt < %s -instcombine -S | not grep div
+
+define i32 @test1(i32 %A) {
+ %B = sdiv i32 %A, 1 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test2(i32 %A) {
+ ; => Shift
+ %B = udiv i32 %A, 8 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test3(i32 %A) {
+ ; => 0, don't need to keep traps
+ %B = sdiv i32 0, %A ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test4(i32 %A) {
+ ; 0-A
+ %B = sdiv i32 %A, -1 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test5(i32 %A) {
+ %B = udiv i32 %A, -16 ; <i32> [#uses=1]
+ %C = udiv i32 %B, -4 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i1 @test6(i32 %A) {
+ %B = udiv i32 %A, 123 ; <i32> [#uses=1]
+ ; A < 123
+ %C = icmp eq i32 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test7(i32 %A) {
+ %B = udiv i32 %A, 10 ; <i32> [#uses=1]
+ ; A >= 20 && A < 30
+ %C = icmp eq i32 %B, 2 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test8(i8 %A) {
+ %B = udiv i8 %A, 123 ; <i8> [#uses=1]
+ ; A >= 246
+ %C = icmp eq i8 %B, 2 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test9(i8 %A) {
+ %B = udiv i8 %A, 123 ; <i8> [#uses=1]
+ ; A < 246
+ %C = icmp ne i8 %B, 2 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i32 @test10(i32 %X, i1 %C) {
+ %V = select i1 %C, i32 64, i32 8 ; <i32> [#uses=1]
+ %R = udiv i32 %X, %V ; <i32> [#uses=1]
+ ret i32 %R
+}
+
+define i32 @test11(i32 %X, i1 %C) {
+ %A = select i1 %C, i32 1024, i32 32 ; <i32> [#uses=1]
+ %B = udiv i32 %X, %A ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+; PR2328
+define i32 @test12(i32 %x) nounwind {
+ %tmp3 = udiv i32 %x, %x ; 1
+ ret i32 %tmp3
+}
+
+define i32 @test13(i32 %x) nounwind {
+ %tmp3 = sdiv i32 %x, %x ; 1
+ ret i32 %tmp3
+}
+
diff --git a/test/Transforms/InstCombine/enforce-known-alignment.ll b/test/Transforms/InstCombine/enforce-known-alignment.ll
new file mode 100644
index 0000000..9e9be7f
--- /dev/null
+++ b/test/Transforms/InstCombine/enforce-known-alignment.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -S | grep alloca | grep {align 16}
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.6"
+
+define void @foo(i32) {
+ %2 = alloca [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>], align 16 ; <[3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>]*> [#uses=1]
+ %3 = getelementptr [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>]* %2, i32 0, i32 0 ; <<{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>*> [#uses=1]
+ %4 = getelementptr <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>* %3, i32 0, i32 0 ; <{ { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }*> [#uses=1]
+ %5 = getelementptr { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }* %4, i32 0, i32 0 ; <{ [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 }*> [#uses=1]
+ %6 = bitcast { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 }* %5 to { [8 x i16] }* ; <{ [8 x i16] }*> [#uses=1]
+ %7 = getelementptr { [8 x i16] }* %6, i32 0, i32 0 ; <[8 x i16]*> [#uses=1]
+ %8 = getelementptr [8 x i16]* %7, i32 0, i32 0 ; <i16*> [#uses=1]
+ store i16 0, i16* %8, align 16
+ call void @bar(i16* %8)
+ ret void
+}
+
+declare void @bar(i16*)
diff --git a/test/Transforms/InstCombine/exact-sdiv.ll b/test/Transforms/InstCombine/exact-sdiv.ll
new file mode 100644
index 0000000..e567754
--- /dev/null
+++ b/test/Transforms/InstCombine/exact-sdiv.ll
@@ -0,0 +1,52 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; CHECK: define i32 @foo
+; CHECK: sdiv i32 %x, 8
+define i32 @foo(i32 %x) {
+ %y = sdiv i32 %x, 8
+ ret i32 %y
+}
+
+; CHECK: define i32 @bar
+; CHECK: ashr i32 %x, 3
+define i32 @bar(i32 %x) {
+ %y = sdiv exact i32 %x, 8
+ ret i32 %y
+}
+
+; CHECK: i32 @a0
+; CHECK: %y = srem i32 %x, 3
+; CHECK: %z = sub i32 %x, %y
+; CHECK: ret i32 %z
+define i32 @a0(i32 %x) {
+ %y = sdiv i32 %x, 3
+ %z = mul i32 %y, 3
+ ret i32 %z
+}
+
+; CHECK: i32 @b0
+; CHECK: ret i32 %x
+define i32 @b0(i32 %x) {
+ %y = sdiv exact i32 %x, 3
+ %z = mul i32 %y, 3
+ ret i32 %z
+}
+
+; CHECK: i32 @a1
+; CHECK: %y = srem i32 %x, 3
+; CHECK: %z = sub i32 %y, %x
+; CHECK: ret i32 %z
+define i32 @a1(i32 %x) {
+ %y = sdiv i32 %x, 3
+ %z = mul i32 %y, -3
+ ret i32 %z
+}
+
+; CHECK: i32 @b1
+; CHECK: %z = sub i32 0, %x
+; CHECK: ret i32 %z
+define i32 @b1(i32 %x) {
+ %y = sdiv exact i32 %x, 3
+ %z = mul i32 %y, -3
+ ret i32 %z
+}
diff --git a/test/Transforms/InstCombine/extractvalue.ll b/test/Transforms/InstCombine/extractvalue.ll
new file mode 100644
index 0000000..875f860
--- /dev/null
+++ b/test/Transforms/InstCombine/extractvalue.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -instcombine -S | not grep extractvalue
+
+; Instcombine should fold various combinations of insertvalue and extractvalue
+; together
+declare void @bar({i32, i32} %a)
+
+define i32 @foo() {
+ ; Build a simple struct and pull values out again
+ %s1.1 = insertvalue {i32, i32} undef, i32 0, 0
+ %s1 = insertvalue {i32, i32} %s1.1, i32 1, 1
+ %v1 = extractvalue {i32, i32} %s1, 0
+ %v2 = extractvalue {i32, i32} %s1, 1
+
+ ; Build a nested struct and pull a sub struct out of it
+ ; This requires instcombine to insert a few insertvalue instructions
+ %ns1.1 = insertvalue {i32, {i32, i32}} undef, i32 %v1, 0
+ %ns1.2 = insertvalue {i32, {i32, i32}} %ns1.1, i32 %v1, 1, 0
+ %ns1 = insertvalue {i32, {i32, i32}} %ns1.2, i32 %v2, 1, 1
+ %s2 = extractvalue {i32, {i32, i32}} %ns1, 1
+ %v3 = extractvalue {i32, {i32, i32}} %ns1, 1, 1
+ call void @bar({i32, i32} %s2)
+
+ ; Use nested extractvalues to get to a value
+ %s3 = extractvalue {i32, {i32, i32}} %ns1, 1
+ %v4 = extractvalue {i32, i32} %s3, 1
+ call void @bar({i32, i32} %s3)
+
+ ; Use nested insertvalues to build a nested struct
+ %s4.1 = insertvalue {i32, i32} undef, i32 %v3, 0
+ %s4 = insertvalue {i32, i32} %s4.1, i32 %v4, 1
+ %ns2 = insertvalue {i32, {i32, i32}} undef, {i32, i32} %s4, 1
+
+ ; And now extract a single value from there
+ %v5 = extractvalue {i32, {i32, i32}} %ns2, 1, 1
+
+ ret i32 %v5
+}
+
diff --git a/test/Transforms/InstCombine/fold-bin-operand.ll b/test/Transforms/InstCombine/fold-bin-operand.ll
new file mode 100644
index 0000000..d0d072a
--- /dev/null
+++ b/test/Transforms/InstCombine/fold-bin-operand.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | not grep icmp
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+define i1 @f(i1 %x) {
+ %b = and i1 %x, icmp eq (i8* inttoptr (i32 1 to i8*), i8* inttoptr (i32 2 to i8*))
+ ret i1 %b
+}
+
+; FIXME: This doesn't fold at the moment!
+; define i32 @f(i32 %x) {
+; %b = add i32 %x, zext (i1 icmp eq (i8* inttoptr (i32 1000000 to i8*), i8* inttoptr (i32 2000000 to i8*)) to i32)
+; ret i32 %b
+;}
+
diff --git a/test/Transforms/InstCombine/fold-vector-zero.ll b/test/Transforms/InstCombine/fold-vector-zero.ll
new file mode 100644
index 0000000..e1d86b6
--- /dev/null
+++ b/test/Transforms/InstCombine/fold-vector-zero.ll
@@ -0,0 +1,35 @@
+; RUN: opt < %s -instcombine -S | not grep zeroinitializer
+
+define void @foo(i64 %A, i64 %B) {
+bb8:
+ br label %bb30
+
+bb30:
+ %s0 = phi i64 [ 0, %bb8 ], [ %r21, %bb30 ]
+ %l0 = phi i64 [ -2222, %bb8 ], [ %r23, %bb30 ]
+ %r2 = add i64 %s0, %B
+ %r3 = inttoptr i64 %r2 to <2 x double>*
+ %r4 = load <2 x double>* %r3, align 8
+ %r6 = bitcast <2 x double> %r4 to <2 x i64>
+ %r7 = bitcast <2 x double> zeroinitializer to <2 x i64>
+ %r8 = insertelement <2 x i64> undef, i64 9223372036854775807, i32 0
+ %r9 = insertelement <2 x i64> undef, i64 -9223372036854775808, i32 0
+ %r10 = insertelement <2 x i64> %r8, i64 9223372036854775807, i32 1
+ %r11 = insertelement <2 x i64> %r9, i64 -9223372036854775808, i32 1
+ %r12 = and <2 x i64> %r6, %r10
+ %r13 = and <2 x i64> %r7, %r11
+ %r14 = or <2 x i64> %r12, %r13
+ %r15 = bitcast <2 x i64> %r14 to <2 x double>
+ %r18 = add i64 %s0, %A
+ %r19 = inttoptr i64 %r18 to <2 x double>*
+ store <2 x double> %r15, <2 x double>* %r19, align 8
+ %r21 = add i64 16, %s0
+ %r23 = add i64 1, %l0
+ %r25 = icmp slt i64 %r23, 0
+ %r26 = zext i1 %r25 to i64
+ %r27 = icmp ne i64 %r26, 0
+ br i1 %r27, label %bb30, label %bb5
+
+bb5:
+ ret void
+}
diff --git a/test/Transforms/InstCombine/fp-ret-bitcast.ll b/test/Transforms/InstCombine/fp-ret-bitcast.ll
new file mode 100644
index 0000000..35ece42
--- /dev/null
+++ b/test/Transforms/InstCombine/fp-ret-bitcast.ll
@@ -0,0 +1,28 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {call float bitcast} | count 1
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+ %struct.NSObject = type { %struct.objc_class* }
+ %struct.NSArray = type { %struct.NSObject }
+ %struct.objc_class = type opaque
+ %struct.objc_selector = type opaque
+
+@"\01L_OBJC_METH_VAR_NAME_112" = internal global [15 x i8] c"whiteComponent\00", section "__TEXT,__cstring,cstring_literals"
+@"\01L_OBJC_SELECTOR_REFERENCES_81" = internal global %struct.objc_selector* bitcast ([15 x i8]* @"\01L_OBJC_METH_VAR_NAME_112" to %struct.objc_selector*), section "__OBJC,__message_refs,literal_pointers,no_dead_strip"
+
+define void @bork() nounwind {
+entry:
+ %color = alloca %struct.NSArray*
+ %color.466 = alloca %struct.NSObject*
+ %tmp103 = load %struct.NSArray** %color, align 4
+ %tmp103104 = getelementptr %struct.NSArray* %tmp103, i32 0, i32 0
+ store %struct.NSObject* %tmp103104, %struct.NSObject** %color.466, align 4
+ %tmp105 = load %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_81", align 4
+ %tmp106 = load %struct.NSObject** %color.466, align 4
+ %tmp107 = call float bitcast (void (%struct.NSObject*, ...)* @objc_msgSend_fpret to float (%struct.NSObject*, %struct.objc_selector*)*)( %struct.NSObject* %tmp106, %struct.objc_selector* %tmp105 ) nounwind
+ br label %exit
+
+exit:
+ ret void
+}
+
+declare void @objc_msgSend_fpret(%struct.NSObject*, ...)
diff --git a/test/Transforms/InstCombine/fpcast.ll b/test/Transforms/InstCombine/fpcast.ll
new file mode 100644
index 0000000..bc6aa0a
--- /dev/null
+++ b/test/Transforms/InstCombine/fpcast.ll
@@ -0,0 +1,15 @@
+; Test some floating point casting cases
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i8 @test1() {
+ %x = fptoui float 2.550000e+02 to i8 ; <i8> [#uses=1]
+ ret i8 %x
+; CHECK: ret i8 -1
+}
+
+define i8 @test2() {
+ %x = fptosi float -1.000000e+00 to i8 ; <i8> [#uses=1]
+ ret i8 %x
+; CHECK: ret i8 -1
+}
+
diff --git a/test/Transforms/InstCombine/fpextend.ll b/test/Transforms/InstCombine/fpextend.ll
new file mode 100644
index 0000000..70e0c62
--- /dev/null
+++ b/test/Transforms/InstCombine/fpextend.ll
@@ -0,0 +1,36 @@
+; RUN: opt < %s -instcombine -S | not grep fpext
+@X = external global float
+@Y = external global float
+
+define void @test() nounwind {
+entry:
+ %tmp = load float* @X, align 4 ; <float> [#uses=1]
+ %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
+ %tmp3 = fadd double %tmp1, 0.000000e+00 ; <double> [#uses=1]
+ %tmp34 = fptrunc double %tmp3 to float ; <float> [#uses=1]
+ store float %tmp34, float* @X, align 4
+ ret void
+}
+
+define void @test3() nounwind {
+entry:
+ %tmp = load float* @X, align 4 ; <float> [#uses=1]
+ %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
+ %tmp2 = load float* @Y, align 4 ; <float> [#uses=1]
+ %tmp23 = fpext float %tmp2 to double ; <double> [#uses=1]
+ %tmp5 = fdiv double %tmp1, %tmp23 ; <double> [#uses=1]
+ %tmp56 = fptrunc double %tmp5 to float ; <float> [#uses=1]
+ store float %tmp56, float* @X, align 4
+ ret void
+}
+
+define void @test4() nounwind {
+entry:
+ %tmp = load float* @X, align 4 ; <float> [#uses=1]
+ %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
+ %tmp2 = fsub double -0.000000e+00, %tmp1 ; <double> [#uses=1]
+ %tmp34 = fptrunc double %tmp2 to float ; <float> [#uses=1]
+ store float %tmp34, float* @X, align 4
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/fsub.ll b/test/Transforms/InstCombine/fsub.ll
new file mode 100644
index 0000000..af2fadd
--- /dev/null
+++ b/test/Transforms/InstCombine/fsub.ll
@@ -0,0 +1,23 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; PR4374
+define float @test1(float %a, float %b) nounwind {
+ %t1 = fsub float %a, %b
+ %t2 = fsub float -0.000000e+00, %t1
+
+; CHECK: %t1 = fsub float %a, %b
+; CHECK-NEXT: %t2 = fsub float -0.000000e+00, %t1
+
+ ret float %t2
+}
+
+; <rdar://problem/7530098>
+define double @test2(double %x, double %y) nounwind {
+ %t1 = fadd double %x, %y
+ %t2 = fsub double %x, %t1
+
+; CHECK: %t1 = fadd double %x, %y
+; CHECK-NEXT: %t2 = fsub double %x, %t1
+
+ ret double %t2
+}
diff --git a/test/Transforms/InstCombine/getelementptr.ll b/test/Transforms/InstCombine/getelementptr.ll
new file mode 100644
index 0000000..f0bee4e
--- /dev/null
+++ b/test/Transforms/InstCombine/getelementptr.ll
@@ -0,0 +1,470 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64"
+%intstruct = type { i32 }
+%pair = type { i32, i32 }
+%struct.B = type { double }
+%struct.A = type { %struct.B, i32, i32 }
+
+
+@Global = constant [10 x i8] c"helloworld"
+
+; Test noop elimination
+define i32* @test1(i32* %I) {
+ %A = getelementptr i32* %I, i64 0
+ ret i32* %A
+; CHECK: @test1
+; CHECK: ret i32* %I
+}
+
+; Test noop elimination
+define i32* @test2(i32* %I) {
+ %A = getelementptr i32* %I
+ ret i32* %A
+; CHECK: @test2
+; CHECK: ret i32* %I
+}
+
+; Test that two array indexing geps fold
+define i32* @test3(i32* %I) {
+ %A = getelementptr i32* %I, i64 17
+ %B = getelementptr i32* %A, i64 4
+ ret i32* %B
+; CHECK: @test3
+; CHECK: getelementptr i32* %I, i64 21
+}
+
+; Test that two getelementptr insts fold
+define i32* @test4({ i32 }* %I) {
+ %A = getelementptr { i32 }* %I, i64 1
+ %B = getelementptr { i32 }* %A, i64 0, i32 0
+ ret i32* %B
+; CHECK: @test4
+; CHECK: getelementptr %intstruct* %I, i64 1, i32 0
+}
+
+define void @test5(i8 %B) {
+ ; This should be turned into a constexpr instead of being an instruction
+ %A = getelementptr [10 x i8]* @Global, i64 0, i64 4
+ store i8 %B, i8* %A
+ ret void
+; CHECK: @test5
+; CHECK: store i8 %B, i8* getelementptr inbounds ([10 x i8]* @Global, i64 0, i64 4)
+}
+
+define i32* @test6() {
+ %M = malloc [4 x i32]
+ %A = getelementptr [4 x i32]* %M, i64 0, i64 0
+ %B = getelementptr i32* %A, i64 2
+ ret i32* %B
+; CHECK: @test6
+; CHECK: getelementptr i8* %malloccall, i64 8
+}
+
+define i32* @test7(i32* %I, i64 %C, i64 %D) {
+ %A = getelementptr i32* %I, i64 %C
+ %B = getelementptr i32* %A, i64 %D
+ ret i32* %B
+; CHECK: @test7
+; CHECK: %A.sum = add i64 %C, %D
+; CHECK: getelementptr i32* %I, i64 %A.sum
+}
+
+define i8* @test8([10 x i32]* %X) {
+ ;; Fold into the cast.
+ %A = getelementptr [10 x i32]* %X, i64 0, i64 0
+ %B = bitcast i32* %A to i8*
+ ret i8* %B
+; CHECK: @test8
+; CHECK: bitcast [10 x i32]* %X to i8*
+}
+
+define i32 @test9() {
+ %A = getelementptr { i32, double }* null, i32 0, i32 1
+ %B = ptrtoint double* %A to i32
+ ret i32 %B
+; CHECK: @test9
+; CHECK: ret i32 8
+}
+
+define i1 @test10({ i32, i32 }* %x, { i32, i32 }* %y) {
+ %tmp.1 = getelementptr { i32, i32 }* %x, i32 0, i32 1
+ %tmp.3 = getelementptr { i32, i32 }* %y, i32 0, i32 1
+ ;; seteq x, y
+ %tmp.4 = icmp eq i32* %tmp.1, %tmp.3
+ ret i1 %tmp.4
+; CHECK: @test10
+; CHECK: icmp eq %pair* %x, %y
+}
+
+define i1 @test11({ i32, i32 }* %X) {
+ %P = getelementptr { i32, i32 }* %X, i32 0, i32 0
+ %Q = icmp eq i32* %P, null
+ ret i1 %Q
+; CHECK: @test11
+; CHECK: icmp eq %pair* %X, null
+}
+
+
+; PR4748
+define i32 @test12(%struct.A* %a) {
+entry:
+ %g3 = getelementptr %struct.A* %a, i32 0, i32 1
+ store i32 10, i32* %g3, align 4
+
+ %g4 = getelementptr %struct.A* %a, i32 0, i32 0
+
+ %new_a = bitcast %struct.B* %g4 to %struct.A*
+
+ %g5 = getelementptr %struct.A* %new_a, i32 0, i32 1
+ %a_a = load i32* %g5, align 4
+ ret i32 %a_a
+; CHECK: @test12
+; CHECK: getelementptr %struct.A* %a, i64 0, i32 1
+; CHECK-NEXT: store i32 10, i32* %g3
+; CHECK-NEXT: ret i32 10
+}
+
+
+; PR2235
+%S = type { i32, [ 100 x i32] }
+define i1 @test13(i64 %X, %S* %P) {
+ %A = getelementptr inbounds %S* %P, i32 0, i32 1, i64 %X
+ %B = getelementptr inbounds %S* %P, i32 0, i32 0
+ %C = icmp eq i32* %A, %B
+ ret i1 %C
+; CHECK: @test13
+; CHECK: %C = icmp eq i64 %X, -1
+}
+
+
+@G = external global [3 x i8]
+define i8* @test14(i32 %Idx) {
+ %idx = zext i32 %Idx to i64
+ %tmp = getelementptr i8* getelementptr ([3 x i8]* @G, i32 0, i32 0), i64 %idx
+ ret i8* %tmp
+; CHECK: @test14
+; CHECK: getelementptr [3 x i8]* @G, i64 0, i64 %idx
+}
+
+
+; Test folding of constantexpr geps into normal geps.
+@Array = external global [40 x i32]
+define i32 *@test15(i64 %X) {
+ %A = getelementptr i32* getelementptr ([40 x i32]* @Array, i64 0, i64 0), i64 %X
+ ret i32* %A
+; CHECK: @test15
+; CHECK: getelementptr [40 x i32]* @Array, i64 0, i64 %X
+}
+
+
+define i32* @test16(i32* %X, i32 %Idx) {
+ %R = getelementptr i32* %X, i32 %Idx
+ ret i32* %R
+; CHECK: @test16
+; CHECK: sext i32 %Idx to i64
+}
+
+
+define i1 @test17(i16* %P, i32 %I, i32 %J) {
+ %X = getelementptr inbounds i16* %P, i32 %I
+ %Y = getelementptr inbounds i16* %P, i32 %J
+ %C = icmp ult i16* %X, %Y
+ ret i1 %C
+; CHECK: @test17
+; CHECK: %C = icmp slt i32 %I, %J
+}
+
+define i1 @test18(i16* %P, i32 %I) {
+ %X = getelementptr inbounds i16* %P, i32 %I
+ %C = icmp ult i16* %X, %P
+ ret i1 %C
+; CHECK: @test18
+; CHECK: %C = icmp slt i32 %I, 0
+}
+
+define i32 @test19(i32* %P, i32 %A, i32 %B) {
+ %tmp.4 = getelementptr inbounds i32* %P, i32 %A
+ %tmp.9 = getelementptr inbounds i32* %P, i32 %B
+ %tmp.10 = icmp eq i32* %tmp.4, %tmp.9
+ %tmp.11 = zext i1 %tmp.10 to i32
+ ret i32 %tmp.11
+; CHECK: @test19
+; CHECK: icmp eq i32 %A, %B
+}
+
+define i32 @test20(i32* %P, i32 %A, i32 %B) {
+ %tmp.4 = getelementptr inbounds i32* %P, i32 %A
+ %tmp.6 = icmp eq i32* %tmp.4, %P
+ %tmp.7 = zext i1 %tmp.6 to i32
+ ret i32 %tmp.7
+; CHECK: @test20
+; CHECK: icmp eq i32 %A, 0
+}
+
+
+define i32 @test21() {
+ %pbob1 = alloca %intstruct
+ %pbob2 = getelementptr %intstruct* %pbob1
+ %pbobel = getelementptr %intstruct* %pbob2, i64 0, i32 0
+ %rval = load i32* %pbobel
+ ret i32 %rval
+; CHECK: @test21
+; CHECK: getelementptr %intstruct* %pbob1, i64 0, i32 0
+}
+
+
+@A = global i32 1 ; <i32*> [#uses=1]
+@B = global i32 2 ; <i32*> [#uses=1]
+
+define i1 @test22() {
+ %C = icmp ult i32* getelementptr (i32* @A, i64 1),
+ getelementptr (i32* @B, i64 2)
+ ret i1 %C
+; CHECK: @test22
+; CHECK: icmp ult (i32* getelementptr inbounds (i32* @A, i64 1), i32* getelementptr (i32* @B, i64 2))
+}
+
+
+%X = type { [10 x i32], float }
+
+define i1 @test23() {
+ %A = getelementptr %X* null, i64 0, i32 0, i64 0 ; <i32*> [#uses=1]
+ %B = icmp ne i32* %A, null ; <i1> [#uses=1]
+ ret i1 %B
+; CHECK: @test23
+; CHECK: ret i1 false
+}
+
+%"java/lang/Object" = type { %struct.llvm_java_object_base }
+%"java/lang/StringBuffer" = type { %"java/lang/Object", i32, { %"java/lang/Object", i32, [0 x i16] }*, i1 }
+%struct.llvm_java_object_base = type opaque
+
+define void @test24() {
+bc0:
+ %tmp53 = getelementptr %"java/lang/StringBuffer"* null, i32 0, i32 1 ; <i32*> [#uses=1]
+ store i32 0, i32* %tmp53
+ ret void
+; CHECK: @test24
+; CHECK: store i32 0, i32* getelementptr (%"java/lang/StringBuffer"* null, i64 0, i32 1)
+}
+
+define void @test25() {
+entry:
+ %tmp = getelementptr { i64, i64, i64, i64 }* null, i32 0, i32 3 ; <i64*> [#uses=1]
+ %tmp.upgrd.1 = load i64* %tmp ; <i64> [#uses=1]
+ %tmp8.ui = load i64* null ; <i64> [#uses=1]
+ %tmp8 = bitcast i64 %tmp8.ui to i64 ; <i64> [#uses=1]
+ %tmp9 = and i64 %tmp8, %tmp.upgrd.1 ; <i64> [#uses=1]
+ %sext = trunc i64 %tmp9 to i32 ; <i32> [#uses=1]
+ %tmp27.i = sext i32 %sext to i64 ; <i64> [#uses=1]
+ tail call void @foo25( i32 0, i64 %tmp27.i )
+ unreachable
+; CHECK: @test25
+}
+
+declare void @foo25(i32, i64)
+
+
+; PR1637
+define i1 @test26(i8* %arr) {
+ %X = getelementptr i8* %arr, i32 1
+ %Y = getelementptr i8* %arr, i32 1
+ %test = icmp uge i8* %X, %Y
+ ret i1 %test
+; CHECK: @test26
+; CHECK: ret i1 true
+}
+
+ %struct.__large_struct = type { [100 x i64] }
+ %struct.compat_siginfo = type { i32, i32, i32, { [29 x i32] } }
+ %struct.siginfo_t = type { i32, i32, i32, { { i32, i32, [0 x i8], %struct.sigval_t, i32 }, [88 x i8] } }
+ %struct.sigval_t = type { i8* }
+
+define i32 @test27(%struct.compat_siginfo* %to, %struct.siginfo_t* %from) {
+entry:
+ %from_addr = alloca %struct.siginfo_t*
+ %tmp344 = load %struct.siginfo_t** %from_addr, align 8
+ %tmp345 = getelementptr %struct.siginfo_t* %tmp344, i32 0, i32 3
+ %tmp346 = getelementptr { { i32, i32, [0 x i8], %struct.sigval_t, i32 }, [88 x i8] }* %tmp345, i32 0, i32 0
+ %tmp346347 = bitcast { i32, i32, [0 x i8], %struct.sigval_t, i32 }* %tmp346 to { i32, i32, %struct.sigval_t }*
+ %tmp348 = getelementptr { i32, i32, %struct.sigval_t }* %tmp346347, i32 0, i32 2
+ %tmp349 = getelementptr %struct.sigval_t* %tmp348, i32 0, i32 0
+ %tmp349350 = bitcast i8** %tmp349 to i32*
+ %tmp351 = load i32* %tmp349350, align 8
+ %tmp360 = call i32 asm sideeffect "...",
+ "=r,ir,*m,i,0,~{dirflag},~{fpsr},~{flags}"( i32 %tmp351,
+ %struct.__large_struct* null, i32 -14, i32 0 )
+ unreachable
+; CHECK: @test27
+}
+
+; PR1978
+ %struct.x = type <{ i8 }>
+@.str = internal constant [6 x i8] c"Main!\00"
+@.str1 = internal constant [12 x i8] c"destroy %p\0A\00"
+
+define i32 @test28() nounwind {
+entry:
+ %orientations = alloca [1 x [1 x %struct.x]]
+ %tmp3 = call i32 @puts( i8* getelementptr ([6 x i8]* @.str, i32 0, i32 0) ) nounwind
+ %tmp45 = getelementptr inbounds [1 x [1 x %struct.x]]* %orientations, i32 1, i32 0, i32 0
+ %orientations62 = getelementptr [1 x [1 x %struct.x]]* %orientations, i32 0, i32 0, i32 0
+ br label %bb10
+
+bb10:
+ %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb10 ]
+ %tmp.0.reg2mem.0.rec = mul i32 %indvar, -1
+ %tmp12.rec = add i32 %tmp.0.reg2mem.0.rec, -1
+ %tmp12 = getelementptr inbounds %struct.x* %tmp45, i32 %tmp12.rec
+ %tmp16 = call i32 (i8*, ...)* @printf( i8* getelementptr ([12 x i8]* @.str1, i32 0, i32 0), %struct.x* %tmp12 ) nounwind
+ %tmp84 = icmp eq %struct.x* %tmp12, %orientations62
+ %indvar.next = add i32 %indvar, 1
+ br i1 %tmp84, label %bb17, label %bb10
+
+bb17:
+ ret i32 0
+; CHECK: @test28
+; CHECK: icmp eq i32 %indvar, 0
+}
+
+declare i32 @puts(i8*)
+
+declare i32 @printf(i8*, ...)
+
+
+
+
+; rdar://6762290
+ %T = type <{ i64, i64, i64 }>
+define i32 @test29(i8* %start, i32 %X) nounwind {
+entry:
+ %tmp3 = load i64* null
+ %add.ptr = getelementptr i8* %start, i64 %tmp3
+ %tmp158 = load i32* null
+ %add.ptr159 = getelementptr %T* null, i32 %tmp158
+ %add.ptr209 = getelementptr i8* %start, i64 0
+ %add.ptr212 = getelementptr i8* %add.ptr209, i32 %X
+ %cmp214 = icmp ugt i8* %add.ptr212, %add.ptr
+ br i1 %cmp214, label %if.then216, label %if.end363
+
+if.then216:
+ ret i32 1
+
+if.end363:
+ ret i32 0
+; CHECK: @test29
+}
+
+
+; PR3694
+define i32 @test30(i32 %m, i32 %n) nounwind {
+entry:
+ %0 = alloca i32, i32 %n, align 4
+ %1 = bitcast i32* %0 to [0 x i32]*
+ call void @test30f(i32* %0) nounwind
+ %2 = getelementptr [0 x i32]* %1, i32 0, i32 %m
+ %3 = load i32* %2, align 4
+ ret i32 %3
+; CHECK: @test30
+; CHECK: getelementptr i32
+}
+
+declare void @test30f(i32*)
+
+
+
+define i1 @test31(i32* %A) {
+ %B = getelementptr i32* %A, i32 1
+ %C = getelementptr i32* %A, i64 1
+ %V = icmp eq i32* %B, %C
+ ret i1 %V
+; CHECK: @test31
+; CHECK: ret i1 true
+}
+
+
+; PR1345
+define i8* @test32(i8* %v) {
+ %A = alloca [4 x i8*], align 16
+ %B = getelementptr [4 x i8*]* %A, i32 0, i32 0
+ store i8* null, i8** %B
+ %C = bitcast [4 x i8*]* %A to { [16 x i8] }*
+ %D = getelementptr { [16 x i8] }* %C, i32 0, i32 0, i32 8
+ %E = bitcast i8* %D to i8**
+ store i8* %v, i8** %E
+ %F = getelementptr [4 x i8*]* %A, i32 0, i32 2
+ %G = load i8** %F
+ ret i8* %G
+; CHECK: @test32
+; CHECK: %D = getelementptr [4 x i8*]* %A, i64 0, i64 1
+; CHECK: %F = getelementptr [4 x i8*]* %A, i64 0, i64 2
+}
+
+; PR3290
+%struct.Key = type { { i32, i32 } }
+%struct.anon = type <{ i8, [3 x i8], i32 }>
+
+define i32 *@test33(%struct.Key *%A) {
+ %B = bitcast %struct.Key* %A to %struct.anon*
+ %C = getelementptr %struct.anon* %B, i32 0, i32 2
+ ret i32 *%C
+; CHECK: @test33
+; CHECK: getelementptr %struct.Key* %A, i64 0, i32 0, i32 1
+}
+
+
+
+ %T2 = type { i8*, i8 }
+define i8* @test34(i8* %Val, i64 %V) nounwind {
+entry:
+ %A = alloca %T2, align 8
+ %mrv_gep = bitcast %T2* %A to i64*
+ %B = getelementptr %T2* %A, i64 0, i32 0
+
+ store i64 %V, i64* %mrv_gep
+ %C = load i8** %B, align 8
+ ret i8* %C
+; CHECK: @test34
+; CHECK: %V.c = inttoptr i64 %V to i8*
+; CHECK: ret i8* %V.c
+}
+
+%t0 = type { i8*, [19 x i8] }
+%t1 = type { i8*, [0 x i8] }
+
+@array = external global [11 x i8]
+
+@s = external global %t0
+@"\01LC8" = external constant [17 x i8]
+
+; Instcombine should be able to fold this getelementptr.
+
+define i32 @test35() nounwind {
+ call i32 (i8*, ...)* @printf(i8* getelementptr ([17 x i8]* @"\01LC8", i32 0, i32 0),
+ i8* getelementptr (%t1* bitcast (%t0* @s to %t1*), i32 0, i32 1, i32 0)) nounwind
+ ret i32 0
+; CHECK: @test35
+; CHECK: call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([17 x i8]* @"\01LC8", i64 0, i64 0), i8* getelementptr inbounds (%t0* @s, i64 0, i32 1, i64 0)) nounwind
+}
+
+; Instcombine should constant-fold the GEP so that indices that have
+; static array extents are within bounds of those array extents.
+; In the below, -1 is not in the range [0,11). After the transformation,
+; the same address is computed, but 3 is in the range of [0,11).
+
+define i8* @test36() nounwind {
+ ret i8* getelementptr ([11 x i8]* @array, i32 0, i64 -1)
+; CHECK: @test36
+; CHECK: ret i8* getelementptr ([11 x i8]* @array, i64 1676976733973595601, i64 4)
+}
+
+; Instcombine shouldn't assume that gep(A,0,1) != gep(A,1,0).
+@A37 = external constant [1 x i8]
+define i1 @test37() nounwind {
+; CHECK: @test37
+; CHECK: ret i1 true
+ %t = icmp eq i8* getelementptr ([1 x i8]* @A37, i64 0, i64 1),
+ getelementptr ([1 x i8]* @A37, i64 1, i64 0)
+ ret i1 %t
+}
diff --git a/test/Transforms/InstCombine/hoist_instr.ll b/test/Transforms/InstCombine/hoist_instr.ll
new file mode 100644
index 0000000..fa451bc
--- /dev/null
+++ b/test/Transforms/InstCombine/hoist_instr.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+;; This tests that the div is hoisted into the then block.
+define i32 @foo(i1 %C, i32 %A, i32 %B) {
+entry:
+ br i1 %C, label %then, label %endif
+
+then: ; preds = %entry
+; CHECK: then:
+; CHECK-NEXT: sdiv i32
+ br label %endif
+
+endif: ; preds = %then, %entry
+ %X = phi i32 [ %A, %then ], [ 15, %entry ] ; <i32> [#uses=1]
+ %Y = sdiv i32 %X, 42 ; <i32> [#uses=1]
+ ret i32 %Y
+}
+
diff --git a/test/Transforms/InstCombine/icmp.ll b/test/Transforms/InstCombine/icmp.ll
new file mode 100644
index 0000000..c2234a1
--- /dev/null
+++ b/test/Transforms/InstCombine/icmp.ll
@@ -0,0 +1,123 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @test1(i32 %X) {
+entry:
+ icmp slt i32 %X, 0 ; <i1>:0 [#uses=1]
+ zext i1 %0 to i32 ; <i32>:1 [#uses=1]
+ ret i32 %1
+; CHECK: @test1
+; CHECK: lshr i32 %X, 31
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test2(i32 %X) {
+entry:
+ icmp ult i32 %X, -2147483648 ; <i1>:0 [#uses=1]
+ zext i1 %0 to i32 ; <i32>:1 [#uses=1]
+ ret i32 %1
+; CHECK: @test2
+; CHECK: lshr i32 %X, 31
+; CHECK-NEXT: xor i32
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test3(i32 %X) {
+entry:
+ icmp slt i32 %X, 0 ; <i1>:0 [#uses=1]
+ sext i1 %0 to i32 ; <i32>:1 [#uses=1]
+ ret i32 %1
+; CHECK: @test3
+; CHECK: ashr i32 %X, 31
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test4(i32 %X) {
+entry:
+ icmp ult i32 %X, -2147483648 ; <i1>:0 [#uses=1]
+ sext i1 %0 to i32 ; <i32>:1 [#uses=1]
+ ret i32 %1
+; CHECK: @test4
+; CHECK: ashr i32 %X, 31
+; CHECK-NEXT: xor i32
+; CHECK-NEXT: ret i32
+}
+
+; PR4837
+define <2 x i1> @test5(<2 x i64> %x) {
+entry:
+ %V = icmp eq <2 x i64> %x, undef
+ ret <2 x i1> %V
+; CHECK: @test5
+; CHECK: ret <2 x i1> undef
+}
+
+define i32 @test6(i32 %a, i32 %b) {
+ %c = icmp sle i32 %a, -1
+ %d = zext i1 %c to i32
+ %e = sub i32 0, %d
+ %f = and i32 %e, %b
+ ret i32 %f
+; CHECK: @test6
+; CHECK-NEXT: ashr i32 %a, 31
+; CHECK-NEXT: %f = and i32 %e, %b
+; CHECK-NEXT: ret i32 %f
+}
+
+
+define i1 @test7(i32 %x) {
+entry:
+ %a = add i32 %x, -1
+ %b = icmp ult i32 %a, %x
+ ret i1 %b
+; CHECK: @test7
+; CHECK: %b = icmp ne i32 %x, 0
+; CHECK: ret i1 %b
+}
+
+define i1 @test8(i32 %x){
+entry:
+ %a = add i32 %x, -1
+ %b = icmp eq i32 %a, %x
+ ret i1 %b
+; CHECK: @test8
+; CHECK: ret i1 false
+}
+
+define i1 @test9(i32 %x) {
+entry:
+ %a = add i32 %x, -2
+ %b = icmp ugt i32 %x, %a
+ ret i1 %b
+; CHECK: @test9
+; CHECK: icmp ugt i32 %x, 1
+; CHECK: ret i1 %b
+}
+
+define i1 @test10(i32 %x){
+entry:
+ %a = add i32 %x, -1
+ %b = icmp slt i32 %a, %x
+ ret i1 %b
+
+; CHECK: @test10
+; CHECK: %b = icmp ne i32 %x, -2147483648
+; CHECK: ret i1 %b
+}
+
+define i1 @test11(i32 %x) {
+ %a = add nsw i32 %x, 8
+ %b = icmp slt i32 %x, %a
+ ret i1 %b
+; CHECK: @test11
+; CHECK: ret i1 true
+}
+
+; PR6195
+define i1 @test12(i1 %A) {
+ %S = select i1 %A, i64 -4294967295, i64 8589934591
+ %B = icmp ne i64 bitcast (<2 x i32> <i32 1, i32 -1> to i64), %S
+ ret i1 %B
+; CHECK: @test12
+; CHECK-NEXT: %B = select i1
+; CHECK-NEXT: ret i1 %B
+}
diff --git a/test/Transforms/InstCombine/idioms.ll b/test/Transforms/InstCombine/idioms.ll
new file mode 100644
index 0000000..6b3567f
--- /dev/null
+++ b/test/Transforms/InstCombine/idioms.ll
@@ -0,0 +1,32 @@
+; RUN: opt -instcombine %s -S | FileCheck %s
+
+; Check that code corresponding to the following C function is
+; simplified into a single ASR operation:
+;
+; int test_asr(int a, int b) {
+; return a < 0 ? -(-a - 1 >> b) - 1 : a >> b;
+; }
+;
+define i32 @test_asr(i32 %a, i32 %b) {
+entry:
+ %c = icmp slt i32 %a, 0
+ br i1 %c, label %bb2, label %bb3
+
+bb2:
+ %t1 = sub i32 0, %a
+ %not = sub i32 %t1, 1
+ %d = ashr i32 %not, %b
+ %t2 = sub i32 0, %d
+ %not2 = sub i32 %t2, 1
+ br label %bb4
+bb3:
+ %e = ashr i32 %a, %b
+ br label %bb4
+bb4:
+ %f = phi i32 [ %not2, %bb2 ], [ %e, %bb3 ]
+ ret i32 %f
+; CHECK: @test_asr
+; CHECK: bb4:
+; CHECK: %f = ashr i32 %a, %b
+; CHECK: ret i32 %f
+}
diff --git a/test/Transforms/InstCombine/intrinsics.ll b/test/Transforms/InstCombine/intrinsics.ll
new file mode 100644
index 0000000..08dcfa7
--- /dev/null
+++ b/test/Transforms/InstCombine/intrinsics.ll
@@ -0,0 +1,161 @@
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+%overflow.result = type {i8, i1}
+
+declare %overflow.result @llvm.uadd.with.overflow.i8(i8, i8)
+declare %overflow.result @llvm.umul.with.overflow.i8(i8, i8)
+declare double @llvm.powi.f64(double, i32) nounwind readonly
+declare i32 @llvm.cttz.i32(i32) nounwind readnone
+declare i32 @llvm.ctlz.i32(i32) nounwind readnone
+declare i32 @llvm.ctpop.i32(i32) nounwind readnone
+declare i8 @llvm.ctlz.i8(i8) nounwind readnone
+
+define i8 @test1(i8 %A, i8 %B) {
+ %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
+ %y = extractvalue %overflow.result %x, 0
+ ret i8 %y
+; CHECK: @test1
+; CHECK-NEXT: %y = add i8 %A, %B
+; CHECK-NEXT: ret i8 %y
+}
+
+define i8 @test2(i8 %A, i8 %B, i1* %overflowPtr) {
+ %and.A = and i8 %A, 127
+ %and.B = and i8 %B, 127
+ %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %and.A, i8 %and.B)
+ %y = extractvalue %overflow.result %x, 0
+ %z = extractvalue %overflow.result %x, 1
+ store i1 %z, i1* %overflowPtr
+ ret i8 %y
+; CHECK: @test2
+; CHECK-NEXT: %and.A = and i8 %A, 127
+; CHECK-NEXT: %and.B = and i8 %B, 127
+; CHECK-NEXT: %1 = add nuw i8 %and.A, %and.B
+; CHECK-NEXT: store i1 false, i1* %overflowPtr
+; CHECK-NEXT: ret i8 %1
+}
+
+define i8 @test3(i8 %A, i8 %B, i1* %overflowPtr) {
+ %or.A = or i8 %A, -128
+ %or.B = or i8 %B, -128
+ %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %or.A, i8 %or.B)
+ %y = extractvalue %overflow.result %x, 0
+ %z = extractvalue %overflow.result %x, 1
+ store i1 %z, i1* %overflowPtr
+ ret i8 %y
+; CHECK: @test3
+; CHECK-NEXT: %or.A = or i8 %A, -128
+; CHECK-NEXT: %or.B = or i8 %B, -128
+; CHECK-NEXT: %1 = add i8 %or.A, %or.B
+; CHECK-NEXT: store i1 true, i1* %overflowPtr
+; CHECK-NEXT: ret i8 %1
+}
+
+define i8 @test4(i8 %A, i1* %overflowPtr) {
+ %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 undef, i8 %A)
+ %y = extractvalue %overflow.result %x, 0
+ %z = extractvalue %overflow.result %x, 1
+ store i1 %z, i1* %overflowPtr
+ ret i8 %y
+; CHECK: @test4
+; CHECK-NEXT: ret i8 undef
+}
+
+define i8 @test5(i8 %A, i1* %overflowPtr) {
+ %x = call %overflow.result @llvm.umul.with.overflow.i8(i8 0, i8 %A)
+ %y = extractvalue %overflow.result %x, 0
+ %z = extractvalue %overflow.result %x, 1
+ store i1 %z, i1* %overflowPtr
+ ret i8 %y
+; CHECK: @test5
+; CHECK-NEXT: store i1 false, i1* %overflowPtr
+; CHECK-NEXT: ret i8 0
+}
+
+define i8 @test6(i8 %A, i1* %overflowPtr) {
+ %x = call %overflow.result @llvm.umul.with.overflow.i8(i8 1, i8 %A)
+ %y = extractvalue %overflow.result %x, 0
+ %z = extractvalue %overflow.result %x, 1
+ store i1 %z, i1* %overflowPtr
+ ret i8 %y
+; CHECK: @test6
+; CHECK-NEXT: store i1 false, i1* %overflowPtr
+; CHECK-NEXT: ret i8 %A
+}
+
+define void @powi(double %V, double *%P) {
+entry:
+ %A = tail call double @llvm.powi.f64(double %V, i32 -1) nounwind
+ volatile store double %A, double* %P
+
+ %B = tail call double @llvm.powi.f64(double %V, i32 0) nounwind
+ volatile store double %B, double* %P
+
+ %C = tail call double @llvm.powi.f64(double %V, i32 1) nounwind
+ volatile store double %C, double* %P
+ ret void
+; CHECK: @powi
+; CHECK: %A = fdiv double 1.0{{.*}}, %V
+; CHECK: volatile store double %A,
+; CHECK: volatile store double 1.0
+; CHECK: volatile store double %V
+}
+
+define i32 @cttz(i32 %a) {
+entry:
+ %or = or i32 %a, 8
+ %and = and i32 %or, -8
+ %count = tail call i32 @llvm.cttz.i32(i32 %and) nounwind readnone
+ ret i32 %count
+; CHECK: @cttz
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret i32 3
+}
+
+define i8 @ctlz(i8 %a) {
+entry:
+ %or = or i8 %a, 32
+ %and = and i8 %or, 63
+ %count = tail call i8 @llvm.ctlz.i8(i8 %and) nounwind readnone
+ ret i8 %count
+; CHECK: @ctlz
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret i8 2
+}
+
+define void @cmp.simplify(i32 %a, i32 %b, i1* %c) {
+entry:
+ %lz = tail call i32 @llvm.ctlz.i32(i32 %a) nounwind readnone
+ %lz.cmp = icmp eq i32 %lz, 32
+ volatile store i1 %lz.cmp, i1* %c
+ %tz = tail call i32 @llvm.cttz.i32(i32 %a) nounwind readnone
+ %tz.cmp = icmp ne i32 %tz, 32
+ volatile store i1 %tz.cmp, i1* %c
+ %pop = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone
+ %pop.cmp = icmp eq i32 %pop, 0
+ volatile store i1 %pop.cmp, i1* %c
+ ret void
+; CHECK: @cmp.simplify
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %lz.cmp = icmp eq i32 %a, 0
+; CHECK-NEXT: volatile store i1 %lz.cmp, i1* %c
+; CHECK-NEXT: %tz.cmp = icmp ne i32 %a, 0
+; CHECK-NEXT: volatile store i1 %tz.cmp, i1* %c
+; CHECK-NEXT: %pop.cmp = icmp eq i32 %b, 0
+; CHECK-NEXT: volatile store i1 %pop.cmp, i1* %c
+}
+
+
+define i32 @cttz_simplify1(i32 %x) nounwind readnone ssp {
+ %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %x) ; <i32> [#uses=1]
+ %shr3 = lshr i32 %tmp1, 5 ; <i32> [#uses=1]
+ ret i32 %shr3
+
+; CHECK: @cttz_simplify1
+; CHECK: icmp eq i32 %x, 0
+; CHECK-NEXT: zext i1
+; CHECK-NEXT: ret i32
+}
+
+declare i32 @llvm.ctlz.i32(i32) nounwind readnone
+
diff --git a/test/Transforms/InstCombine/invariant.ll b/test/Transforms/InstCombine/invariant.ll
new file mode 100644
index 0000000..c67ad33
--- /dev/null
+++ b/test/Transforms/InstCombine/invariant.ll
@@ -0,0 +1,16 @@
+; Test to make sure unused llvm.invariant.start calls are not trivially eliminated
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+declare void @g(i8*)
+
+declare { }* @llvm.invariant.start(i64, i8* nocapture) nounwind readonly
+
+define i8 @f() {
+ %a = alloca i8 ; <i8*> [#uses=4]
+ store i8 0, i8* %a
+ %i = call { }* @llvm.invariant.start(i64 1, i8* %a) ; <{ }*> [#uses=0]
+ ; CHECK: call { }* @llvm.invariant.start
+ call void @g(i8* %a)
+ %r = load i8* %a ; <i8> [#uses=1]
+ ret i8 %r
+}
diff --git a/test/Transforms/InstCombine/known_align.ll b/test/Transforms/InstCombine/known_align.ll
new file mode 100644
index 0000000..5382abf
--- /dev/null
+++ b/test/Transforms/InstCombine/known_align.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -instcombine -S | grep {align 1}
+; END.
+
+ %struct.p = type <{ i8, i32 }>
+@t = global %struct.p <{ i8 1, i32 10 }> ; <%struct.p*> [#uses=1]
+@u = weak global %struct.p zeroinitializer ; <%struct.p*> [#uses=1]
+
+define i32 @main() {
+entry:
+ %retval = alloca i32, align 4 ; <i32*> [#uses=2]
+ %tmp = alloca i32, align 4 ; <i32*> [#uses=2]
+ %tmp1 = alloca i32, align 4 ; <i32*> [#uses=3]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp3 = load i32* getelementptr (%struct.p* @t, i32 0, i32 1), align 1 ; <i32> [#uses=1]
+ store i32 %tmp3, i32* %tmp1, align 4
+ %tmp5 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
+ store i32 %tmp5, i32* getelementptr (%struct.p* @u, i32 0, i32 1), align 1
+ %tmp6 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
+ store i32 %tmp6, i32* %tmp, align 4
+ %tmp7 = load i32* %tmp, align 4 ; <i32> [#uses=1]
+ store i32 %tmp7, i32* %retval, align 4
+ br label %return
+
+return: ; preds = %entry
+ %retval8 = load i32* %retval ; <i32> [#uses=1]
+ ret i32 %retval8
+}
diff --git a/test/Transforms/InstCombine/load-cmp.ll b/test/Transforms/InstCombine/load-cmp.ll
new file mode 100644
index 0000000..fe5df92
--- /dev/null
+++ b/test/Transforms/InstCombine/load-cmp.ll
@@ -0,0 +1,112 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+@G16 = internal constant [10 x i16] [i16 35, i16 82, i16 69, i16 81, i16 85,
+ i16 73, i16 82, i16 69, i16 68, i16 0]
+@GD = internal constant [6 x double]
+ [double -10.0, double 1.0, double 4.0, double 2.0, double -20.0, double -40.0]
+
+define i1 @test1(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = icmp eq i16 %Q, 0
+ ret i1 %R
+; CHECK: @test1
+; CHECK-NEXT: %R = icmp eq i32 %X, 9
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test2(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = icmp slt i16 %Q, 85
+ ret i1 %R
+; CHECK: @test2
+; CHECK-NEXT: %R = icmp ne i32 %X, 4
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test3(i32 %X) {
+ %P = getelementptr inbounds [6 x double]* @GD, i32 0, i32 %X
+ %Q = load double* %P
+ %R = fcmp oeq double %Q, 1.0
+ ret i1 %R
+; CHECK: @test3
+; CHECK-NEXT: %R = icmp eq i32 %X, 1
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test4(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = icmp sle i16 %Q, 73
+ ret i1 %R
+; CHECK: @test4
+; CHECK-NEXT: lshr i32 933, %X
+; CHECK-NEXT: and i32 {{.*}}, 1
+; CHECK-NEXT: %R = icmp ne i32 {{.*}}, 0
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test5(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = icmp eq i16 %Q, 69
+ ret i1 %R
+; CHECK: @test5
+; CHECK-NEXT: icmp eq i32 %X, 2
+; CHECK-NEXT: icmp eq i32 %X, 7
+; CHECK-NEXT: %R = or i1
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test6(i32 %X) {
+ %P = getelementptr inbounds [6 x double]* @GD, i32 0, i32 %X
+ %Q = load double* %P
+ %R = fcmp ogt double %Q, 0.0
+ ret i1 %R
+; CHECK: @test6
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: %R = icmp ult i32 {{.*}}, 3
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test7(i32 %X) {
+ %P = getelementptr inbounds [6 x double]* @GD, i32 0, i32 %X
+ %Q = load double* %P
+ %R = fcmp olt double %Q, 0.0
+ ret i1 %R
+; CHECK: @test7
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: %R = icmp ugt i32 {{.*}}, 2
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test8(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = and i16 %Q, 3
+ %S = icmp eq i16 %R, 0
+ ret i1 %S
+; CHECK: @test8
+; CHECK-NEXT: add i32 %X, -8
+; CHECK-NEXT: %S = icmp ult i32 {{.*}}, 2
+; CHECK-NEXT: ret i1 %S
+}
+
+@GA = internal constant [4 x { i32, i32 } ] [
+ { i32, i32 } { i32 1, i32 0 },
+ { i32, i32 } { i32 2, i32 1 },
+ { i32, i32 } { i32 3, i32 1 },
+ { i32, i32 } { i32 4, i32 0 }
+]
+
+define i1 @test9(i32 %X) {
+ %P = getelementptr inbounds [4 x { i32, i32 } ]* @GA, i32 0, i32 %X, i32 1
+ %Q = load i32* %P
+ %R = icmp eq i32 %Q, 1
+ ret i1 %R
+; CHECK: @test9
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: %R = icmp ult i32 {{.*}}, 2
+; CHECK-NEXT: ret i1 %R
+}
diff --git a/test/Transforms/InstCombine/load-select.ll b/test/Transforms/InstCombine/load-select.ll
new file mode 100644
index 0000000..f3d83dc
--- /dev/null
+++ b/test/Transforms/InstCombine/load-select.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
+
+@a = constant [2 x i32] [i32 3, i32 6] ; <[2 x i32]*> [#uses=2]
+
+define i32 @b(i32 %y) nounwind readonly {
+; CHECK: @b
+; CHECK-NOT: load
+; CHECK: ret i32
+entry:
+ %0 = icmp eq i32 %y, 0 ; <i1> [#uses=1]
+ %storemerge = select i1 %0, i32* getelementptr inbounds ([2 x i32]* @a, i32 0, i32 1), i32* getelementptr inbounds ([2 x i32]* @a, i32 0, i32 0) ; <i32*> [#uses=1]
+ %1 = load i32* %storemerge, align 4 ; <i32> [#uses=1]
+ ret i32 %1
+}
diff --git a/test/Transforms/InstCombine/load.ll b/test/Transforms/InstCombine/load.ll
new file mode 100644
index 0000000..75c62a8
--- /dev/null
+++ b/test/Transforms/InstCombine/load.ll
@@ -0,0 +1,87 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | not grep load
+
+@X = constant i32 42 ; <i32*> [#uses=2]
+@X2 = constant i32 47 ; <i32*> [#uses=1]
+@Y = constant [2 x { i32, float }] [ { i32, float } { i32 12, float 1.000000e+00 }, { i32, float } { i32 37, float 0x3FF3B2FEC0000000 } ] ; <[2 x { i32, float }]*> [#uses=2]
+@Z = constant [2 x { i32, float }] zeroinitializer ; <[2 x { i32, float }]*> [#uses=1]
+
+define i32 @test1() {
+ %B = load i32* @X ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define float @test2() {
+ %A = getelementptr [2 x { i32, float }]* @Y, i64 0, i64 1, i32 1 ; <float*> [#uses=1]
+ %B = load float* %A ; <float> [#uses=1]
+ ret float %B
+}
+
+define i32 @test3() {
+ %A = getelementptr [2 x { i32, float }]* @Y, i64 0, i64 0, i32 0 ; <i32*> [#uses=1]
+ %B = load i32* %A ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test4() {
+ %A = getelementptr [2 x { i32, float }]* @Z, i64 0, i64 1, i32 0 ; <i32*> [#uses=1]
+ %B = load i32* %A ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test5(i1 %C) {
+ %Y = select i1 %C, i32* @X, i32* @X2 ; <i32*> [#uses=1]
+ %Z = load i32* %Y ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i32 @test7(i32 %X) {
+ %V = getelementptr i32* null, i32 %X ; <i32*> [#uses=1]
+ %R = load i32* %V ; <i32> [#uses=1]
+ ret i32 %R
+}
+
+define i32 @test8(i32* %P) {
+ store i32 1, i32* %P
+ %X = load i32* %P ; <i32> [#uses=1]
+ ret i32 %X
+}
+
+define i32 @test9(i32* %P) {
+ %X = load i32* %P ; <i32> [#uses=1]
+ %Y = load i32* %P ; <i32> [#uses=1]
+ %Z = sub i32 %X, %Y ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i32 @test10(i1 %C.upgrd.1, i32* %P, i32* %Q) {
+ br i1 %C.upgrd.1, label %T, label %F
+T: ; preds = %0
+ store i32 1, i32* %Q
+ store i32 0, i32* %P
+ br label %C
+F: ; preds = %0
+ store i32 0, i32* %P
+ br label %C
+C: ; preds = %F, %T
+ %V = load i32* %P ; <i32> [#uses=1]
+ ret i32 %V
+}
+
+define double @test11(double* %p) {
+ %t0 = getelementptr double* %p, i32 1
+ store double 2.0, double* %t0
+ %t1 = getelementptr double* %p, i32 1
+ %x = load double* %t1
+ ret double %x
+}
+
+define i32 @test12(i32* %P) {
+ %A = alloca i32
+ store i32 123, i32* %A
+ ; Cast the result of the load not the source
+ %Q = bitcast i32* %A to i32*
+ %V = load i32* %Q
+ ret i32 %V
+}
diff --git a/test/Transforms/InstCombine/load2.ll b/test/Transforms/InstCombine/load2.ll
new file mode 100644
index 0000000..611b0fb
--- /dev/null
+++ b/test/Transforms/InstCombine/load2.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | not grep load
+
+@GLOBAL = internal constant [4 x i32] zeroinitializer
+
+
+define <16 x i8> @foo(<2 x i64> %x) {
+entry:
+ %tmp = load <16 x i8> * bitcast ([4 x i32]* @GLOBAL to <16 x i8>*)
+ ret <16 x i8> %tmp
+}
+
diff --git a/test/Transforms/InstCombine/load3.ll b/test/Transforms/InstCombine/load3.ll
new file mode 100644
index 0000000..9c87316
--- /dev/null
+++ b/test/Transforms/InstCombine/load3.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep load | count 1
+
+; Instcombine should be able to do trivial CSE of loads.
+
+declare void @use(double %n)
+define void @bar(double* %p) {
+ %t0 = getelementptr double* %p, i32 1
+ %y = load double* %t0
+ %t1 = getelementptr double* %p, i32 1
+ %x = load double* %t1
+ call void @use(double %x)
+ call void @use(double %y)
+ ret void
+}
diff --git a/test/Transforms/InstCombine/loadstore-alignment.ll b/test/Transforms/InstCombine/loadstore-alignment.ll
new file mode 100644
index 0000000..1d932d2
--- /dev/null
+++ b/test/Transforms/InstCombine/loadstore-alignment.ll
@@ -0,0 +1,67 @@
+; RUN: opt < %s -instcombine -S | grep {, align 16} | count 14
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+@x = external global <2 x i64>, align 16
+@xx = external global [13 x <2 x i64>], align 16
+
+define <2 x i64> @static_hem() {
+ %t = getelementptr <2 x i64>* @x, i32 7
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @hem(i32 %i) {
+ %t = getelementptr <2 x i64>* @x, i32 %i
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @hem_2d(i32 %i, i32 %j) {
+ %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @foo() {
+ %tmp1 = load <2 x i64>* @x, align 1
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @bar() {
+ %t = alloca <2 x i64>
+ call void @kip(<2 x i64>* %t)
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
+}
+
+define void @static_hem_store(<2 x i64> %y) {
+ %t = getelementptr <2 x i64>* @x, i32 7
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
+}
+
+define void @hem_store(i32 %i, <2 x i64> %y) {
+ %t = getelementptr <2 x i64>* @x, i32 %i
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
+}
+
+define void @hem_2d_store(i32 %i, i32 %j, <2 x i64> %y) {
+ %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
+}
+
+define void @foo_store(<2 x i64> %y) {
+ store <2 x i64> %y, <2 x i64>* @x, align 1
+ ret void
+}
+
+define void @bar_store(<2 x i64> %y) {
+ %t = alloca <2 x i64>
+ call void @kip(<2 x i64>* %t)
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
+}
+
+declare void @kip(<2 x i64>* %t)
diff --git a/test/Transforms/InstCombine/logical-select.ll b/test/Transforms/InstCombine/logical-select.ll
new file mode 100644
index 0000000..bb59817
--- /dev/null
+++ b/test/Transforms/InstCombine/logical-select.ll
@@ -0,0 +1,68 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+
+define i32 @foo(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+ %e = icmp slt i32 %a, %b
+ %f = sext i1 %e to i32
+ %g = and i32 %c, %f
+ %h = xor i32 %f, -1
+ %i = and i32 %d, %h
+ %j = or i32 %g, %i
+ ret i32 %j
+; CHECK: %e = icmp slt i32 %a, %b
+; CHECK: %j = select i1 %e, i32 %c, i32 %d
+; CHECK: ret i32 %j
+}
+define i32 @bar(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+ %e = icmp slt i32 %a, %b
+ %f = sext i1 %e to i32
+ %g = and i32 %c, %f
+ %h = xor i32 %f, -1
+ %i = and i32 %d, %h
+ %j = or i32 %i, %g
+ ret i32 %j
+; CHECK: %e = icmp slt i32 %a, %b
+; CHECK: %j = select i1 %e, i32 %c, i32 %d
+; CHECK: ret i32 %j
+}
+
+define i32 @goo(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+entry:
+ %0 = icmp slt i32 %a, %b
+ %iftmp.0.0 = select i1 %0, i32 -1, i32 0
+ %1 = and i32 %iftmp.0.0, %c
+ %not = xor i32 %iftmp.0.0, -1
+ %2 = and i32 %not, %d
+ %3 = or i32 %1, %2
+ ret i32 %3
+; CHECK: %0 = icmp slt i32 %a, %b
+; CHECK: %1 = select i1 %0, i32 %c, i32 %d
+; CHECK: ret i32 %1
+}
+define i32 @poo(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+entry:
+ %0 = icmp slt i32 %a, %b
+ %iftmp.0.0 = select i1 %0, i32 -1, i32 0
+ %1 = and i32 %iftmp.0.0, %c
+ %iftmp = select i1 %0, i32 0, i32 -1
+ %2 = and i32 %iftmp, %d
+ %3 = or i32 %1, %2
+ ret i32 %3
+; CHECK: %0 = icmp slt i32 %a, %b
+; CHECK: %1 = select i1 %0, i32 %c, i32 %d
+; CHECK: ret i32 %1
+}
+
+define i32 @par(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+entry:
+ %0 = icmp slt i32 %a, %b
+ %iftmp.1.0 = select i1 %0, i32 -1, i32 0
+ %1 = and i32 %iftmp.1.0, %c
+ %not = xor i32 %iftmp.1.0, -1
+ %2 = and i32 %not, %d
+ %3 = or i32 %1, %2
+ ret i32 %3
+; CHECK: %0 = icmp slt i32 %a, %b
+; CHECK: %1 = select i1 %0, i32 %c, i32 %d
+; CHECK: ret i32 %1
+}
diff --git a/test/Transforms/InstCombine/lshr-phi.ll b/test/Transforms/InstCombine/lshr-phi.ll
new file mode 100644
index 0000000..76a113f
--- /dev/null
+++ b/test/Transforms/InstCombine/lshr-phi.ll
@@ -0,0 +1,35 @@
+; RUN: opt < %s -instcombine -S > %t
+; RUN: not grep lshr %t
+; RUN: grep add %t | count 1
+
+; Instcombine should be able to eliminate the lshr, because only
+; bits in the operand which might be non-zero will be shifted
+; off the end.
+
+define i32 @hash_string(i8* nocapture %key) nounwind readonly {
+entry:
+ %t0 = load i8* %key, align 1 ; <i8> [#uses=1]
+ %t1 = icmp eq i8 %t0, 0 ; <i1> [#uses=1]
+ br i1 %t1, label %bb2, label %bb
+
+bb: ; preds = %bb, %entry
+ %indvar = phi i64 [ 0, %entry ], [ %tmp, %bb ] ; <i64> [#uses=2]
+ %k.04 = phi i32 [ 0, %entry ], [ %t8, %bb ] ; <i32> [#uses=2]
+ %cp.05 = getelementptr i8* %key, i64 %indvar ; <i8*> [#uses=1]
+ %t2 = shl i32 %k.04, 1 ; <i32> [#uses=1]
+ %t3 = lshr i32 %k.04, 14 ; <i32> [#uses=1]
+ %t4 = add i32 %t2, %t3 ; <i32> [#uses=1]
+ %t5 = load i8* %cp.05, align 1 ; <i8> [#uses=1]
+ %t6 = sext i8 %t5 to i32 ; <i32> [#uses=1]
+ %t7 = xor i32 %t6, %t4 ; <i32> [#uses=1]
+ %t8 = and i32 %t7, 16383 ; <i32> [#uses=2]
+ %tmp = add i64 %indvar, 1 ; <i64> [#uses=2]
+ %scevgep = getelementptr i8* %key, i64 %tmp ; <i8*> [#uses=1]
+ %t9 = load i8* %scevgep, align 1 ; <i8> [#uses=1]
+ %t10 = icmp eq i8 %t9, 0 ; <i1> [#uses=1]
+ br i1 %t10, label %bb2, label %bb
+
+bb2: ; preds = %bb, %entry
+ %k.0.lcssa = phi i32 [ 0, %entry ], [ %t8, %bb ] ; <i32> [#uses=1]
+ ret i32 %k.0.lcssa
+}
diff --git a/test/Transforms/InstCombine/malloc-free-delete.ll b/test/Transforms/InstCombine/malloc-free-delete.ll
new file mode 100644
index 0000000..a4b7496
--- /dev/null
+++ b/test/Transforms/InstCombine/malloc-free-delete.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -globaldce -S | FileCheck %s
+; PR1201
+define i32 @main(i32 %argc, i8** %argv) {
+ %c_19 = alloca i8* ; <i8**> [#uses=2]
+ %malloc_206 = malloc i8, i32 10 ; <i8*> [#uses=1]
+; CHECK-NOT: malloc
+ store i8* %malloc_206, i8** %c_19
+ %tmp_207 = load i8** %c_19 ; <i8*> [#uses=1]
+ free i8* %tmp_207
+; CHECK-NOT: free
+ ret i32 0
+; CHECK: ret i32 0
+}
diff --git a/test/Transforms/InstCombine/malloc.ll b/test/Transforms/InstCombine/malloc.ll
new file mode 100644
index 0000000..b6ebbea
--- /dev/null
+++ b/test/Transforms/InstCombine/malloc.ll
@@ -0,0 +1,7 @@
+; test that malloc's with a constant argument are promoted to array allocations
+; RUN: opt < %s -instcombine -S | grep getelementptr
+
+define i32* @test() {
+ %X = malloc i32, i32 4
+ ret i32* %X
+}
diff --git a/test/Transforms/InstCombine/malloc2.ll b/test/Transforms/InstCombine/malloc2.ll
new file mode 100644
index 0000000..8462dac
--- /dev/null
+++ b/test/Transforms/InstCombine/malloc2.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; PR1313
+
+define i32 @test1(i32 %argc, i8* %argv, i8* %envp) {
+ %tmp15.i.i.i23 = malloc [2564 x i32] ; <[2564 x i32]*> [#uses=1]
+; CHECK-NOT: call i8* @malloc
+ %c = icmp eq [2564 x i32]* %tmp15.i.i.i23, null ; <i1>:0 [#uses=1]
+ %retval = zext i1 %c to i32 ; <i32> [#uses=1]
+ ret i32 %retval
+; CHECK: ret i32 0
+}
+
+define i32 @test2(i32 %argc, i8* %argv, i8* %envp) {
+ %tmp15.i.i.i23 = malloc [2564 x i32] ; <[2564 x i32]*> [#uses=1]
+; CHECK-NOT: call i8* @malloc
+ %X = bitcast [2564 x i32]* %tmp15.i.i.i23 to i32*
+ %c = icmp ne i32* %X, null
+ %retval = zext i1 %c to i32 ; <i32> [#uses=1]
+ ret i32 %retval
+; CHECK: ret i32 1
+}
+
diff --git a/test/Transforms/InstCombine/malloc3.ll b/test/Transforms/InstCombine/malloc3.ll
new file mode 100644
index 0000000..f1c0cae
--- /dev/null
+++ b/test/Transforms/InstCombine/malloc3.ll
@@ -0,0 +1,26 @@
+; RUN: opt < %s -instcombine -S | not grep load
+; PR1728
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin8"
+ %struct.foo = type { %struct.foo*, [10 x i32] }
+@.str = internal constant [21 x i8] c"tmp = %p, next = %p\0A\00" ; <[21 x i8]*> [#uses=1]
+
+define i32 @main() {
+entry:
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp1 = malloc i8, i32 44 ; <i8*> [#uses=1]
+ %tmp12 = bitcast i8* %tmp1 to %struct.foo* ; <%struct.foo*> [#uses=3]
+ %tmp3 = malloc i8, i32 44 ; <i8*> [#uses=1]
+ %tmp34 = bitcast i8* %tmp3 to %struct.foo* ; <%struct.foo*> [#uses=1]
+ %tmp6 = getelementptr %struct.foo* %tmp12, i32 0, i32 0 ; <%struct.foo**> [#uses=1]
+ store %struct.foo* %tmp34, %struct.foo** %tmp6, align 4
+ %tmp8 = getelementptr %struct.foo* %tmp12, i32 0, i32 0 ; <%struct.foo**> [#uses=1]
+ %tmp9 = load %struct.foo** %tmp8, align 4 ; <%struct.foo*> [#uses=1]
+ %tmp10 = getelementptr [21 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp13 = call i32 (i8*, ...)* @printf( i8* %tmp10, %struct.foo* %tmp12, %struct.foo* %tmp9 ) ; <i32> [#uses=0]
+ ret i32 undef
+}
+
+declare i32 @printf(i8*, ...)
+
diff --git a/test/Transforms/InstCombine/memcpy-to-load.ll b/test/Transforms/InstCombine/memcpy-to-load.ll
new file mode 100644
index 0000000..ebb8711
--- /dev/null
+++ b/test/Transforms/InstCombine/memcpy-to-load.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep {load double}
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin8"
+
+define void @foo(double* %X, double* %Y) {
+entry:
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp2 = bitcast double* %X to i8* ; <i8*> [#uses=1]
+ %tmp13 = bitcast double* %Y to i8* ; <i8*> [#uses=1]
+ call void @llvm.memcpy.i32( i8* %tmp2, i8* %tmp13, i32 8, i32 1 )
+ ret void
+}
+
+declare void @llvm.memcpy.i32(i8*, i8*, i32, i32) nounwind
diff --git a/test/Transforms/InstCombine/memcpy.ll b/test/Transforms/InstCombine/memcpy.ll
new file mode 100644
index 0000000..2e7b2c0
--- /dev/null
+++ b/test/Transforms/InstCombine/memcpy.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
+
+define void @test4(i8* %a) {
+ tail call void @llvm.memcpy.i32( i8* %a, i8* %a, i32 100, i32 1 )
+ ret void
+}
+; CHECK: define void @test4
+; CHECK-NEXT: ret void
diff --git a/test/Transforms/InstCombine/memmove.ll b/test/Transforms/InstCombine/memmove.ll
new file mode 100644
index 0000000..1806cfc
--- /dev/null
+++ b/test/Transforms/InstCombine/memmove.ll
@@ -0,0 +1,42 @@
+; This test makes sure that memmove instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep {call void @llvm.memmove}
+
+@S = internal constant [33 x i8] c"panic: restorelist inconsistency\00" ; <[33 x i8]*> [#uses=1]
+@h = constant [2 x i8] c"h\00" ; <[2 x i8]*> [#uses=1]
+@hel = constant [4 x i8] c"hel\00" ; <[4 x i8]*> [#uses=1]
+@hello_u = constant [8 x i8] c"hello_u\00" ; <[8 x i8]*> [#uses=1]
+
+
+declare void @llvm.memmove.i32(i8*, i8*, i32, i32)
+
+define void @test1(i8* %A, i8* %B, i32 %N) {
+ call void @llvm.memmove.i32( i8* %A, i8* %B, i32 0, i32 1 )
+ ret void
+}
+
+define void @test2(i8* %A, i32 %N) {
+ ;; dest can't alias source since we can't write to source!
+ call void @llvm.memmove.i32( i8* %A, i8* getelementptr ([33 x i8]* @S, i32 0, i32 0), i32 %N, i32 1 )
+ ret void
+}
+
+define i32 @test3() {
+ %h_p = getelementptr [2 x i8]* @h, i32 0, i32 0 ; <i8*> [#uses=1]
+ %hel_p = getelementptr [4 x i8]* @hel, i32 0, i32 0 ; <i8*> [#uses=1]
+ %hello_u_p = getelementptr [8 x i8]* @hello_u, i32 0, i32 0 ; <i8*> [#uses=1]
+ %target = alloca [1024 x i8] ; <[1024 x i8]*> [#uses=1]
+ %target_p = getelementptr [1024 x i8]* %target, i32 0, i32 0 ; <i8*> [#uses=3]
+ call void @llvm.memmove.i32( i8* %target_p, i8* %h_p, i32 2, i32 2 )
+ call void @llvm.memmove.i32( i8* %target_p, i8* %hel_p, i32 4, i32 4 )
+ call void @llvm.memmove.i32( i8* %target_p, i8* %hello_u_p, i32 8, i32 8 )
+ ret i32 0
+}
+
+; PR2370
+define void @test4(i8* %a) {
+ tail call void @llvm.memmove.i32( i8* %a, i8* %a, i32 100, i32 1 )
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/memset.ll b/test/Transforms/InstCombine/memset.ll
new file mode 100644
index 0000000..8e85694
--- /dev/null
+++ b/test/Transforms/InstCombine/memset.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine -S | not grep {call.*llvm.memset}
+
+declare void @llvm.memset.i32(i8*, i8, i32, i32)
+
+define i32 @main() {
+ %target = alloca [1024 x i8] ; <[1024 x i8]*> [#uses=1]
+ %target_p = getelementptr [1024 x i8]* %target, i32 0, i32 0 ; <i8*> [#uses=5]
+ call void @llvm.memset.i32( i8* %target_p, i8 1, i32 0, i32 1 )
+ call void @llvm.memset.i32( i8* %target_p, i8 1, i32 1, i32 1 )
+ call void @llvm.memset.i32( i8* %target_p, i8 1, i32 2, i32 2 )
+ call void @llvm.memset.i32( i8* %target_p, i8 1, i32 4, i32 4 )
+ call void @llvm.memset.i32( i8* %target_p, i8 1, i32 8, i32 8 )
+ ret i32 0
+}
+
diff --git a/test/Transforms/InstCombine/mul-masked-bits.ll b/test/Transforms/InstCombine/mul-masked-bits.ll
new file mode 100644
index 0000000..a43d5f2
--- /dev/null
+++ b/test/Transforms/InstCombine/mul-masked-bits.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep ashr
+
+define i32 @foo(i32 %x, i32 %y) {
+ %a = and i32 %x, 7
+ %b = and i32 %y, 7
+ %c = mul i32 %a, %b
+ %d = shl i32 %c, 26
+ %e = ashr i32 %d, 26
+ ret i32 %e
+}
diff --git a/test/Transforms/InstCombine/mul.ll b/test/Transforms/InstCombine/mul.ll
new file mode 100644
index 0000000..53a5643
--- /dev/null
+++ b/test/Transforms/InstCombine/mul.ll
@@ -0,0 +1,116 @@
+; This test makes sure that mul instructions are properly eliminated.
+; RUN: opt < %s -instcombine -S | not grep mul
+
+define i32 @test1(i32 %A) {
+ %B = mul i32 %A, 1 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test2(i32 %A) {
+ ; Should convert to an add instruction
+ %B = mul i32 %A, 2 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test3(i32 %A) {
+ ; This should disappear entirely
+ %B = mul i32 %A, 0 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define double @test4(double %A) {
+ ; This is safe for FP
+ %B = fmul double 1.000000e+00, %A ; <double> [#uses=1]
+ ret double %B
+}
+
+define i32 @test5(i32 %A) {
+ %B = mul i32 %A, 8 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i8 @test6(i8 %A) {
+ %B = mul i8 %A, 8 ; <i8> [#uses=1]
+ %C = mul i8 %B, 8 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i32 @test7(i32 %i) {
+ %tmp = mul i32 %i, -1 ; <i32> [#uses=1]
+ ret i32 %tmp
+}
+
+define i64 @test8(i64 %i) {
+ ; tmp = sub 0, %i
+ %j = mul i64 %i, -1 ; <i64> [#uses=1]
+ ret i64 %j
+}
+
+define i32 @test9(i32 %i) {
+ ; %j = sub 0, %i
+ %j = mul i32 %i, -1 ; <i32> [#uses=1]
+ ret i32 %j
+}
+
+define i32 @test10(i32 %a, i32 %b) {
+ %c = icmp slt i32 %a, 0 ; <i1> [#uses=1]
+ %d = zext i1 %c to i32 ; <i32> [#uses=1]
+ ; e = b & (a >> 31)
+ %e = mul i32 %d, %b ; <i32> [#uses=1]
+ ret i32 %e
+}
+
+define i32 @test11(i32 %a, i32 %b) {
+ %c = icmp sle i32 %a, -1 ; <i1> [#uses=1]
+ %d = zext i1 %c to i32 ; <i32> [#uses=1]
+ ; e = b & (a >> 31)
+ %e = mul i32 %d, %b ; <i32> [#uses=1]
+ ret i32 %e
+}
+
+define i32 @test12(i8 %a, i32 %b) {
+ %c = icmp ugt i8 %a, 127 ; <i1> [#uses=1]
+ %d = zext i1 %c to i32 ; <i32> [#uses=1]
+ ; e = b & (a >> 31)
+ %e = mul i32 %d, %b ; <i32> [#uses=1]
+ ret i32 %e
+}
+
+; PR2642
+define internal void @test13(<4 x float>*) {
+ load <4 x float>* %0, align 1
+ fmul <4 x float> %2, < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >
+ store <4 x float> %3, <4 x float>* %0, align 1
+ ret void
+}
+
+define <16 x i8> @test14(<16 x i8> %a) {
+ %b = mul <16 x i8> %a, zeroinitializer
+ ret <16 x i8> %b
+}
+
+; rdar://7293527
+define i32 @test15(i32 %A, i32 %B) {
+entry:
+ %shl = shl i32 1, %B
+ %m = mul i32 %shl, %A
+ ret i32 %m
+}
+
+; X * Y (when Y is 0 or 1) --> x & (0-Y)
+define i32 @test16(i32 %b, i1 %c) {
+ %d = zext i1 %c to i32 ; <i32> [#uses=1]
+ ; e = b & (a >> 31)
+ %e = mul i32 %d, %b ; <i32> [#uses=1]
+ ret i32 %e
+}
+
+; X * Y (when Y is 0 or 1) --> x & (0-Y)
+define i32 @test17(i32 %a, i32 %b) {
+ %a.lobit = lshr i32 %a, 31
+ %e = mul i32 %a.lobit, %b
+ ret i32 %e
+}
+
+
+
diff --git a/test/Transforms/InstCombine/multi-use-or.ll b/test/Transforms/InstCombine/multi-use-or.ll
new file mode 100644
index 0000000..9bbef23
--- /dev/null
+++ b/test/Transforms/InstCombine/multi-use-or.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -instcombine -S | grep {add double .sx, .sy}
+; The 'or' has multiple uses, make sure that this doesn't prevent instcombine
+; from propagating the extends to the truncs.
+
+define double @ScaleObjectAdd(double %sx, double %sy, double %sz) nounwind {
+entry:
+ %sx34 = bitcast double %sx to i64 ; <i64> [#uses=1]
+ %sx3435 = zext i64 %sx34 to i192 ; <i192> [#uses=1]
+ %sy22 = bitcast double %sy to i64 ; <i64> [#uses=1]
+ %sy2223 = zext i64 %sy22 to i192 ; <i192> [#uses=1]
+ %sy222324 = shl i192 %sy2223, 128 ; <i192> [#uses=1]
+ %sy222324.ins = or i192 %sx3435, %sy222324 ; <i192> [#uses=1]
+
+
+ %a = trunc i192 %sy222324.ins to i64 ; <i64> [#uses=1]
+ %b = bitcast i64 %a to double ; <double> [#uses=1]
+ %c = lshr i192 %sy222324.ins, 128 ; <i192> [#uses=1]
+ %d = trunc i192 %c to i64 ; <i64> [#uses=1]
+ %e = bitcast i64 %d to double ; <double> [#uses=1]
+ %f = fadd double %b, %e
+
+; ret double %e
+ ret double %f
+}
diff --git a/test/Transforms/InstCombine/narrow.ll b/test/Transforms/InstCombine/narrow.ll
new file mode 100644
index 0000000..1b96a06
--- /dev/null
+++ b/test/Transforms/InstCombine/narrow.ll
@@ -0,0 +1,18 @@
+; This file contains various testcases that check to see that instcombine
+; is narrowing computations when possible.
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {ret i1 false}
+
+; test1 - Eliminating the casts in this testcase (by narrowing the AND
+; operation) allows instcombine to realize the function always returns false.
+;
+define i1 @test1(i32 %A, i32 %B) {
+ %C1 = icmp slt i32 %A, %B ; <i1> [#uses=1]
+ %ELIM1 = zext i1 %C1 to i32 ; <i32> [#uses=1]
+ %C2 = icmp sgt i32 %A, %B ; <i1> [#uses=1]
+ %ELIM2 = zext i1 %C2 to i32 ; <i32> [#uses=1]
+ %C3 = and i32 %ELIM1, %ELIM2 ; <i32> [#uses=1]
+ %ELIM3 = trunc i32 %C3 to i1 ; <i1> [#uses=1]
+ ret i1 %ELIM3
+}
+
diff --git a/test/Transforms/InstCombine/no-negzero.ll b/test/Transforms/InstCombine/no-negzero.ll
new file mode 100644
index 0000000..f295130
--- /dev/null
+++ b/test/Transforms/InstCombine/no-negzero.ll
@@ -0,0 +1,33 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; ModuleID = '3555a.c'
+; sqrt(fabs) cannot be negative zero, so we should eliminate the fadd.
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.8"
+
+; CHECK: @mysqrt
+; CHECK-NOT: fadd
+; CHECK: ret
+define double @mysqrt(double %x) nounwind {
+entry:
+ %x_addr = alloca double ; <double*> [#uses=2]
+ %retval = alloca double, align 8 ; <double*> [#uses=2]
+ %0 = alloca double, align 8 ; <double*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store double %x, double* %x_addr
+ %1 = load double* %x_addr, align 8 ; <double> [#uses=1]
+ %2 = call double @fabs(double %1) nounwind readnone ; <double> [#uses=1]
+ %3 = call double @sqrt(double %2) nounwind readonly ; <double> [#uses=1]
+ %4 = fadd double %3, 0.000000e+00 ; <double> [#uses=1]
+ store double %4, double* %0, align 8
+ %5 = load double* %0, align 8 ; <double> [#uses=1]
+ store double %5, double* %retval, align 8
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load double* %retval ; <double> [#uses=1]
+ ret double %retval1
+}
+
+declare double @fabs(double)
+
+declare double @sqrt(double) nounwind readonly
diff --git a/test/Transforms/InstCombine/not-fcmp.ll b/test/Transforms/InstCombine/not-fcmp.ll
new file mode 100644
index 0000000..ad01a6b
--- /dev/null
+++ b/test/Transforms/InstCombine/not-fcmp.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep "fcmp uge"
+; PR1570
+
+define i1 @f(float %X, float %Y) {
+entry:
+ %tmp3 = fcmp olt float %X, %Y ; <i1> [#uses=1]
+ %toBoolnot5 = xor i1 %tmp3, true ; <i1> [#uses=1]
+ ret i1 %toBoolnot5
+}
+
diff --git a/test/Transforms/InstCombine/not.ll b/test/Transforms/InstCombine/not.ll
new file mode 100644
index 0000000..c58ce11
--- /dev/null
+++ b/test/Transforms/InstCombine/not.ll
@@ -0,0 +1,54 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+
+; RUN: opt < %s -instcombine -S | not grep xor
+
+define i32 @test1(i32 %A) {
+ %B = xor i32 %A, -1 ; <i32> [#uses=1]
+ %C = xor i32 %B, -1 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i1 @test2(i32 %A, i32 %B) {
+ ; Can change into setge
+ %cond = icmp sle i32 %A, %B ; <i1> [#uses=1]
+ %Ret = xor i1 %cond, true ; <i1> [#uses=1]
+ ret i1 %Ret
+}
+
+; Test that demorgans law can be instcombined
+define i32 @test3(i32 %A, i32 %B) {
+ %a = xor i32 %A, -1 ; <i32> [#uses=1]
+ %b = xor i32 %B, -1 ; <i32> [#uses=1]
+ %c = and i32 %a, %b ; <i32> [#uses=1]
+ %d = xor i32 %c, -1 ; <i32> [#uses=1]
+ ret i32 %d
+}
+
+; Test that demorgens law can work with constants
+define i32 @test4(i32 %A, i32 %B) {
+ %a = xor i32 %A, -1 ; <i32> [#uses=1]
+ %c = and i32 %a, 5 ; <i32> [#uses=1]
+ %d = xor i32 %c, -1 ; <i32> [#uses=1]
+ ret i32 %d
+}
+
+; test the mirror of demorgans law...
+define i32 @test5(i32 %A, i32 %B) {
+ %a = xor i32 %A, -1 ; <i32> [#uses=1]
+ %b = xor i32 %B, -1 ; <i32> [#uses=1]
+ %c = or i32 %a, %b ; <i32> [#uses=1]
+ %d = xor i32 %c, -1 ; <i32> [#uses=1]
+ ret i32 %d
+}
+
+; PR2298
+define i8 @test6(i32 %a, i32 %b) zeroext nounwind {
+entry:
+ %tmp1not = xor i32 %a, -1 ; <i32> [#uses=1]
+ %tmp2not = xor i32 %b, -1 ; <i32> [#uses=1]
+ %tmp3 = icmp slt i32 %tmp1not, %tmp2not ; <i1> [#uses=1]
+ %retval67 = zext i1 %tmp3 to i8 ; <i8> [#uses=1]
+ ret i8 %retval67
+}
+
diff --git a/test/Transforms/InstCombine/nothrow.ll b/test/Transforms/InstCombine/nothrow.ll
new file mode 100644
index 0000000..08d90bf
--- /dev/null
+++ b/test/Transforms/InstCombine/nothrow.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | not grep call
+; rdar://6880732
+declare double @t1(i32) readonly
+
+define void @t2() nounwind {
+ call double @t1(i32 42) ;; dead call even though callee is not nothrow.
+ ret void
+}
diff --git a/test/Transforms/InstCombine/nsw.ll b/test/Transforms/InstCombine/nsw.ll
new file mode 100644
index 0000000..821cebe
--- /dev/null
+++ b/test/Transforms/InstCombine/nsw.ll
@@ -0,0 +1,20 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; CHECK: define i32 @foo
+; %y = sub i32 0, %x
+; %z = sdiv i32 %y, 337
+; ret i32 %y
+define i32 @foo(i32 %x) {
+ %y = sub i32 0, %x
+ %z = sdiv i32 %y, 337
+ ret i32 %y
+}
+
+; CHECK: define i32 @bar
+; %y = sdiv i32 %x, -337
+; ret i32 %y
+define i32 @bar(i32 %x) {
+ %y = sub nsw i32 0, %x
+ %z = sdiv i32 %y, 337
+ ret i32 %y
+}
diff --git a/test/Transforms/InstCombine/objsize.ll b/test/Transforms/InstCombine/objsize.ll
new file mode 100644
index 0000000..69e09f6
--- /dev/null
+++ b/test/Transforms/InstCombine/objsize.ll
@@ -0,0 +1,52 @@
+; Test a pile of objectsize bounds checking.
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; We need target data to get the sizes of the arrays and structures.
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+
+@a = private global [60 x i8] zeroinitializer, align 1 ; <[60 x i8]*>
+@.str = private constant [8 x i8] c"abcdefg\00" ; <[8 x i8]*>
+
+define i32 @foo() nounwind {
+; CHECK: @foo
+; CHECK-NEXT: ret i32 60
+ %1 = call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i1 false)
+ ret i32 %1
+}
+
+define i8* @bar() nounwind {
+; CHECK: @bar
+entry:
+ %retval = alloca i8*
+ %0 = call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i1 false)
+ %cmp = icmp ne i32 %0, -1
+; CHECK: br i1 true
+ br i1 %cmp, label %cond.true, label %cond.false
+
+cond.true:
+ %1 = load i8** %retval;
+ ret i8* %1;
+
+cond.false:
+ %2 = load i8** %retval;
+ ret i8* %2;
+}
+
+; FIXME: Should be ret i32 0
+define i32 @f() nounwind {
+; CHECK: @f
+; CHECK-NEXT: llvm.objectsize.i32
+ %1 = call i32 @llvm.objectsize.i32(i8* getelementptr ([60 x i8]* @a, i32 1, i32 0), i1 false)
+ ret i32 %1
+}
+
+@window = external global [0 x i8]
+
+define i1 @baz() nounwind {
+; CHECK: @baz
+; CHECK-NEXT: ret i1 true
+ %1 = tail call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([0 x i8]* @window, i32 0, i32 0), i1 false)
+ %2 = icmp eq i32 %1, -1
+ ret i1 %2
+}
+
+declare i32 @llvm.objectsize.i32(i8*, i1) nounwind readonly \ No newline at end of file
diff --git a/test/Transforms/InstCombine/odr-linkage.ll b/test/Transforms/InstCombine/odr-linkage.ll
new file mode 100644
index 0000000..a64ef28
--- /dev/null
+++ b/test/Transforms/InstCombine/odr-linkage.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine -S | grep {ret i32 10}
+
+@g1 = available_externally constant i32 1
+@g2 = linkonce_odr constant i32 2
+@g3 = weak_odr constant i32 3
+@g4 = internal constant i32 4
+
+define i32 @test() {
+ %A = load i32* @g1
+ %B = load i32* @g2
+ %C = load i32* @g3
+ %D = load i32* @g4
+
+ %a = add i32 %A, %B
+ %b = add i32 %a, %C
+ %c = add i32 %b, %D
+ ret i32 %c
+}
+ \ No newline at end of file
diff --git a/test/Transforms/InstCombine/or-fcmp.ll b/test/Transforms/InstCombine/or-fcmp.ll
new file mode 100644
index 0000000..9692bfc
--- /dev/null
+++ b/test/Transforms/InstCombine/or-fcmp.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -instcombine -S | grep fcmp | count 3
+; RUN: opt < %s -instcombine -S | grep ret | grep 1
+
+define zeroext i8 @t1(float %x, float %y) nounwind {
+ %a = fcmp ueq float %x, %y ; <i1> [#uses=1]
+ %b = fcmp uno float %x, %y ; <i1> [#uses=1]
+ %c = or i1 %a, %b
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
+
+define zeroext i8 @t2(float %x, float %y) nounwind {
+ %a = fcmp olt float %x, %y ; <i1> [#uses=1]
+ %b = fcmp oeq float %x, %y ; <i1> [#uses=1]
+ %c = or i1 %a, %b
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
+
+define zeroext i8 @t3(float %x, float %y) nounwind {
+ %a = fcmp ult float %x, %y ; <i1> [#uses=1]
+ %b = fcmp uge float %x, %y ; <i1> [#uses=1]
+ %c = or i1 %a, %b
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
+
+define zeroext i8 @t4(float %x, float %y) nounwind {
+ %a = fcmp ult float %x, %y ; <i1> [#uses=1]
+ %b = fcmp ugt float %x, %y ; <i1> [#uses=1]
+ %c = or i1 %a, %b
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
diff --git a/test/Transforms/InstCombine/or-to-xor.ll b/test/Transforms/InstCombine/or-to-xor.ll
new file mode 100644
index 0000000..1495ee4
--- /dev/null
+++ b/test/Transforms/InstCombine/or-to-xor.ll
@@ -0,0 +1,42 @@
+; RUN: opt < %s -instcombine -S | grep {xor i32 %a, %b} | count 4
+; RUN: opt < %s -instcombine -S | not grep {and}
+
+define i32 @func1(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %b_not = xor i32 %b, -1
+ %0 = and i32 %a, %b_not
+ %a_not = xor i32 %a, -1
+ %1 = and i32 %a_not, %b
+ %2 = or i32 %0, %1
+ ret i32 %2
+}
+
+define i32 @func2(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %b_not = xor i32 %b, -1
+ %0 = and i32 %b_not, %a
+ %a_not = xor i32 %a, -1
+ %1 = and i32 %a_not, %b
+ %2 = or i32 %0, %1
+ ret i32 %2
+}
+
+define i32 @func3(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %b_not = xor i32 %b, -1
+ %0 = and i32 %a, %b_not
+ %a_not = xor i32 %a, -1
+ %1 = and i32 %b, %a_not
+ %2 = or i32 %0, %1
+ ret i32 %2
+}
+
+define i32 @func4(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %b_not = xor i32 %b, -1
+ %0 = and i32 %b_not, %a
+ %a_not = xor i32 %a, -1
+ %1 = and i32 %b, %a_not
+ %2 = or i32 %0, %1
+ ret i32 %2
+}
diff --git a/test/Transforms/InstCombine/or.ll b/test/Transforms/InstCombine/or.ll
new file mode 100644
index 0000000..189be10
--- /dev/null
+++ b/test/Transforms/InstCombine/or.ll
@@ -0,0 +1,352 @@
+; This test makes sure that these instructions are properly eliminated.
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+
+define i32 @test1(i32 %A) {
+ %B = or i32 %A, 0
+ ret i32 %B
+; CHECK: @test1
+; CHECK: ret i32 %A
+}
+
+define i32 @test2(i32 %A) {
+ %B = or i32 %A, -1
+ ret i32 %B
+; CHECK: @test2
+; CHECK: ret i32 -1
+}
+
+define i8 @test2a(i8 %A) {
+ %B = or i8 %A, -1
+ ret i8 %B
+; CHECK: @test2a
+; CHECK: ret i8 -1
+}
+
+define i1 @test3(i1 %A) {
+ %B = or i1 %A, false
+ ret i1 %B
+; CHECK: @test3
+; CHECK: ret i1 %A
+}
+
+define i1 @test4(i1 %A) {
+ %B = or i1 %A, true
+ ret i1 %B
+; CHECK: @test4
+; CHECK: ret i1 true
+}
+
+define i1 @test5(i1 %A) {
+ %B = or i1 %A, %A
+ ret i1 %B
+; CHECK: @test5
+; CHECK: ret i1 %A
+}
+
+define i32 @test6(i32 %A) {
+ %B = or i32 %A, %A
+ ret i32 %B
+; CHECK: @test6
+; CHECK: ret i32 %A
+}
+
+; A | ~A == -1
+define i32 @test7(i32 %A) {
+ %NotA = xor i32 -1, %A
+ %B = or i32 %A, %NotA
+ ret i32 %B
+; CHECK: @test7
+; CHECK: ret i32 -1
+}
+
+define i8 @test8(i8 %A) {
+ %B = or i8 %A, -2
+ %C = or i8 %B, 1
+ ret i8 %C
+; CHECK: @test8
+; CHECK: ret i8 -1
+}
+
+; Test that (A|c1)|(B|c2) == (A|B)|(c1|c2)
+define i8 @test9(i8 %A, i8 %B) {
+ %C = or i8 %A, 1
+ %D = or i8 %B, -2
+ %E = or i8 %C, %D
+ ret i8 %E
+; CHECK: @test9
+; CHECK: ret i8 -1
+}
+
+define i8 @test10(i8 %A) {
+ %B = or i8 %A, 1
+ %C = and i8 %B, -2
+ ; (X & C1) | C2 --> (X | C2) & (C1|C2)
+ %D = or i8 %C, -2
+ ret i8 %D
+; CHECK: @test10
+; CHECK: ret i8 -2
+}
+
+define i8 @test11(i8 %A) {
+ %B = or i8 %A, -2
+ %C = xor i8 %B, 13
+ ; (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
+ %D = or i8 %C, 1
+ %E = xor i8 %D, 12
+ ret i8 %E
+; CHECK: @test11
+; CHECK: ret i8 -1
+}
+
+define i32 @test12(i32 %A) {
+ ; Should be eliminated
+ %B = or i32 %A, 4
+ %C = and i32 %B, 8
+ ret i32 %C
+; CHECK: @test12
+; CHECK: %C = and i32 %A, 8
+; CHECK: ret i32 %C
+}
+
+define i32 @test13(i32 %A) {
+ %B = or i32 %A, 12
+ ; Always equal to 8
+ %C = and i32 %B, 8
+ ret i32 %C
+; CHECK: @test13
+; CHECK: ret i32 8
+}
+
+define i1 @test14(i32 %A, i32 %B) {
+ %C1 = icmp ult i32 %A, %B
+ %C2 = icmp ugt i32 %A, %B
+ ; (A < B) | (A > B) === A != B
+ %D = or i1 %C1, %C2
+ ret i1 %D
+; CHECK: @test14
+; CHECK: %D = icmp ne i32 %A, %B
+; CHECK: ret i1 %D
+}
+
+define i1 @test15(i32 %A, i32 %B) {
+ %C1 = icmp ult i32 %A, %B
+ %C2 = icmp eq i32 %A, %B
+ ; (A < B) | (A == B) === A <= B
+ %D = or i1 %C1, %C2
+ ret i1 %D
+; CHECK: @test15
+; CHECK: %D = icmp ule i32 %A, %B
+; CHECK: ret i1 %D
+}
+
+define i32 @test16(i32 %A) {
+ %B = and i32 %A, 1
+ ; -2 = ~1
+ %C = and i32 %A, -2
+ ; %D = and int %B, -1 == %B
+ %D = or i32 %B, %C
+ ret i32 %D
+; CHECK: @test16
+; CHECK: ret i32 %A
+}
+
+define i32 @test17(i32 %A) {
+ %B = and i32 %A, 1
+ %C = and i32 %A, 4
+ ; %D = and int %B, 5
+ %D = or i32 %B, %C
+ ret i32 %D
+; CHECK: @test17
+; CHECK: %D = and i32 %A, 5
+; CHECK: ret i32 %D
+}
+
+define i1 @test18(i32 %A) {
+ %B = icmp sge i32 %A, 100
+ %C = icmp slt i32 %A, 50
+ ;; (A-50) >u 50
+ %D = or i1 %B, %C
+ ret i1 %D
+; CHECK: @test18
+; CHECK: add i32
+; CHECK: %D = icmp ugt
+; CHECK: ret i1 %D
+}
+
+define i1 @test19(i32 %A) {
+ %B = icmp eq i32 %A, 50
+ %C = icmp eq i32 %A, 51
+ ;; (A-50) < 2
+ %D = or i1 %B, %C
+ ret i1 %D
+; CHECK: @test19
+; CHECK: add i32
+; CHECK: %D = icmp ult
+; CHECK: ret i1 %D
+}
+
+define i32 @test20(i32 %x) {
+ %y = and i32 %x, 123
+ %z = or i32 %y, %x
+ ret i32 %z
+; CHECK: @test20
+; CHECK: ret i32 %x
+}
+
+define i32 @test21(i32 %tmp.1) {
+ %tmp.1.mask1 = add i32 %tmp.1, 2
+ %tmp.3 = and i32 %tmp.1.mask1, -2
+ %tmp.5 = and i32 %tmp.1, 1
+ ;; add tmp.1, 2
+ %tmp.6 = or i32 %tmp.5, %tmp.3
+ ret i32 %tmp.6
+; CHECK: @test21
+; CHECK: add i32 %{{[^,]*}}, 2
+; CHECK: ret i32
+}
+
+define i32 @test22(i32 %B) {
+ %ELIM41 = and i32 %B, 1
+ %ELIM7 = and i32 %B, -2
+ %ELIM5 = or i32 %ELIM41, %ELIM7
+ ret i32 %ELIM5
+; CHECK: @test22
+; CHECK: ret i32 %B
+}
+
+define i16 @test23(i16 %A) {
+ %B = lshr i16 %A, 1
+ ;; fold or into xor
+ %C = or i16 %B, -32768
+ %D = xor i16 %C, 8193
+ ret i16 %D
+; CHECK: @test23
+; CHECK: %B = lshr i16 %A, 1
+; CHECK: %D = xor i16 %B, -24575
+; CHECK: ret i16 %D
+}
+
+; PR1738
+define i1 @test24(double %X, double %Y) {
+ %tmp9 = fcmp uno double %X, 0.000000e+00 ; <i1> [#uses=1]
+ %tmp13 = fcmp uno double %Y, 0.000000e+00 ; <i1> [#uses=1]
+ %bothcond = or i1 %tmp13, %tmp9 ; <i1> [#uses=1]
+ ret i1 %bothcond
+
+; CHECK: @test24
+; CHECK: %bothcond = fcmp uno double %Y, %X ; <i1> [#uses=1]
+; CHECK: ret i1 %bothcond
+}
+
+; PR3266 & PR5276
+define i1 @test25(i32 %A, i32 %B) {
+ %C = icmp eq i32 %A, 0
+ %D = icmp eq i32 %B, 57
+ %E = or i1 %C, %D
+ %F = xor i1 %E, -1
+ ret i1 %F
+
+; CHECK: @test25
+; CHECK: icmp ne i32 %A, 0
+; CHECK-NEXT: icmp ne i32 %B, 57
+; CHECK-NEXT: %F = and i1
+; CHECK-NEXT: ret i1 %F
+}
+
+; PR5634
+define i1 @test26(i32 %A, i32 %B) {
+ %C1 = icmp eq i32 %A, 0
+ %C2 = icmp eq i32 %B, 0
+ ; (A == 0) & (A == 0) --> (A|B) == 0
+ %D = and i1 %C1, %C2
+ ret i1 %D
+; CHECK: @test26
+; CHECK: or i32 %A, %B
+; CHECK: icmp eq i32 {{.*}}, 0
+; CHECK: ret i1
+}
+
+define i1 @test27(i32* %A, i32* %B) {
+ %C1 = ptrtoint i32* %A to i32
+ %C2 = ptrtoint i32* %B to i32
+ %D = or i32 %C1, %C2
+ %E = icmp eq i32 %D, 0
+ ret i1 %E
+; CHECK: @test27
+; CHECK: icmp eq i32* %A, null
+; CHECK: icmp eq i32* %B, null
+; CHECK: and i1
+; CHECK: ret i1
+}
+
+; PR5634
+define i1 @test28(i32 %A, i32 %B) {
+ %C1 = icmp ne i32 %A, 0
+ %C2 = icmp ne i32 %B, 0
+ ; (A != 0) | (A != 0) --> (A|B) != 0
+ %D = or i1 %C1, %C2
+ ret i1 %D
+; CHECK: @test28
+; CHECK: or i32 %A, %B
+; CHECK: icmp ne i32 {{.*}}, 0
+; CHECK: ret i1
+}
+
+define i1 @test29(i32* %A, i32* %B) {
+ %C1 = ptrtoint i32* %A to i32
+ %C2 = ptrtoint i32* %B to i32
+ %D = or i32 %C1, %C2
+ %E = icmp ne i32 %D, 0
+ ret i1 %E
+; CHECK: @test29
+; CHECK: icmp ne i32* %A, null
+; CHECK: icmp ne i32* %B, null
+; CHECK: or i1
+; CHECK: ret i1
+}
+
+; PR4216
+define i32 @test30(i32 %A) {
+entry:
+ %B = or i32 %A, 32962
+ %C = and i32 %A, -65536
+ %D = and i32 %B, 40186
+ %E = or i32 %D, %C
+ ret i32 %E
+; CHECK: @test30
+; CHECK: %B = or i32 %A, 32962
+; CHECK: %E = and i32 %B, -25350
+; CHECK: ret i32 %E
+}
+
+; PR4216
+define i64 @test31(i64 %A) nounwind readnone ssp noredzone {
+ %B = or i64 %A, 194
+ %D = and i64 %B, 250
+
+ %C = or i64 %A, 32768
+ %E = and i64 %C, 4294941696
+
+ %F = or i64 %D, %E
+ ret i64 %F
+; CHECK: @test31
+; CHECK-NEXT: %bitfield = or i64 %A, 32962
+; CHECK-NEXT: %F = and i64 %bitfield, 4294941946
+; CHECK-NEXT: ret i64 %F
+}
+
+define <4 x i32> @test32(<4 x i1> %and.i1352, <4 x i32> %vecinit6.i176, <4 x i32> %vecinit6.i191) {
+ %and.i135 = sext <4 x i1> %and.i1352 to <4 x i32> ; <<4 x i32>> [#uses=2]
+ %and.i129 = and <4 x i32> %vecinit6.i176, %and.i135 ; <<4 x i32>> [#uses=1]
+ %neg.i = xor <4 x i32> %and.i135, <i32 -1, i32 -1, i32 -1, i32 -1> ; <<4 x i32>> [#uses=1]
+ %and.i = and <4 x i32> %vecinit6.i191, %neg.i ; <<4 x i32>> [#uses=1]
+ %or.i = or <4 x i32> %and.i, %and.i129 ; <<4 x i32>> [#uses=1]
+ ret <4 x i32> %or.i
+; Don't turn this into a vector select until codegen matures to handle them
+; better.
+; CHECK: @test32
+; CHECK: or <4 x i32> %and.i, %and.i129
+}
+
diff --git a/test/Transforms/InstCombine/phi-merge-gep.ll b/test/Transforms/InstCombine/phi-merge-gep.ll
new file mode 100644
index 0000000..2671749
--- /dev/null
+++ b/test/Transforms/InstCombine/phi-merge-gep.ll
@@ -0,0 +1,102 @@
+; RUN: opt < %s -S -instcombine > %t
+; RUN: grep {= getelementptr} %t | count 20
+; RUN: grep {= phi} %t | count 13
+
+; Don't push the geps through these phis, because they would require
+; two phis each, which burdens the loop with high register pressure.
+
+define void @foo(float* %Ar, float* %Ai, i64 %As, float* %Cr, float* %Ci, i64 %Cs, i64 %n) nounwind {
+entry:
+ %0 = getelementptr inbounds float* %Ar, i64 0 ; <float*> [#uses=1]
+ %1 = getelementptr inbounds float* %Ai, i64 0 ; <float*> [#uses=1]
+ %2 = mul i64 %n, %As ; <i64> [#uses=1]
+ %3 = getelementptr inbounds float* %Ar, i64 %2 ; <float*> [#uses=1]
+ %4 = mul i64 %n, %As ; <i64> [#uses=1]
+ %5 = getelementptr inbounds float* %Ai, i64 %4 ; <float*> [#uses=1]
+ %6 = mul i64 %n, 2 ; <i64> [#uses=1]
+ %7 = mul i64 %6, %As ; <i64> [#uses=1]
+ %8 = getelementptr inbounds float* %Ar, i64 %7 ; <float*> [#uses=1]
+ %9 = mul i64 %n, 2 ; <i64> [#uses=1]
+ %10 = mul i64 %9, %As ; <i64> [#uses=1]
+ %11 = getelementptr inbounds float* %Ai, i64 %10 ; <float*> [#uses=1]
+ %12 = getelementptr inbounds float* %Cr, i64 0 ; <float*> [#uses=1]
+ %13 = getelementptr inbounds float* %Ci, i64 0 ; <float*> [#uses=1]
+ %14 = mul i64 %n, %Cs ; <i64> [#uses=1]
+ %15 = getelementptr inbounds float* %Cr, i64 %14 ; <float*> [#uses=1]
+ %16 = mul i64 %n, %Cs ; <i64> [#uses=1]
+ %17 = getelementptr inbounds float* %Ci, i64 %16 ; <float*> [#uses=1]
+ %18 = mul i64 %n, 2 ; <i64> [#uses=1]
+ %19 = mul i64 %18, %Cs ; <i64> [#uses=1]
+ %20 = getelementptr inbounds float* %Cr, i64 %19 ; <float*> [#uses=1]
+ %21 = mul i64 %n, 2 ; <i64> [#uses=1]
+ %22 = mul i64 %21, %Cs ; <i64> [#uses=1]
+ %23 = getelementptr inbounds float* %Ci, i64 %22 ; <float*> [#uses=1]
+ br label %bb13
+
+bb: ; preds = %bb13
+ %24 = load float* %A0r.0, align 4 ; <float> [#uses=1]
+ %25 = load float* %A0i.0, align 4 ; <float> [#uses=1]
+ %26 = load float* %A1r.0, align 4 ; <float> [#uses=2]
+ %27 = load float* %A1i.0, align 4 ; <float> [#uses=2]
+ %28 = load float* %A2r.0, align 4 ; <float> [#uses=2]
+ %29 = load float* %A2i.0, align 4 ; <float> [#uses=2]
+ %30 = fadd float %26, %28 ; <float> [#uses=2]
+ %31 = fadd float %27, %29 ; <float> [#uses=2]
+ %32 = fsub float %26, %28 ; <float> [#uses=1]
+ %33 = fsub float %27, %29 ; <float> [#uses=1]
+ %34 = fadd float %24, %30 ; <float> [#uses=2]
+ %35 = fadd float %25, %31 ; <float> [#uses=2]
+ %36 = fmul float %30, -1.500000e+00 ; <float> [#uses=1]
+ %37 = fmul float %31, -1.500000e+00 ; <float> [#uses=1]
+ %38 = fadd float %34, %36 ; <float> [#uses=2]
+ %39 = fadd float %35, %37 ; <float> [#uses=2]
+ %40 = fmul float %32, 0x3FEBB67AE0000000 ; <float> [#uses=2]
+ %41 = fmul float %33, 0x3FEBB67AE0000000 ; <float> [#uses=2]
+ %42 = fadd float %38, %41 ; <float> [#uses=1]
+ %43 = fsub float %39, %40 ; <float> [#uses=1]
+ %44 = fsub float %38, %41 ; <float> [#uses=1]
+ %45 = fadd float %39, %40 ; <float> [#uses=1]
+ store float %34, float* %C0r.0, align 4
+ store float %35, float* %C0i.0, align 4
+ store float %42, float* %C1r.0, align 4
+ store float %43, float* %C1i.0, align 4
+ store float %44, float* %C2r.0, align 4
+ store float %45, float* %C2i.0, align 4
+ %46 = getelementptr inbounds float* %A0r.0, i64 %As ; <float*> [#uses=1]
+ %47 = getelementptr inbounds float* %A0i.0, i64 %As ; <float*> [#uses=1]
+ %48 = getelementptr inbounds float* %A1r.0, i64 %As ; <float*> [#uses=1]
+ %49 = getelementptr inbounds float* %A1i.0, i64 %As ; <float*> [#uses=1]
+ %50 = getelementptr inbounds float* %A2r.0, i64 %As ; <float*> [#uses=1]
+ %51 = getelementptr inbounds float* %A2i.0, i64 %As ; <float*> [#uses=1]
+ %52 = getelementptr inbounds float* %C0r.0, i64 %Cs ; <float*> [#uses=1]
+ %53 = getelementptr inbounds float* %C0i.0, i64 %Cs ; <float*> [#uses=1]
+ %54 = getelementptr inbounds float* %C1r.0, i64 %Cs ; <float*> [#uses=1]
+ %55 = getelementptr inbounds float* %C1i.0, i64 %Cs ; <float*> [#uses=1]
+ %56 = getelementptr inbounds float* %C2r.0, i64 %Cs ; <float*> [#uses=1]
+ %57 = getelementptr inbounds float* %C2i.0, i64 %Cs ; <float*> [#uses=1]
+ %58 = add nsw i64 %i.0, 1 ; <i64> [#uses=1]
+ br label %bb13
+
+bb13: ; preds = %bb, %entry
+ %i.0 = phi i64 [ 0, %entry ], [ %58, %bb ] ; <i64> [#uses=2]
+ %C2i.0 = phi float* [ %23, %entry ], [ %57, %bb ] ; <float*> [#uses=2]
+ %C2r.0 = phi float* [ %20, %entry ], [ %56, %bb ] ; <float*> [#uses=2]
+ %C1i.0 = phi float* [ %17, %entry ], [ %55, %bb ] ; <float*> [#uses=2]
+ %C1r.0 = phi float* [ %15, %entry ], [ %54, %bb ] ; <float*> [#uses=2]
+ %C0i.0 = phi float* [ %13, %entry ], [ %53, %bb ] ; <float*> [#uses=2]
+ %C0r.0 = phi float* [ %12, %entry ], [ %52, %bb ] ; <float*> [#uses=2]
+ %A2i.0 = phi float* [ %11, %entry ], [ %51, %bb ] ; <float*> [#uses=2]
+ %A2r.0 = phi float* [ %8, %entry ], [ %50, %bb ] ; <float*> [#uses=2]
+ %A1i.0 = phi float* [ %5, %entry ], [ %49, %bb ] ; <float*> [#uses=2]
+ %A1r.0 = phi float* [ %3, %entry ], [ %48, %bb ] ; <float*> [#uses=2]
+ %A0i.0 = phi float* [ %1, %entry ], [ %47, %bb ] ; <float*> [#uses=2]
+ %A0r.0 = phi float* [ %0, %entry ], [ %46, %bb ] ; <float*> [#uses=2]
+ %59 = icmp slt i64 %i.0, %n ; <i1> [#uses=1]
+ br i1 %59, label %bb, label %bb14
+
+bb14: ; preds = %bb13
+ br label %return
+
+return: ; preds = %bb14
+ ret void
+}
diff --git a/test/Transforms/InstCombine/phi.ll b/test/Transforms/InstCombine/phi.ll
new file mode 100644
index 0000000..f0343e4
--- /dev/null
+++ b/test/Transforms/InstCombine/phi.ll
@@ -0,0 +1,364 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128:n8:16:32:64"
+
+define i32 @test1(i32 %A, i1 %b) {
+BB0:
+ br i1 %b, label %BB1, label %BB2
+
+BB1:
+ ; Combine away one argument PHI nodes
+ %B = phi i32 [ %A, %BB0 ]
+ ret i32 %B
+
+BB2:
+ ret i32 %A
+; CHECK: @test1
+; CHECK: BB1:
+; CHECK-NEXT: ret i32 %A
+}
+
+define i32 @test2(i32 %A, i1 %b) {
+BB0:
+ br i1 %b, label %BB1, label %BB2
+
+BB1:
+ br label %BB2
+
+BB2:
+ ; Combine away PHI nodes with same values
+ %B = phi i32 [ %A, %BB0 ], [ %A, %BB1 ]
+ ret i32 %B
+; CHECK: @test2
+; CHECK: BB2:
+; CHECK-NEXT: ret i32 %A
+}
+
+define i32 @test3(i32 %A, i1 %b) {
+BB0:
+ br label %Loop
+
+Loop:
+ ; PHI has same value always.
+ %B = phi i32 [ %A, %BB0 ], [ %B, %Loop ]
+ br i1 %b, label %Loop, label %Exit
+
+Exit:
+ ret i32 %B
+; CHECK: @test3
+; CHECK: Exit:
+; CHECK-NEXT: ret i32 %A
+}
+
+define i32 @test4(i1 %b) {
+BB0:
+ ; Loop is unreachable
+ ret i32 7
+
+Loop: ; preds = %L2, %Loop
+ ; PHI has same value always.
+ %B = phi i32 [ %B, %L2 ], [ %B, %Loop ]
+ br i1 %b, label %L2, label %Loop
+
+L2: ; preds = %Loop
+ br label %Loop
+; CHECK: @test4
+; CHECK: Loop:
+; CHECK-NEXT: br i1 %b
+}
+
+define i32 @test5(i32 %A, i1 %b) {
+BB0:
+ br label %Loop
+
+Loop: ; preds = %Loop, %BB0
+ ; PHI has same value always.
+ %B = phi i32 [ %A, %BB0 ], [ undef, %Loop ]
+ br i1 %b, label %Loop, label %Exit
+
+Exit: ; preds = %Loop
+ ret i32 %B
+; CHECK: @test5
+; CHECK: Loop:
+; CHECK-NEXT: br i1 %b
+; CHECK: Exit:
+; CHECK-NEXT: ret i32 %A
+}
+
+define i32 @test6(i16 %A, i1 %b) {
+BB0:
+ %X = zext i16 %A to i32
+ br i1 %b, label %BB1, label %BB2
+
+BB1:
+ %Y = zext i16 %A to i32
+ br label %BB2
+
+BB2:
+ ;; Suck casts into phi
+ %B = phi i32 [ %X, %BB0 ], [ %Y, %BB1 ]
+ ret i32 %B
+; CHECK: @test6
+; CHECK: BB2:
+; CHECK: zext i16 %A to i32
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test7(i32 %A, i1 %b) {
+BB0:
+ br label %Loop
+
+Loop: ; preds = %Loop, %BB0
+ ; PHI is dead.
+ %B = phi i32 [ %A, %BB0 ], [ %C, %Loop ]
+ %C = add i32 %B, 123
+ br i1 %b, label %Loop, label %Exit
+
+Exit: ; preds = %Loop
+ ret i32 0
+; CHECK: @test7
+; CHECK: Loop:
+; CHECK-NEXT: br i1 %b
+}
+
+define i32* @test8({ i32, i32 } *%A, i1 %b) {
+BB0:
+ %X = getelementptr { i32, i32 } *%A, i32 0, i32 1
+ br i1 %b, label %BB1, label %BB2
+
+BB1:
+ %Y = getelementptr { i32, i32 } *%A, i32 0, i32 1
+ br label %BB2
+
+BB2:
+ ;; Suck GEPs into phi
+ %B = phi i32* [ %X, %BB0 ], [ %Y, %BB1 ]
+ ret i32* %B
+; CHECK: @test8
+; CHECK-NOT: phi
+; CHECK: BB2:
+; CHECK-NEXT: %B = getelementptr
+; CHECK-NEXT: ret i32* %B
+}
+
+define i32 @test9(i32* %A, i32* %B) {
+entry:
+ %c = icmp eq i32* %A, null
+ br i1 %c, label %bb1, label %bb
+
+bb:
+ %C = load i32* %B, align 1
+ br label %bb2
+
+bb1:
+ %D = load i32* %A, align 1
+ br label %bb2
+
+bb2:
+ %E = phi i32 [ %C, %bb ], [ %D, %bb1 ]
+ ret i32 %E
+; CHECK: @test9
+; CHECK: bb2:
+; CHECK-NEXT: phi i32* [ %B, %bb ], [ %A, %bb1 ]
+; CHECK-NEXT: %E = load i32* %{{[^,]*}}, align 1
+; CHECK-NEXT: ret i32 %E
+
+}
+
+define i32 @test10(i32* %A, i32* %B) {
+entry:
+ %c = icmp eq i32* %A, null
+ br i1 %c, label %bb1, label %bb
+
+bb:
+ %C = load i32* %B, align 16
+ br label %bb2
+
+bb1:
+ %D = load i32* %A, align 32
+ br label %bb2
+
+bb2:
+ %E = phi i32 [ %C, %bb ], [ %D, %bb1 ]
+ ret i32 %E
+; CHECK: @test10
+; CHECK: bb2:
+; CHECK-NEXT: phi i32* [ %B, %bb ], [ %A, %bb1 ]
+; CHECK-NEXT: %E = load i32* %{{[^,]*}}, align 16
+; CHECK-NEXT: ret i32 %E
+}
+
+
+; PR1777
+declare i1 @test11a()
+
+define i1 @test11() {
+entry:
+ %a = alloca i32
+ %i = ptrtoint i32* %a to i32
+ %b = call i1 @test11a()
+ br i1 %b, label %one, label %two
+
+one:
+ %x = phi i32 [%i, %entry], [%y, %two]
+ %c = call i1 @test11a()
+ br i1 %c, label %two, label %end
+
+two:
+ %y = phi i32 [%i, %entry], [%x, %one]
+ %d = call i1 @test11a()
+ br i1 %d, label %one, label %end
+
+end:
+ %f = phi i32 [ %x, %one], [%y, %two]
+ ; Change the %f to %i, and the optimizer suddenly becomes a lot smarter
+ ; even though %f must equal %i at this point
+ %g = inttoptr i32 %f to i32*
+ store i32 10, i32* %g
+ %z = call i1 @test11a()
+ ret i1 %z
+; CHECK: @test11
+; CHECK-NOT: phi i32
+; CHECK: ret i1 %z
+}
+
+
+define i64 @test12(i1 %cond, i8* %Ptr, i64 %Val) {
+entry:
+ %tmp41 = ptrtoint i8* %Ptr to i64
+ %tmp42 = zext i64 %tmp41 to i128
+ br i1 %cond, label %end, label %two
+
+two:
+ %tmp36 = zext i64 %Val to i128 ; <i128> [#uses=1]
+ %tmp37 = shl i128 %tmp36, 64 ; <i128> [#uses=1]
+ %ins39 = or i128 %tmp42, %tmp37 ; <i128> [#uses=1]
+ br label %end
+
+end:
+ %tmp869.0 = phi i128 [ %tmp42, %entry ], [ %ins39, %two ]
+ %tmp32 = trunc i128 %tmp869.0 to i64 ; <i64> [#uses=1]
+ %tmp29 = lshr i128 %tmp869.0, 64 ; <i128> [#uses=1]
+ %tmp30 = trunc i128 %tmp29 to i64 ; <i64> [#uses=1]
+
+ %tmp2 = add i64 %tmp32, %tmp30
+ ret i64 %tmp2
+; CHECK: @test12
+; CHECK-NOT: zext
+; CHECK: end:
+; CHECK-NEXT: phi i64 [ 0, %entry ], [ %Val, %two ]
+; CHECK-NOT: phi
+; CHECK: ret i64
+}
+
+declare void @test13f(double, i32)
+
+define void @test13(i1 %cond, i32 %V1, double %Vald) {
+entry:
+ %tmp42 = zext i32 %V1 to i128
+ br i1 %cond, label %end, label %two
+
+two:
+ %Val = bitcast double %Vald to i64
+ %tmp36 = zext i64 %Val to i128 ; <i128> [#uses=1]
+ %tmp37 = shl i128 %tmp36, 64 ; <i128> [#uses=1]
+ %ins39 = or i128 %tmp42, %tmp37 ; <i128> [#uses=1]
+ br label %end
+
+end:
+ %tmp869.0 = phi i128 [ %tmp42, %entry ], [ %ins39, %two ]
+ %tmp32 = trunc i128 %tmp869.0 to i32
+ %tmp29 = lshr i128 %tmp869.0, 64 ; <i128> [#uses=1]
+ %tmp30 = trunc i128 %tmp29 to i64 ; <i64> [#uses=1]
+ %tmp31 = bitcast i64 %tmp30 to double
+
+ call void @test13f(double %tmp31, i32 %tmp32)
+ ret void
+; CHECK: @test13
+; CHECK-NOT: zext
+; CHECK: end:
+; CHECK-NEXT: phi double [ 0.000000e+00, %entry ], [ %Vald, %two ]
+; CHECK-NEXT: call void @test13f(double {{[^,]*}}, i32 %V1)
+; CHECK: ret void
+}
+
+define i640 @test14a(i320 %A, i320 %B, i1 %b1) {
+BB0:
+ %a = zext i320 %A to i640
+ %b = zext i320 %B to i640
+ br label %Loop
+
+Loop:
+ %C = phi i640 [ %a, %BB0 ], [ %b, %Loop ]
+ br i1 %b1, label %Loop, label %Exit
+
+Exit: ; preds = %Loop
+ ret i640 %C
+; CHECK: @test14a
+; CHECK: Loop:
+; CHECK-NEXT: phi i320
+}
+
+define i160 @test14b(i320 %A, i320 %B, i1 %b1) {
+BB0:
+ %a = trunc i320 %A to i160
+ %b = trunc i320 %B to i160
+ br label %Loop
+
+Loop:
+ %C = phi i160 [ %a, %BB0 ], [ %b, %Loop ]
+ br i1 %b1, label %Loop, label %Exit
+
+Exit: ; preds = %Loop
+ ret i160 %C
+; CHECK: @test14b
+; CHECK: Loop:
+; CHECK-NEXT: phi i160
+}
+
+declare i64 @test15a(i64)
+
+define i64 @test15b(i64 %A, i1 %b) {
+; CHECK: @test15b
+entry:
+ %i0 = zext i64 %A to i128
+ %i1 = shl i128 %i0, 64
+ %i = or i128 %i1, %i0
+ br i1 %b, label %one, label %two
+; CHECK: entry:
+; CHECK-NEXT: br i1 %b
+
+one:
+ %x = phi i128 [%i, %entry], [%y, %two]
+ %x1 = lshr i128 %x, 64
+ %x2 = trunc i128 %x1 to i64
+ %c = call i64 @test15a(i64 %x2)
+ %c1 = zext i64 %c to i128
+ br label %two
+
+; CHECK: one:
+; CHECK-NEXT: phi i64
+; CHECK-NEXT: %c = call i64 @test15a
+
+two:
+ %y = phi i128 [%i, %entry], [%c1, %one]
+ %y1 = lshr i128 %y, 64
+ %y2 = trunc i128 %y1 to i64
+ %d = call i64 @test15a(i64 %y2)
+ %d1 = trunc i64 %d to i1
+ br i1 %d1, label %one, label %end
+
+; CHECK: two:
+; CHECK-NEXT: phi i64
+; CHECK-NEXT: phi i64
+; CHECK-NEXT: %d = call i64 @test15a
+
+end:
+ %g = trunc i128 %y to i64
+ ret i64 %g
+; CHECK: end:
+; CHECK-NEXT: ret i64
+}
+
diff --git a/test/Transforms/InstCombine/pr2645-0.ll b/test/Transforms/InstCombine/pr2645-0.ll
new file mode 100644
index 0000000..9bcaa43
--- /dev/null
+++ b/test/Transforms/InstCombine/pr2645-0.ll
@@ -0,0 +1,33 @@
+; RUN: opt < %s -instcombine -S | grep {insertelement <4 x float> undef}
+
+; Instcombine should be able to prove that none of the
+; insertelement's first operand's elements are needed.
+
+define internal void @""(i8*) {
+; <label>:1
+ bitcast i8* %0 to i32* ; <i32*>:2 [#uses=1]
+ load i32* %2, align 1 ; <i32>:3 [#uses=1]
+ getelementptr i8* %0, i32 4 ; <i8*>:4 [#uses=1]
+ bitcast i8* %4 to i32* ; <i32*>:5 [#uses=1]
+ load i32* %5, align 1 ; <i32>:6 [#uses=1]
+ br label %7
+
+; <label>:7 ; preds = %9, %1
+ %.01 = phi <4 x float> [ undef, %1 ], [ %12, %9 ] ; <<4 x float>> [#uses=1]
+ %.0 = phi i32 [ %3, %1 ], [ %15, %9 ] ; <i32> [#uses=3]
+ icmp slt i32 %.0, %6 ; <i1>:8 [#uses=1]
+ br i1 %8, label %9, label %16
+
+; <label>:9 ; preds = %7
+ sitofp i32 %.0 to float ; <float>:10 [#uses=1]
+ insertelement <4 x float> %.01, float %10, i32 0 ; <<4 x float>>:11 [#uses=1]
+ shufflevector <4 x float> %11, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:12 [#uses=2]
+ getelementptr i8* %0, i32 48 ; <i8*>:13 [#uses=1]
+ bitcast i8* %13 to <4 x float>* ; <<4 x float>*>:14 [#uses=1]
+ store <4 x float> %12, <4 x float>* %14, align 16
+ add i32 %.0, 2 ; <i32>:15 [#uses=1]
+ br label %7
+
+; <label>:16 ; preds = %7
+ ret void
+}
diff --git a/test/Transforms/InstCombine/pr2645-1.ll b/test/Transforms/InstCombine/pr2645-1.ll
new file mode 100644
index 0000000..d320daf
--- /dev/null
+++ b/test/Transforms/InstCombine/pr2645-1.ll
@@ -0,0 +1,39 @@
+; RUN: opt < %s -instcombine -S | grep shufflevector
+; PR2645
+
+; instcombine shouldn't delete the shufflevector.
+
+define internal void @""(i8*, i32, i8*) {
+; <label>:3
+ br label %4
+
+; <label>:4 ; preds = %6, %3
+ %.0 = phi i32 [ 0, %3 ], [ %19, %6 ] ; <i32> [#uses=4]
+ %5 = icmp slt i32 %.0, %1 ; <i1> [#uses=1]
+ br i1 %5, label %6, label %20
+
+; <label>:6 ; preds = %4
+ %7 = getelementptr i8* %2, i32 %.0 ; <i8*> [#uses=1]
+ %8 = bitcast i8* %7 to <4 x i16>* ; <<4 x i16>*> [#uses=1]
+ %9 = load <4 x i16>* %8, align 1 ; <<4 x i16>> [#uses=1]
+ %10 = bitcast <4 x i16> %9 to <1 x i64> ; <<1 x i64>> [#uses=1]
+ %11 = call <2 x i64> @foo(<1 x i64> %10)
+; <<2 x i64>> [#uses=1]
+ %12 = bitcast <2 x i64> %11 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %13 = bitcast <4 x i32> %12 to <8 x i16> ; <<8 x i16>> [#uses=2]
+ %14 = shufflevector <8 x i16> %13, <8 x i16> %13, <8 x i32> < i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3 > ; <<8 x i16>> [#uses=1]
+ %15 = bitcast <8 x i16> %14 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %16 = sitofp <4 x i32> %15 to <4 x float> ; <<4 x float>> [#uses=1]
+ %17 = getelementptr i8* %0, i32 %.0 ; <i8*> [#uses=1]
+ %18 = bitcast i8* %17 to <4 x float>* ; <<4 x float>*> [#uses=1]
+ store <4 x float> %16, <4 x float>* %18, align 1
+ %19 = add i32 %.0, 1 ; <i32> [#uses=1]
+ br label %4
+
+; <label>:20 ; preds = %4
+ call void @llvm.x86.mmx.emms( )
+ ret void
+}
+
+declare <2 x i64> @foo(<1 x i64>)
+declare void @llvm.x86.mmx.emms( )
diff --git a/test/Transforms/InstCombine/pr2996.ll b/test/Transforms/InstCombine/pr2996.ll
new file mode 100644
index 0000000..ff3245d
--- /dev/null
+++ b/test/Transforms/InstCombine/pr2996.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine
+; PR2996
+
+define void @func_53(i16 signext %p_56) nounwind {
+entry:
+ %0 = icmp sgt i16 %p_56, -1 ; <i1> [#uses=1]
+ %iftmp.0.0 = select i1 %0, i32 -1, i32 0 ; <i32> [#uses=1]
+ %1 = call i32 (...)* @func_4(i32 %iftmp.0.0) nounwind ; <i32> [#uses=0]
+ ret void
+}
+
+declare i32 @func_4(...)
diff --git a/test/Transforms/InstCombine/preserve-sminmax.ll b/test/Transforms/InstCombine/preserve-sminmax.ll
new file mode 100644
index 0000000..00232cc
--- /dev/null
+++ b/test/Transforms/InstCombine/preserve-sminmax.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; Instcombine normally would fold the sdiv into the comparison,
+; making "icmp slt i32 %h, 2", but in this case the sdiv has
+; another use, so it wouldn't be a big win, and it would also
+; obfuscate an otherwise obvious smax pattern to the point where
+; other analyses wouldn't recognize it.
+
+define i32 @foo(i32 %h) {
+ %sd = sdiv i32 %h, 2
+ %t = icmp slt i32 %sd, 1
+ %r = select i1 %t, i32 %sd, i32 1
+ ret i32 %r
+}
+
+; CHECK: %sd = sdiv i32 %h, 2
+; CHECK: %t = icmp slt i32 %sd, 1
+; CHECK: %r = select i1 %t, i32 %sd, i32 1
+; CHECK: ret i32 %r
+
+define i32 @bar(i32 %h) {
+ %sd = sdiv i32 %h, 2
+ %t = icmp sgt i32 %sd, 1
+ %r = select i1 %t, i32 %sd, i32 1
+ ret i32 %r
+}
+
+; CHECK: %sd = sdiv i32 %h, 2
+; CHECK: %t = icmp sgt i32 %sd, 1
+; CHECK: %r = select i1 %t, i32 %sd, i32 1
+; CHECK: ret i32 %r
+
diff --git a/test/Transforms/InstCombine/ptr-int-cast.ll b/test/Transforms/InstCombine/ptr-int-cast.ll
new file mode 100644
index 0000000..c7ae689
--- /dev/null
+++ b/test/Transforms/InstCombine/ptr-int-cast.ll
@@ -0,0 +1,17 @@
+; RUN: opt < %s -instcombine -S > %t
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+define i1 @test1(i32 *%x) nounwind {
+entry:
+; RUN: grep {ptrtoint i32\\* %x to i64} %t
+ %tmp = ptrtoint i32* %x to i1
+ ret i1 %tmp
+}
+
+define i32* @test2(i128 %x) nounwind {
+entry:
+; RUN: grep {inttoptr i64 %.mp1 to i32\\*} %t
+ %tmp = inttoptr i128 %x to i32*
+ ret i32* %tmp
+}
+
diff --git a/test/Transforms/InstCombine/rem.ll b/test/Transforms/InstCombine/rem.ll
new file mode 100644
index 0000000..bac248e
--- /dev/null
+++ b/test/Transforms/InstCombine/rem.ll
@@ -0,0 +1,83 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | not grep rem
+; END.
+
+define i32 @test1(i32 %A) {
+ %B = srem i32 %A, 1 ; is a constant 0
+ ret i32 %B
+}
+
+define i32 @test2(i32 %A) { ; 0 % X = 0, we don't need to preserve traps
+ %B = srem i32 0, %A
+ ret i32 %B
+}
+
+define i32 @test3(i32 %A) {
+ %B = urem i32 %A, 8
+ ret i32 %B
+}
+
+define i1 @test3a(i32 %A) {
+ %B = srem i32 %A, -8
+ %C = icmp ne i32 %B, 0
+ ret i1 %C
+}
+
+define i32 @test4(i32 %X, i1 %C) {
+ %V = select i1 %C, i32 1, i32 8
+ %R = urem i32 %X, %V
+ ret i32 %R
+}
+
+define i32 @test5(i32 %X, i8 %B) {
+ %shift.upgrd.1 = zext i8 %B to i32
+ %Amt = shl i32 32, %shift.upgrd.1
+ %V = urem i32 %X, %Amt
+ ret i32 %V
+}
+
+define i32 @test6(i32 %A) {
+ %B = srem i32 %A, 0 ;; undef
+ ret i32 %B
+}
+
+define i32 @test7(i32 %A) {
+ %B = mul i32 %A, 8
+ %C = srem i32 %B, 4
+ ret i32 %C
+}
+
+define i32 @test8(i32 %A) {
+ %B = shl i32 %A, 4
+ %C = srem i32 %B, 8
+ ret i32 %C
+}
+
+define i32 @test9(i32 %A) {
+ %B = mul i32 %A, 64
+ %C = urem i32 %B, 32
+ ret i32 %C
+}
+
+define i32 @test10(i8 %c) {
+ %tmp.1 = zext i8 %c to i32
+ %tmp.2 = mul i32 %tmp.1, 4
+ %tmp.3 = sext i32 %tmp.2 to i64
+ %tmp.5 = urem i64 %tmp.3, 4
+ %tmp.6 = trunc i64 %tmp.5 to i32
+ ret i32 %tmp.6
+}
+
+define i32 @test11(i32 %i) {
+ %tmp.1 = and i32 %i, -2
+ %tmp.3 = mul i32 %tmp.1, 2
+ %tmp.5 = urem i32 %tmp.3, 4
+ ret i32 %tmp.5
+}
+
+define i32 @test12(i32 %i) {
+ %tmp.1 = and i32 %i, -4
+ %tmp.5 = srem i32 %tmp.1, 2
+ ret i32 %tmp.5
+}
diff --git a/test/Transforms/InstCombine/sdiv-1.ll b/test/Transforms/InstCombine/sdiv-1.ll
new file mode 100644
index 0000000..c46b5ea
--- /dev/null
+++ b/test/Transforms/InstCombine/sdiv-1.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -inline -S | not grep '-715827882'
+; PR3142
+
+define i32 @a(i32 %X) nounwind readnone {
+entry:
+ %0 = sub i32 0, %X
+ %1 = sdiv i32 %0, -3
+ ret i32 %1
+}
+
+define i32 @b(i32 %X) nounwind readnone {
+entry:
+ %0 = call i32 @a(i32 -2147483648)
+ ret i32 %0
+}
+
+define i32 @c(i32 %X) nounwind readnone {
+entry:
+ %0 = sub i32 0, -2147483648
+ %1 = sdiv i32 %0, -3
+ ret i32 %1
+}
diff --git a/test/Transforms/InstCombine/sdiv-2.ll b/test/Transforms/InstCombine/sdiv-2.ll
new file mode 100644
index 0000000..0e4c008
--- /dev/null
+++ b/test/Transforms/InstCombine/sdiv-2.ll
@@ -0,0 +1,28 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR3144
+
+define fastcc i32 @func(i32 %length) nounwind {
+entry:
+ %0 = icmp ne i32 %length, -1 ; <i1> [#uses=1]
+ %iftmp.13.0 = select i1 %0, i128 0, i128 200000000 ; <i128> [#uses=2]
+ %1 = sdiv i128 %iftmp.13.0, 10 ; <i128> [#uses=1]
+ br label %bb5
+
+bb5: ; preds = %bb8, %entry
+ %v.0 = phi i128 [ 0, %entry ], [ %6, %bb8 ] ; <i128> [#uses=2]
+ %2 = icmp sgt i128 %v.0, %1 ; <i1> [#uses=1]
+ br i1 %2, label %overflow, label %bb7
+
+bb7: ; preds = %bb5
+ %3 = mul i128 %v.0, 10 ; <i128> [#uses=2]
+ %4 = sub i128 %iftmp.13.0, 0 ; <i128> [#uses=1]
+ %5 = icmp slt i128 %4, %3 ; <i1> [#uses=1]
+ br i1 %5, label %overflow, label %bb8
+
+bb8: ; preds = %bb7
+ %6 = add i128 0, %3 ; <i128> [#uses=1]
+ br label %bb5
+
+overflow: ; preds = %bb7, %bb5
+ ret i32 1
+}
diff --git a/test/Transforms/InstCombine/sdiv-shift.ll b/test/Transforms/InstCombine/sdiv-shift.ll
new file mode 100644
index 0000000..f4d2b36
--- /dev/null
+++ b/test/Transforms/InstCombine/sdiv-shift.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | not grep div
+
+define i32 @a(i16 zeroext %x, i32 %y) nounwind {
+entry:
+ %conv = zext i16 %x to i32
+ %s = shl i32 2, %y
+ %d = sdiv i32 %conv, %s
+ ret i32 %d
+}
diff --git a/test/Transforms/InstCombine/select-2.ll b/test/Transforms/InstCombine/select-2.ll
new file mode 100644
index 0000000..a76addc
--- /dev/null
+++ b/test/Transforms/InstCombine/select-2.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -S | grep select | count 2
+
+; Make sure instcombine doesn't fold select into operands. We don't want to emit
+; select of two integers unless it's selecting 0 / 1.
+
+define i32 @t1(i32 %c, i32 %x) nounwind {
+ %t1 = icmp eq i32 %c, 0
+ %t2 = lshr i32 %x, 18
+ %t3 = select i1 %t1, i32 %t2, i32 %x
+ ret i32 %t3
+}
+
+define i32 @t2(i32 %c, i32 %x) nounwind {
+ %t1 = icmp eq i32 %c, 0
+ %t2 = and i32 %x, 18
+ %t3 = select i1 %t1, i32 %t2, i32 %x
+ ret i32 %t3
+}
diff --git a/test/Transforms/InstCombine/select-load-call.ll b/test/Transforms/InstCombine/select-load-call.ll
new file mode 100644
index 0000000..bef0cf8
--- /dev/null
+++ b/test/Transforms/InstCombine/select-load-call.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine -S | grep {ret i32 1}
+
+declare void @test2()
+
+define i32 @test(i1 %cond, i32 *%P) {
+ %A = alloca i32
+ store i32 1, i32* %P
+ store i32 1, i32* %A
+
+ call void @test2() readonly
+
+ %P2 = select i1 %cond, i32 *%P, i32* %A
+ %V = load i32* %P2
+ ret i32 %V
+}
diff --git a/test/Transforms/InstCombine/select.ll b/test/Transforms/InstCombine/select.ll
new file mode 100644
index 0000000..06d5338
--- /dev/null
+++ b/test/Transforms/InstCombine/select.ll
@@ -0,0 +1,440 @@
+; This test makes sure that these instructions are properly eliminated.
+; PR1822
+
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @test1(i32 %A, i32 %B) {
+ %C = select i1 false, i32 %A, i32 %B
+ ret i32 %C
+; CHECK: @test1
+; CHECK: ret i32 %B
+}
+
+define i32 @test2(i32 %A, i32 %B) {
+ %C = select i1 true, i32 %A, i32 %B
+ ret i32 %C
+; CHECK: @test2
+; CHECK: ret i32 %A
+}
+
+
+define i32 @test3(i1 %C, i32 %I) {
+ ; V = I
+ %V = select i1 %C, i32 %I, i32 %I
+ ret i32 %V
+; CHECK: @test3
+; CHECK: ret i32 %I
+}
+
+define i1 @test4(i1 %C) {
+ ; V = C
+ %V = select i1 %C, i1 true, i1 false
+ ret i1 %V
+; CHECK: @test4
+; CHECK: ret i1 %C
+}
+
+define i1 @test5(i1 %C) {
+ ; V = !C
+ %V = select i1 %C, i1 false, i1 true
+ ret i1 %V
+; CHECK: @test5
+; CHECK: xor i1 %C, true
+; CHECK: ret i1
+}
+
+define i32 @test6(i1 %C) {
+ ; V = cast C to int
+ %V = select i1 %C, i32 1, i32 0
+ ret i32 %V
+; CHECK: @test6
+; CHECK: %V = zext i1 %C to i32
+; CHECK: ret i32 %V
+}
+
+define i1 @test7(i1 %C, i1 %X) {
+ ; R = or C, X
+ %R = select i1 %C, i1 true, i1 %X
+ ret i1 %R
+; CHECK: @test7
+; CHECK: %R = or i1 %C, %X
+; CHECK: ret i1 %R
+}
+
+define i1 @test8(i1 %C, i1 %X) {
+ ; R = and C, X
+ %R = select i1 %C, i1 %X, i1 false
+ ret i1 %R
+; CHECK: @test8
+; CHECK: %R = and i1 %C, %X
+; CHECK: ret i1 %R
+}
+
+define i1 @test9(i1 %C, i1 %X) {
+ ; R = and !C, X
+ %R = select i1 %C, i1 false, i1 %X
+ ret i1 %R
+; CHECK: @test9
+; CHECK: xor i1 %C, true
+; CHECK: %R = and i1
+; CHECK: ret i1 %R
+}
+
+define i1 @test10(i1 %C, i1 %X) {
+ ; R = or !C, X
+ %R = select i1 %C, i1 %X, i1 true
+ ret i1 %R
+; CHECK: @test10
+; CHECK: xor i1 %C, true
+; CHECK: %R = or i1
+; CHECK: ret i1 %R
+}
+
+define i32 @test11(i32 %a) {
+ %C = icmp eq i32 %a, 0
+ %R = select i1 %C, i32 0, i32 1
+ ret i32 %R
+; CHECK: @test11
+; CHECK: icmp ne i32 %a, 0
+; CHECK: %R = zext i1
+; CHECK: ret i32 %R
+}
+
+define i32 @test12(i1 %cond, i32 %a) {
+ %b = or i32 %a, 1
+ %c = select i1 %cond, i32 %b, i32 %a
+ ret i32 %c
+; CHECK: @test12
+; CHECK: %b = zext i1 %cond to i32
+; CHECK: %c = or i32 %b, %a
+; CHECK: ret i32 %c
+}
+
+define i32 @test12a(i1 %cond, i32 %a) {
+ %b = ashr i32 %a, 1
+ %c = select i1 %cond, i32 %b, i32 %a
+ ret i32 %c
+; CHECK: @test12a
+; CHECK: %b = zext i1 %cond to i32
+; CHECK: %c = ashr i32 %a, %b
+; CHECK: ret i32 %c
+}
+
+define i32 @test12b(i1 %cond, i32 %a) {
+ %b = ashr i32 %a, 1
+ %c = select i1 %cond, i32 %a, i32 %b
+ ret i32 %c
+; CHECK: @test12b
+; CHECK: zext i1 %cond to i32
+; CHECK: %b = xor i32
+; CHECK: %c = ashr i32 %a, %b
+; CHECK: ret i32 %c
+}
+
+define i32 @test13(i32 %a, i32 %b) {
+ %C = icmp eq i32 %a, %b
+ %V = select i1 %C, i32 %a, i32 %b
+ ret i32 %V
+; CHECK: @test13
+; CHECK: ret i32 %b
+}
+
+define i32 @test13a(i32 %a, i32 %b) {
+ %C = icmp ne i32 %a, %b
+ %V = select i1 %C, i32 %a, i32 %b
+ ret i32 %V
+; CHECK: @test13a
+; CHECK: ret i32 %a
+}
+
+define i32 @test13b(i32 %a, i32 %b) {
+ %C = icmp eq i32 %a, %b
+ %V = select i1 %C, i32 %b, i32 %a
+ ret i32 %V
+; CHECK: @test13b
+; CHECK: ret i32 %a
+}
+
+define i1 @test14a(i1 %C, i32 %X) {
+ %V = select i1 %C, i32 %X, i32 0
+ ; (X < 1) | !C
+ %R = icmp slt i32 %V, 1
+ ret i1 %R
+; CHECK: @test14a
+; CHECK: icmp slt i32 %X, 1
+; CHECK: xor i1 %C, true
+; CHECK: or i1
+; CHECK: ret i1 %R
+}
+
+define i1 @test14b(i1 %C, i32 %X) {
+ %V = select i1 %C, i32 0, i32 %X
+ ; (X < 1) | C
+ %R = icmp slt i32 %V, 1
+ ret i1 %R
+; CHECK: @test14b
+; CHECK: icmp slt i32 %X, 1
+; CHECK: or i1
+; CHECK: ret i1 %R
+}
+
+;; Code sequence for (X & 16) ? 16 : 0
+define i32 @test15a(i32 %X) {
+ %t1 = and i32 %X, 16
+ %t2 = icmp eq i32 %t1, 0
+ %t3 = select i1 %t2, i32 0, i32 16
+ ret i32 %t3
+; CHECK: @test15a
+; CHECK: %t1 = and i32 %X, 16
+; CHECK: ret i32 %t1
+}
+
+;; Code sequence for (X & 32) ? 0 : 32
+define i32 @test15b(i32 %X) {
+ %t1 = and i32 %X, 32
+ %t2 = icmp eq i32 %t1, 0
+ %t3 = select i1 %t2, i32 32, i32 0
+ ret i32 %t3
+; CHECK: @test15b
+; CHECK: %t1 = and i32 %X, 32
+; CHECK: xor i32 %t1, 32
+; CHECK: ret i32
+}
+
+;; Alternate code sequence for (X & 16) ? 16 : 0
+define i32 @test15c(i32 %X) {
+ %t1 = and i32 %X, 16
+ %t2 = icmp eq i32 %t1, 16
+ %t3 = select i1 %t2, i32 16, i32 0
+ ret i32 %t3
+; CHECK: @test15c
+; CHECK: %t1 = and i32 %X, 16
+; CHECK: ret i32 %t1
+}
+
+;; Alternate code sequence for (X & 16) ? 16 : 0
+define i32 @test15d(i32 %X) {
+ %t1 = and i32 %X, 16
+ %t2 = icmp ne i32 %t1, 0
+ %t3 = select i1 %t2, i32 16, i32 0
+ ret i32 %t3
+; CHECK: @test15d
+; CHECK: %t1 = and i32 %X, 16
+; CHECK: ret i32 %t1
+}
+
+define i32 @test16(i1 %C, i32* %P) {
+ %P2 = select i1 %C, i32* %P, i32* null
+ %V = load i32* %P2
+ ret i32 %V
+; CHECK: @test16
+; CHECK-NEXT: %V = load i32* %P
+; CHECK: ret i32 %V
+}
+
+define i1 @test17(i32* %X, i1 %C) {
+ %R = select i1 %C, i32* %X, i32* null
+ %RV = icmp eq i32* %R, null
+ ret i1 %RV
+; CHECK: @test17
+; CHECK: icmp eq i32* %X, null
+; CHECK: xor i1 %C, true
+; CHECK: %RV = or i1
+; CHECK: ret i1 %RV
+}
+
+define i32 @test18(i32 %X, i32 %Y, i1 %C) {
+ %R = select i1 %C, i32 %X, i32 0
+ %V = sdiv i32 %Y, %R
+ ret i32 %V
+; CHECK: @test18
+; CHECK: %V = sdiv i32 %Y, %X
+; CHECK: ret i32 %V
+}
+
+define i32 @test19(i32 %x) {
+ %tmp = icmp ugt i32 %x, 2147483647
+ %retval = select i1 %tmp, i32 -1, i32 0
+ ret i32 %retval
+; CHECK: @test19
+; CHECK-NEXT: ashr i32 %x, 31
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test20(i32 %x) {
+ %tmp = icmp slt i32 %x, 0
+ %retval = select i1 %tmp, i32 -1, i32 0
+ ret i32 %retval
+; CHECK: @test20
+; CHECK-NEXT: ashr i32 %x, 31
+; CHECK-NEXT: ret i32
+}
+
+define i64 @test21(i32 %x) {
+ %tmp = icmp slt i32 %x, 0
+ %retval = select i1 %tmp, i64 -1, i64 0
+ ret i64 %retval
+; CHECK: @test21
+; CHECK-NEXT: ashr i32 %x, 31
+; CHECK-NEXT: sext i32
+; CHECK-NEXT: ret i64
+}
+
+define i16 @test22(i32 %x) {
+ %tmp = icmp slt i32 %x, 0
+ %retval = select i1 %tmp, i16 -1, i16 0
+ ret i16 %retval
+; CHECK: @test22
+; CHECK-NEXT: ashr i32 %x, 31
+; CHECK-NEXT: trunc i32
+; CHECK-NEXT: ret i16
+}
+
+define i1 @test23(i1 %a, i1 %b) {
+ %c = select i1 %a, i1 %b, i1 %a
+ ret i1 %c
+; CHECK: @test23
+; CHECK-NEXT: %c = and i1 %a, %b
+; CHECK-NEXT: ret i1 %c
+}
+
+define i1 @test24(i1 %a, i1 %b) {
+ %c = select i1 %a, i1 %a, i1 %b
+ ret i1 %c
+; CHECK: @test24
+; CHECK-NEXT: %c = or i1 %a, %b
+; CHECK-NEXT: ret i1 %c
+}
+
+define i32 @test25(i1 %c) {
+entry:
+ br i1 %c, label %jump, label %ret
+jump:
+ br label %ret
+ret:
+ %a = phi i1 [true, %jump], [false, %entry]
+ %b = select i1 %a, i32 10, i32 20
+ ret i32 %b
+; CHECK: @test25
+; CHECK: %a = phi i32 [ 10, %jump ], [ 20, %entry ]
+; CHECK-NEXT: ret i32 %a
+}
+
+define i32 @test26(i1 %cond) {
+entry:
+ br i1 %cond, label %jump, label %ret
+jump:
+ %c = or i1 false, false
+ br label %ret
+ret:
+ %a = phi i1 [true, %jump], [%c, %entry]
+ %b = select i1 %a, i32 10, i32 20
+ ret i32 %b
+; CHECK: @test26
+; CHECK: %a = phi i32 [ 10, %jump ], [ 20, %entry ]
+; CHECK-NEXT: ret i32 %a
+}
+
+define i32 @test27(i1 %c, i32 %A, i32 %B) {
+entry:
+ br i1 %c, label %jump, label %ret
+jump:
+ br label %ret
+ret:
+ %a = phi i1 [true, %jump], [false, %entry]
+ %b = select i1 %a, i32 %A, i32 %B
+ ret i32 %b
+; CHECK: @test27
+; CHECK: %a = phi i32 [ %A, %jump ], [ %B, %entry ]
+; CHECK-NEXT: ret i32 %a
+}
+
+define i32 @test28(i1 %cond, i32 %A, i32 %B) {
+entry:
+ br i1 %cond, label %jump, label %ret
+jump:
+ br label %ret
+ret:
+ %c = phi i32 [%A, %jump], [%B, %entry]
+ %a = phi i1 [true, %jump], [false, %entry]
+ %b = select i1 %a, i32 %A, i32 %c
+ ret i32 %b
+; CHECK: @test28
+; CHECK: %a = phi i32 [ %A, %jump ], [ %B, %entry ]
+; CHECK-NEXT: ret i32 %a
+}
+
+define i32 @test29(i1 %cond, i32 %A, i32 %B) {
+entry:
+ br i1 %cond, label %jump, label %ret
+jump:
+ br label %ret
+ret:
+ %c = phi i32 [%A, %jump], [%B, %entry]
+ %a = phi i1 [true, %jump], [false, %entry]
+ br label %next
+
+next:
+ %b = select i1 %a, i32 %A, i32 %c
+ ret i32 %b
+; CHECK: @test29
+; CHECK: %a = phi i32 [ %A, %jump ], [ %B, %entry ]
+; CHECK: ret i32 %a
+}
+
+
+; SMAX(SMAX(x, y), x) -> SMAX(x, y)
+define i32 @test30(i32 %x, i32 %y) {
+ %cmp = icmp sgt i32 %x, %y
+ %cond = select i1 %cmp, i32 %x, i32 %y
+
+ %cmp5 = icmp sgt i32 %cond, %x
+ %retval = select i1 %cmp5, i32 %cond, i32 %x
+ ret i32 %retval
+; CHECK: @test30
+; CHECK: ret i32 %cond
+}
+
+; UMAX(UMAX(x, y), x) -> UMAX(x, y)
+define i32 @test31(i32 %x, i32 %y) {
+ %cmp = icmp ugt i32 %x, %y
+ %cond = select i1 %cmp, i32 %x, i32 %y
+ %cmp5 = icmp ugt i32 %cond, %x
+ %retval = select i1 %cmp5, i32 %cond, i32 %x
+ ret i32 %retval
+; CHECK: @test31
+; CHECK: ret i32 %cond
+}
+
+; SMIN(SMIN(x, y), x) -> SMIN(x, y)
+define i32 @test32(i32 %x, i32 %y) {
+ %cmp = icmp sgt i32 %x, %y
+ %cond = select i1 %cmp, i32 %y, i32 %x
+ %cmp5 = icmp sgt i32 %cond, %x
+ %retval = select i1 %cmp5, i32 %x, i32 %cond
+ ret i32 %retval
+; CHECK: @test32
+; CHECK: ret i32 %cond
+}
+
+; MAX(MIN(x, y), x) -> x
+define i32 @test33(i32 %x, i32 %y) {
+ %cmp = icmp sgt i32 %x, %y
+ %cond = select i1 %cmp, i32 %y, i32 %x
+ %cmp5 = icmp sgt i32 %cond, %x
+ %retval = select i1 %cmp5, i32 %cond, i32 %x
+ ret i32 %retval
+; CHECK: @test33
+; CHECK: ret i32 %x
+}
+
+; MIN(MAX(x, y), x) -> x
+define i32 @test34(i32 %x, i32 %y) {
+ %cmp = icmp sgt i32 %x, %y
+ %cond = select i1 %cmp, i32 %x, i32 %y
+ %cmp5 = icmp sgt i32 %cond, %x
+ %retval = select i1 %cmp5, i32 %x, i32 %cond
+ ret i32 %retval
+; CHECK: @test34
+; CHECK: ret i32 %x
+}
diff --git a/test/Transforms/InstCombine/set.ll b/test/Transforms/InstCombine/set.ll
new file mode 100644
index 0000000..daa9148
--- /dev/null
+++ b/test/Transforms/InstCombine/set.ll
@@ -0,0 +1,171 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | not grep icmp
+; END.
+
+@X = external global i32 ; <i32*> [#uses=2]
+
+define i1 @test1(i32 %A) {
+ %B = icmp eq i32 %A, %A ; <i1> [#uses=1]
+ ; Never true
+ %C = icmp eq i32* @X, null ; <i1> [#uses=1]
+ %D = and i1 %B, %C ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test2(i32 %A) {
+ %B = icmp ne i32 %A, %A ; <i1> [#uses=1]
+ ; Never false
+ %C = icmp ne i32* @X, null ; <i1> [#uses=1]
+ %D = or i1 %B, %C ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test3(i32 %A) {
+ %B = icmp slt i32 %A, %A ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+
+define i1 @test4(i32 %A) {
+ %B = icmp sgt i32 %A, %A ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test5(i32 %A) {
+ %B = icmp sle i32 %A, %A ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test6(i32 %A) {
+ %B = icmp sge i32 %A, %A ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test7(i32 %A) {
+ ; true
+ %B = icmp uge i32 %A, 0 ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test8(i32 %A) {
+ ; false
+ %B = icmp ult i32 %A, 0 ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+;; Test operations on boolean values; these should all be eliminated.
+define i1 @test9(i1 %A) {
+ ; false
+ %B = icmp ult i1 %A, false ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test10(i1 %A) {
+ ; false
+ %B = icmp ugt i1 %A, true ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test11(i1 %A) {
+ ; true
+ %B = icmp ule i1 %A, true ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test12(i1 %A) {
+ ; true
+ %B = icmp uge i1 %A, false ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test13(i1 %A, i1 %B) {
+ ; A | ~B
+ %C = icmp uge i1 %A, %B ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test14(i1 %A, i1 %B) {
+ ; ~(A ^ B)
+ %C = icmp eq i1 %A, %B ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test16(i32 %A) {
+ %B = and i32 %A, 5 ; <i32> [#uses=1]
+ ; Is never true
+ %C = icmp eq i32 %B, 8 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test17(i8 %A) {
+ %B = or i8 %A, 1 ; <i8> [#uses=1]
+ ; Always false
+ %C = icmp eq i8 %B, 2 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test18(i1 %C, i32 %a) {
+entry:
+ br i1 %C, label %endif, label %else
+
+else: ; preds = %entry
+ br label %endif
+
+endif: ; preds = %else, %entry
+ %b.0 = phi i32 [ 0, %entry ], [ 1, %else ] ; <i32> [#uses=1]
+ %tmp.4 = icmp slt i32 %b.0, 123 ; <i1> [#uses=1]
+ ret i1 %tmp.4
+}
+
+define i1 @test19(i1 %A, i1 %B) {
+ %a = zext i1 %A to i32 ; <i32> [#uses=1]
+ %b = zext i1 %B to i32 ; <i32> [#uses=1]
+ %C = icmp eq i32 %a, %b ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i32 @test20(i32 %A) {
+ %B = and i32 %A, 1 ; <i32> [#uses=1]
+ %C = icmp ne i32 %B, 0 ; <i1> [#uses=1]
+ %D = zext i1 %C to i32 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test21(i32 %a) {
+ %tmp.6 = and i32 %a, 4 ; <i32> [#uses=1]
+ %not.tmp.7 = icmp ne i32 %tmp.6, 0 ; <i1> [#uses=1]
+ %retval = zext i1 %not.tmp.7 to i32 ; <i32> [#uses=1]
+ ret i32 %retval
+}
+
+define i1 @test22(i32 %A, i32 %X) {
+ %B = and i32 %A, 100663295 ; <i32> [#uses=1]
+ %C = icmp ult i32 %B, 268435456 ; <i1> [#uses=1]
+ %Y = and i32 %X, 7 ; <i32> [#uses=1]
+ %Z = icmp sgt i32 %Y, -1 ; <i1> [#uses=1]
+ %R = or i1 %C, %Z ; <i1> [#uses=1]
+ ret i1 %R
+}
+
+define i32 @test23(i32 %a) {
+ %tmp.1 = and i32 %a, 1 ; <i32> [#uses=1]
+ %tmp.2 = icmp eq i32 %tmp.1, 0 ; <i1> [#uses=1]
+ %tmp.3 = zext i1 %tmp.2 to i32 ; <i32> [#uses=1]
+ ret i32 %tmp.3
+}
+
+define i32 @test24(i32 %a) {
+ %tmp1 = and i32 %a, 4 ; <i32> [#uses=1]
+ %tmp.1 = lshr i32 %tmp1, 2 ; <i32> [#uses=1]
+ %tmp.2 = icmp eq i32 %tmp.1, 0 ; <i1> [#uses=1]
+ %tmp.3 = zext i1 %tmp.2 to i32 ; <i32> [#uses=1]
+ ret i32 %tmp.3
+}
+
+define i1 @test25(i32 %A) {
+ %B = and i32 %A, 2 ; <i32> [#uses=1]
+ %C = icmp ugt i32 %B, 2 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
diff --git a/test/Transforms/InstCombine/setcc-strength-reduce.ll b/test/Transforms/InstCombine/setcc-strength-reduce.ll
new file mode 100644
index 0000000..62ab116
--- /dev/null
+++ b/test/Transforms/InstCombine/setcc-strength-reduce.ll
@@ -0,0 +1,37 @@
+; This test ensures that "strength reduction" of conditional expressions are
+; working. Basically this boils down to converting setlt,gt,le,ge instructions
+; into equivalent setne,eq instructions.
+;
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep -v {icmp eq} | grep -v {icmp ne} | not grep icmp
+; END.
+
+define i1 @test1(i32 %A) {
+ ; setne %A, 0
+ %B = icmp uge i32 %A, 1 ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test2(i32 %A) {
+ ; setne %A, 0
+ %B = icmp ugt i32 %A, 0 ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test3(i8 %A) {
+ ; setne %A, -128
+ %B = icmp sge i8 %A, -127 ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test4(i8 %A) {
+ ; setne %A, 127
+ %B = icmp sle i8 %A, 126 ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test5(i8 %A) {
+ ; setne %A, 127
+ %B = icmp slt i8 %A, 127 ; <i1> [#uses=1]
+ ret i1 %B
+}
diff --git a/test/Transforms/InstCombine/sext.ll b/test/Transforms/InstCombine/sext.ll
new file mode 100644
index 0000000..6deee1f
--- /dev/null
+++ b/test/Transforms/InstCombine/sext.ll
@@ -0,0 +1,128 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+declare i32 @llvm.ctpop.i32(i32)
+declare i32 @llvm.ctlz.i32(i32)
+declare i32 @llvm.cttz.i32(i32)
+
+define i64 @test1(i32 %x) {
+ %t = call i32 @llvm.ctpop.i32(i32 %x)
+ %s = sext i32 %t to i64
+ ret i64 %s
+
+; CHECK: @test1
+; CHECK: zext i32 %t
+}
+
+define i64 @test2(i32 %x) {
+ %t = call i32 @llvm.ctlz.i32(i32 %x)
+ %s = sext i32 %t to i64
+ ret i64 %s
+
+; CHECK: @test2
+; CHECK: zext i32 %t
+}
+
+define i64 @test3(i32 %x) {
+ %t = call i32 @llvm.cttz.i32(i32 %x)
+ %s = sext i32 %t to i64
+ ret i64 %s
+
+; CHECK: @test3
+; CHECK: zext i32 %t
+}
+
+define i64 @test4(i32 %x) {
+ %t = udiv i32 %x, 3
+ %s = sext i32 %t to i64
+ ret i64 %s
+
+; CHECK: @test4
+; CHECK: zext i32 %t
+}
+
+define i64 @test5(i32 %x) {
+ %t = urem i32 %x, 30000
+ %s = sext i32 %t to i64
+ ret i64 %s
+; CHECK: @test5
+; CHECK: zext i32 %t
+}
+
+define i64 @test6(i32 %x) {
+ %u = lshr i32 %x, 3
+ %t = mul i32 %u, 3
+ %s = sext i32 %t to i64
+ ret i64 %s
+; CHECK: @test6
+; CHECK: zext i32 %t
+}
+
+define i64 @test7(i32 %x) {
+ %t = and i32 %x, 511
+ %u = sub i32 20000, %t
+ %s = sext i32 %u to i64
+ ret i64 %s
+; CHECK: @test7
+; CHECK: zext i32 %u to i64
+}
+
+define i32 @test8(i8 %a, i32 %f, i1 %p, i32* %z) {
+ %d = lshr i32 %f, 24
+ %e = select i1 %p, i32 %d, i32 0
+ %s = trunc i32 %e to i16
+ %n = sext i16 %s to i32
+ ret i32 %n
+; CHECK: @test8
+; CHECK: %d = lshr i32 %f, 24
+; CHECK: %n = select i1 %p, i32 %d, i32 0
+; CHECK: ret i32 %n
+}
+
+; rdar://6013816
+define i16 @test9(i16 %t, i1 %cond) nounwind {
+entry:
+ br i1 %cond, label %T, label %F
+T:
+ %t2 = sext i16 %t to i32
+ br label %F
+
+F:
+ %V = phi i32 [%t2, %T], [42, %entry]
+ %W = trunc i32 %V to i16
+ ret i16 %W
+; CHECK: @test9
+; CHECK: T:
+; CHECK-NEXT: br label %F
+; CHECK: F:
+; CHECK-NEXT: phi i16
+; CHECK-NEXT: ret i16
+}
+
+; PR2638
+define i32 @test10(i32 %i) nounwind {
+entry:
+ %tmp12 = trunc i32 %i to i8
+ %tmp16 = shl i8 %tmp12, 6
+ %a = ashr i8 %tmp16, 6
+ %b = sext i8 %a to i32
+ ret i32 %b
+; CHECK: @test10
+; CHECK: shl i32 %i, 30
+; CHECK-NEXT: ashr i32
+; CHECK-NEXT: ret i32
+}
+
+define void @test11(<2 x i16> %srcA, <2 x i16> %srcB, <2 x i16>* %dst) {
+ %cmp = icmp eq <2 x i16> %srcB, %srcA
+ %sext = sext <2 x i1> %cmp to <2 x i16>
+ %tmask = ashr <2 x i16> %sext, <i16 15, i16 15>
+ store <2 x i16> %tmask, <2 x i16>* %dst
+ ret void
+; CHECK: @test11
+; CHECK-NEXT: icmp eq
+; CHECK-NEXT: sext <2 x i1>
+; CHECK-NEXT: store <2 x i16>
+; CHECK-NEXT: ret
+}
diff --git a/test/Transforms/InstCombine/shift-simplify.ll b/test/Transforms/InstCombine/shift-simplify.ll
new file mode 100644
index 0000000..e5cc705
--- /dev/null
+++ b/test/Transforms/InstCombine/shift-simplify.ll
@@ -0,0 +1,42 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: egrep {shl|lshr|ashr} | count 3
+
+define i32 @test0(i32 %A, i32 %B, i32 %C) {
+ %X = shl i32 %A, %C
+ %Y = shl i32 %B, %C
+ %Z = and i32 %X, %Y
+ ret i32 %Z
+}
+
+define i32 @test1(i32 %A, i32 %B, i32 %C) {
+ %X = lshr i32 %A, %C
+ %Y = lshr i32 %B, %C
+ %Z = or i32 %X, %Y
+ ret i32 %Z
+}
+
+define i32 @test2(i32 %A, i32 %B, i32 %C) {
+ %X = ashr i32 %A, %C
+ %Y = ashr i32 %B, %C
+ %Z = xor i32 %X, %Y
+ ret i32 %Z
+}
+
+define i1 @test3(i32 %X) {
+ %tmp1 = shl i32 %X, 7
+ %tmp2 = icmp slt i32 %tmp1, 0
+ ret i1 %tmp2
+}
+
+define i1 @test4(i32 %X) {
+ %tmp1 = lshr i32 %X, 7
+ %tmp2 = icmp slt i32 %tmp1, 0
+ ret i1 %tmp2
+}
+
+define i1 @test5(i32 %X) {
+ %tmp1 = ashr i32 %X, 7
+ %tmp2 = icmp slt i32 %tmp1, 0
+ ret i1 %tmp2
+}
+
diff --git a/test/Transforms/InstCombine/shift-sra.ll b/test/Transforms/InstCombine/shift-sra.ll
new file mode 100644
index 0000000..58f3226
--- /dev/null
+++ b/test/Transforms/InstCombine/shift-sra.ll
@@ -0,0 +1,58 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+
+define i32 @test1(i32 %X, i8 %A) {
+ %shift.upgrd.1 = zext i8 %A to i32 ; <i32> [#uses=1]
+ ; can be logical shift.
+ %Y = ashr i32 %X, %shift.upgrd.1 ; <i32> [#uses=1]
+ %Z = and i32 %Y, 1 ; <i32> [#uses=1]
+ ret i32 %Z
+; CHECK: @test1
+; CHECK: lshr i32 %X, %shift.upgrd.1
+}
+
+define i32 @test2(i8 %tmp) {
+ %tmp3 = zext i8 %tmp to i32 ; <i32> [#uses=1]
+ %tmp4 = add i32 %tmp3, 7 ; <i32> [#uses=1]
+ %tmp5 = ashr i32 %tmp4, 3 ; <i32> [#uses=1]
+ ret i32 %tmp5
+; CHECK: @test2
+; CHECK: lshr i32 %tmp4, 3
+}
+
+define i64 @test3(i1 %X, i64 %Y, i1 %Cond) {
+ br i1 %Cond, label %T, label %F
+T:
+ %X2 = sext i1 %X to i64
+ br label %C
+F:
+ %Y2 = ashr i64 %Y, 63
+ br label %C
+C:
+ %P = phi i64 [%X2, %T], [%Y2, %F]
+ %S = ashr i64 %P, 12
+ ret i64 %S
+
+; CHECK: @test3
+; CHECK: %P = phi i64
+; CHECK-NEXT: ret i64 %P
+}
+
+define i64 @test4(i1 %X, i64 %Y, i1 %Cond) {
+ br i1 %Cond, label %T, label %F
+T:
+ %X2 = sext i1 %X to i64
+ br label %C
+F:
+ %Y2 = ashr i64 %Y, 63
+ br label %C
+C:
+ %P = phi i64 [%X2, %T], [%Y2, %F]
+ %R = shl i64 %P, 12
+ %S = ashr i64 %R, 12
+ ret i64 %S
+
+; CHECK: @test4
+; CHECK: %P = phi i64
+; CHECK-NEXT: ret i64 %P
+}
diff --git a/test/Transforms/InstCombine/shift-trunc-shift.ll b/test/Transforms/InstCombine/shift-trunc-shift.ll
new file mode 100644
index 0000000..7133d29
--- /dev/null
+++ b/test/Transforms/InstCombine/shift-trunc-shift.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep lshr.*63
+
+define i32 @t1(i64 %d18) {
+entry:
+ %tmp916 = lshr i64 %d18, 32 ; <i64> [#uses=1]
+ %tmp917 = trunc i64 %tmp916 to i32 ; <i32> [#uses=1]
+ %tmp10 = lshr i32 %tmp917, 31 ; <i32> [#uses=1]
+ ret i32 %tmp10
+}
+
diff --git a/test/Transforms/InstCombine/shift.ll b/test/Transforms/InstCombine/shift.ll
new file mode 100644
index 0000000..feed37b
--- /dev/null
+++ b/test/Transforms/InstCombine/shift.ll
@@ -0,0 +1,345 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @test1(i32 %A) {
+; CHECK: @test1
+; CHECK: ret i32 %A
+ %B = shl i32 %A, 0 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test2(i8 %A) {
+; CHECK: @test2
+; CHECK: ret i32 0
+ %shift.upgrd.1 = zext i8 %A to i32 ; <i32> [#uses=1]
+ %B = shl i32 0, %shift.upgrd.1 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test3(i32 %A) {
+; CHECK: @test3
+; CHECK: ret i32 %A
+ %B = ashr i32 %A, 0 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test4(i8 %A) {
+; CHECK: @test4
+; CHECK: ret i32 0
+ %shift.upgrd.2 = zext i8 %A to i32 ; <i32> [#uses=1]
+ %B = ashr i32 0, %shift.upgrd.2 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+
+define i32 @test5(i32 %A) {
+; CHECK: @test5
+; CHECK: ret i32 0
+ %B = lshr i32 %A, 32 ;; shift all bits out
+ ret i32 %B
+}
+
+define i32 @test5a(i32 %A) {
+; CHECK: @test5a
+; CHECK: ret i32 0
+ %B = shl i32 %A, 32 ;; shift all bits out
+ ret i32 %B
+}
+
+define i32 @test6(i32 %A) {
+; CHECK: @test6
+; CHECK-NEXT: mul i32 %A, 6
+; CHECK-NEXT: ret i32
+ %B = shl i32 %A, 1 ;; convert to an mul instruction
+ %C = mul i32 %B, 3
+ ret i32 %C
+}
+
+define i32 @test7(i8 %A) {
+; CHECK: @test7
+; CHECK-NEXT: ret i32 -1
+ %shift.upgrd.3 = zext i8 %A to i32
+ %B = ashr i32 -1, %shift.upgrd.3 ;; Always equal to -1
+ ret i32 %B
+}
+
+;; (A << 5) << 3 === A << 8 == 0
+define i8 @test8(i8 %A) {
+; CHECK: @test8
+; CHECK: ret i8 0
+ %B = shl i8 %A, 5 ; <i8> [#uses=1]
+ %C = shl i8 %B, 3 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+;; (A << 7) >> 7 === A & 1
+define i8 @test9(i8 %A) {
+; CHECK: @test9
+; CHECK-NEXT: and i8 %A, 1
+; CHECK-NEXT: ret i8
+ %B = shl i8 %A, 7 ; <i8> [#uses=1]
+ %C = lshr i8 %B, 7 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+;; (A >> 7) << 7 === A & 128
+define i8 @test10(i8 %A) {
+; CHECK: @test10
+; CHECK-NEXT: and i8 %A, -128
+; CHECK-NEXT: ret i8
+ %B = lshr i8 %A, 7 ; <i8> [#uses=1]
+ %C = shl i8 %B, 7 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+;; (A >> 3) << 4 === (A & 0x1F) << 1
+define i8 @test11(i8 %A) {
+; CHECK: @test11
+; CHECK-NEXT: mul i8 %A, 6
+; CHECK-NEXT: and i8
+; CHECK-NEXT: ret i8
+ %a = mul i8 %A, 3 ; <i8> [#uses=1]
+ %B = lshr i8 %a, 3 ; <i8> [#uses=1]
+ %C = shl i8 %B, 4 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+;; (A >> 8) << 8 === A & -256
+define i32 @test12(i32 %A) {
+; CHECK: @test12
+; CHECK-NEXT: and i32 %A, -256
+; CHECK-NEXT: ret i32
+ %B = ashr i32 %A, 8 ; <i32> [#uses=1]
+ %C = shl i32 %B, 8 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+;; (A >> 3) << 4 === (A & -8) * 2
+define i8 @test13(i8 %A) {
+; CHECK: @test13
+; CHECK-NEXT: mul i8 %A, 6
+; CHECK-NEXT: and i8
+; CHECK-NEXT: ret i8
+ %a = mul i8 %A, 3 ; <i8> [#uses=1]
+ %B = ashr i8 %a, 3 ; <i8> [#uses=1]
+ %C = shl i8 %B, 4 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+;; D = ((B | 1234) << 4) === ((B << 4) | (1234 << 4))
+define i32 @test14(i32 %A) {
+; CHECK: @test14
+; CHECK-NEXT: or i32 %A, 19744
+; CHECK-NEXT: and i32
+; CHECK-NEXT: ret i32
+ %B = lshr i32 %A, 4 ; <i32> [#uses=1]
+ %C = or i32 %B, 1234 ; <i32> [#uses=1]
+ %D = shl i32 %C, 4 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+;; D = ((B << 4) & 1234) >> 4 === B & (1234 >> 4)
+define i32 @test14a(i32 %A) {
+; CHECK: @test14a
+; CHECK-NEXT: and i32 %A, 77
+; CHECK-NEXT: ret i32
+ %B = shl i32 %A, 4 ; <i32> [#uses=1]
+ %C = and i32 %B, 1234 ; <i32> [#uses=1]
+ %D = lshr i32 %C, 4 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test15(i1 %C) {
+; CHECK: @test15
+; CHECK-NEXT: select i1 %C, i32 12, i32 4
+; CHECK-NEXT: ret i32
+ %A = select i1 %C, i32 3, i32 1 ; <i32> [#uses=1]
+ %V = shl i32 %A, 2 ; <i32> [#uses=1]
+ ret i32 %V
+}
+
+define i32 @test15a(i1 %C) {
+; CHECK: @test15a
+; CHECK-NEXT: select i1 %C, i32 512, i32 128
+; CHECK-NEXT: ret i32
+ %A = select i1 %C, i8 3, i8 1 ; <i8> [#uses=1]
+ %shift.upgrd.4 = zext i8 %A to i32 ; <i32> [#uses=1]
+ %V = shl i32 64, %shift.upgrd.4 ; <i32> [#uses=1]
+ ret i32 %V
+}
+
+define i1 @test16(i32 %X) {
+; CHECK: @test16
+; CHECK-NEXT: and i32 %X, 16
+; CHECK-NEXT: icmp ne i32
+; CHECK-NEXT: ret i1
+ %tmp.3 = ashr i32 %X, 4
+ %tmp.6 = and i32 %tmp.3, 1
+ %tmp.7 = icmp ne i32 %tmp.6, 0
+ ret i1 %tmp.7
+}
+
+define i1 @test17(i32 %A) {
+; CHECK: @test17
+; CHECK-NEXT: and i32 %A, -8
+; CHECK-NEXT: icmp eq i32
+; CHECK-NEXT: ret i1
+ %B = lshr i32 %A, 3 ; <i32> [#uses=1]
+ %C = icmp eq i32 %B, 1234 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+
+define i1 @test18(i8 %A) {
+; CHECK: @test18
+; CHECK: ret i1 false
+
+ %B = lshr i8 %A, 7 ; <i8> [#uses=1]
+ ;; false
+ %C = icmp eq i8 %B, 123 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test19(i32 %A) {
+; CHECK: @test19
+; CHECK-NEXT: icmp ult i32 %A, 4
+; CHECK-NEXT: ret i1
+ %B = ashr i32 %A, 2 ; <i32> [#uses=1]
+ ;; (X & -4) == 0
+ %C = icmp eq i32 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+
+define i1 @test19a(i32 %A) {
+; CHECK: @test19a
+; CHECK-NEXT: and i32 %A, -4
+; CHECK-NEXT: icmp eq i32
+; CHECK-NEXT: ret i1
+ %B = ashr i32 %A, 2 ; <i32> [#uses=1]
+ ;; (X & -4) == -4
+ %C = icmp eq i32 %B, -1 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test20(i8 %A) {
+; CHECK: @test20
+; CHECK: ret i1 false
+ %B = ashr i8 %A, 7 ; <i8> [#uses=1]
+ ;; false
+ %C = icmp eq i8 %B, 123 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test21(i8 %A) {
+; CHECK: @test21
+; CHECK-NEXT: and i8 %A, 15
+; CHECK-NEXT: icmp eq i8
+; CHECK-NEXT: ret i1
+ %B = shl i8 %A, 4 ; <i8> [#uses=1]
+ %C = icmp eq i8 %B, -128 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test22(i8 %A) {
+; CHECK: @test22
+; CHECK-NEXT: and i8 %A, 15
+; CHECK-NEXT: icmp eq i8
+; CHECK-NEXT: ret i1
+ %B = shl i8 %A, 4 ; <i8> [#uses=1]
+ %C = icmp eq i8 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i8 @test23(i32 %A) {
+; CHECK: @test23
+; CHECK-NEXT: trunc i32 %A to i8
+; CHECK-NEXT: ret i8
+
+ ;; casts not needed
+ %B = shl i32 %A, 24 ; <i32> [#uses=1]
+ %C = ashr i32 %B, 24 ; <i32> [#uses=1]
+ %D = trunc i32 %C to i8 ; <i8> [#uses=1]
+ ret i8 %D
+}
+
+define i8 @test24(i8 %X) {
+; CHECK: @test24
+; CHECK-NEXT: and i8 %X, 3
+; CHECK-NEXT: ret i8
+ %Y = and i8 %X, -5 ; <i8> [#uses=1]
+ %Z = shl i8 %Y, 5 ; <i8> [#uses=1]
+ %Q = ashr i8 %Z, 5 ; <i8> [#uses=1]
+ ret i8 %Q
+}
+
+define i32 @test25(i32 %tmp.2, i32 %AA) {
+; CHECK: @test25
+; CHECK-NEXT: and i32 %tmp.2, -131072
+; CHECK-NEXT: add i32 %{{[^,]*}}, %AA
+; CHECK-NEXT: and i32 %{{[^,]*}}, -131072
+; CHECK-NEXT: ret i32
+ %x = lshr i32 %AA, 17 ; <i32> [#uses=1]
+ %tmp.3 = lshr i32 %tmp.2, 17 ; <i32> [#uses=1]
+ %tmp.5 = add i32 %tmp.3, %x ; <i32> [#uses=1]
+ %tmp.6 = shl i32 %tmp.5, 17 ; <i32> [#uses=1]
+ ret i32 %tmp.6
+}
+
+;; handle casts between shifts.
+define i32 @test26(i32 %A) {
+; CHECK: @test26
+; CHECK-NEXT: and i32 %A, -2
+; CHECK-NEXT: ret i32
+ %B = lshr i32 %A, 1 ; <i32> [#uses=1]
+ %C = bitcast i32 %B to i32 ; <i32> [#uses=1]
+ %D = shl i32 %C, 1 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+
+define i1 @test27(i32 %x) nounwind {
+; CHECK: @test27
+; CHECK-NEXT: and i32 %x, 8
+; CHECK-NEXT: icmp ne i32
+; CHECK-NEXT: ret i1
+ %y = lshr i32 %x, 3
+ %z = trunc i32 %y to i1
+ ret i1 %z
+}
+
+define i8 @test28(i8 %x) {
+entry:
+; CHECK: @test28
+; CHECK: icmp slt i8 %x, 0
+; CHECK-NEXT: br i1
+ %tmp1 = lshr i8 %x, 7
+ %cond1 = icmp ne i8 %tmp1, 0
+ br i1 %cond1, label %bb1, label %bb2
+
+bb1:
+ ret i8 0
+
+bb2:
+ ret i8 1
+}
+
+define i8 @test28a(i8 %x, i8 %y) {
+entry:
+; This shouldn't be transformed.
+; CHECK: @test28a
+; CHECK: %tmp1 = lshr i8 %x, 7
+; CHECK: %cond1 = icmp eq i8 %tmp1, 0
+; CHECK: br i1 %cond1, label %bb2, label %bb1
+ %tmp1 = lshr i8 %x, 7
+ %cond1 = icmp ne i8 %tmp1, 0
+ br i1 %cond1, label %bb1, label %bb2
+bb1:
+ ret i8 %tmp1
+bb2:
+ %tmp2 = add i8 %tmp1, %y
+ ret i8 %tmp2
+}
+
+
diff --git a/test/Transforms/InstCombine/shufflemask-undef.ll b/test/Transforms/InstCombine/shufflemask-undef.ll
new file mode 100644
index 0000000..cf87aef
--- /dev/null
+++ b/test/Transforms/InstCombine/shufflemask-undef.ll
@@ -0,0 +1,109 @@
+; RUN: opt < %s -instcombine -S | not grep {shufflevector.\*i32 8}
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9"
+ %struct.ActiveTextureTargets = type { i64, i64, i64, i64, i64, i64 }
+ %struct.AlphaTest = type { float, i16, i8, i8 }
+ %struct.ArrayRange = type { i8, i8, i8, i8 }
+ %struct.BlendMode = type { i16, i16, i16, i16, %struct.IColor4, i16, i16, i8, i8, i8, i8 }
+ %struct.ClearColor = type { double, %struct.IColor4, %struct.IColor4, float, i32 }
+ %struct.ClipPlane = type { i32, [6 x %struct.IColor4] }
+ %struct.ColorBuffer = type { i16, i8, i8, [8 x i16], [0 x i32] }
+ %struct.ColorMatrix = type { [16 x float]*, %struct.ImagingColorScale }
+ %struct.Convolution = type { %struct.IColor4, %struct.ImagingColorScale, i16, i16, [0 x i32], float*, i32, i32 }
+ %struct.DepthTest = type { i16, i16, i8, i8, i8, i8, double, double }
+ %struct.FixedFunction = type { %struct.PPStreamToken* }
+ %struct.FogMode = type { %struct.IColor4, float, float, float, float, float, i16, i16, i16, i8, i8 }
+ %struct.HintMode = type { i16, i16, i16, i16, i16, i16, i16, i16, i16, i16 }
+ %struct.Histogram = type { %struct.ProgramLimits*, i32, i16, i8, i8 }
+ %struct.ImagingColorScale = type { %struct.TCoord2, %struct.TCoord2, %struct.TCoord2, %struct.TCoord2 }
+ %struct.ImagingSubset = type { %struct.Convolution, %struct.Convolution, %struct.Convolution, %struct.ColorMatrix, %struct.Minmax, %struct.Histogram, %struct.ImagingColorScale, %struct.ImagingColorScale, %struct.ImagingColorScale, %struct.ImagingColorScale, i32, [0 x i32] }
+ %struct.Light = type { %struct.IColor4, %struct.IColor4, %struct.IColor4, %struct.IColor4, %struct.PointLineLimits, float, float, float, float, float, %struct.PointLineLimits, float, %struct.PointLineLimits, float, %struct.PointLineLimits, float, float, float, float, float }
+ %struct.LightModel = type { %struct.IColor4, [8 x %struct.Light], [2 x %struct.Material], i32, i16, i16, i16, i8, i8, i8, i8, i8, i8 }
+ %struct.LightProduct = type { %struct.IColor4, %struct.IColor4, %struct.IColor4 }
+ %struct.LineMode = type { float, i32, i16, i16, i8, i8, i8, i8 }
+ %struct.LogicOp = type { i16, i8, i8 }
+ %struct.MaskMode = type { i32, [3 x i32], i8, i8, i8, i8, i8, i8, i8, i8 }
+ %struct.Material = type { %struct.IColor4, %struct.IColor4, %struct.IColor4, %struct.IColor4, float, float, float, float, [8 x %struct.LightProduct], %struct.IColor4, [8 x i32] }
+ %struct.Minmax = type { %struct.MinmaxTable*, i16, i8, i8, [0 x i32] }
+ %struct.MinmaxTable = type { %struct.IColor4, %struct.IColor4 }
+ %struct.Mipmaplevel = type { [4 x i32], [4 x i32], [4 x float], [4 x i32], i32, i32, float*, i8*, i16, i16, i16, i16, [2 x float] }
+ %struct.Multisample = type { float, i8, i8, i8, i8, i8, i8, i8, i8 }
+ %struct.PipelineProgramState = type { i8, i8, i8, i8, [0 x i32], %struct.IColor4* }
+ %struct.PixelMap = type { i32*, float*, float*, float*, float*, float*, float*, float*, float*, i32*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+ %struct.PixelMode = type { float, float, %struct.PixelStore, %struct.PixelTransfer, %struct.PixelMap, %struct.ImagingSubset, i32, i32 }
+ %struct.PixelPack = type { i32, i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8 }
+ %struct.PixelStore = type { %struct.PixelPack, %struct.PixelPack }
+ %struct.PixelTransfer = type { float, float, float, float, float, float, float, float, float, float, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float }
+ %struct.PluginBufferData = type { i32 }
+ %struct.PointLineLimits = type { float, float, float }
+ %struct.PointMode = type { float, float, float, float, %struct.PointLineLimits, float, i8, i8, i8, i8, i16, i16, i32, i16, i16 }
+ %struct.PolygonMode = type { [128 x i8], float, float, i16, i16, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8 }
+ %struct.ProgramLimits = type { i32, i32, i32, i32 }
+ %struct.RegisterCombiners = type { i8, i8, i8, i8, i32, [2 x %struct.IColor4], [8 x %struct.RegisterCombinersPerStageState], %struct.RegisterCombinersFinalStageState }
+ %struct.RegisterCombinersFinalStageState = type { i8, i8, i8, i8, [7 x %struct.RegisterCombinersPerVariableState] }
+ %struct.RegisterCombinersPerPortionState = type { [4 x %struct.RegisterCombinersPerVariableState], i8, i8, i8, i8, i16, i16, i16, i16, i16, i16 }
+ %struct.RegisterCombinersPerStageState = type { [2 x %struct.RegisterCombinersPerPortionState], [2 x %struct.IColor4] }
+ %struct.RegisterCombinersPerVariableState = type { i16, i16, i16, i16 }
+ %struct.SWRSurfaceRec = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i8*, [4 x i8*], i32 }
+ %struct.ScissorTest = type { %struct.ProgramLimits, i8, i8, i8, i8 }
+ %struct.State = type <{ i16, i16, i16, i16, i32, i32, [256 x %struct.IColor4], [128 x %struct.IColor4], %struct.Viewport, %struct.Transform, %struct.LightModel, %struct.ActiveTextureTargets, %struct.AlphaTest, %struct.BlendMode, %struct.ClearColor, %struct.ColorBuffer, %struct.DepthTest, %struct.ArrayRange, %struct.FogMode, %struct.HintMode, %struct.LineMode, %struct.LogicOp, %struct.MaskMode, %struct.PixelMode, %struct.PointMode, %struct.PolygonMode, %struct.ScissorTest, i32, %struct.StencilTest, [8 x %struct.TextureMode], [16 x %struct.TextureImageMode], %struct.ArrayRange, [8 x %struct.TextureCoordGen], %struct.ClipPlane, %struct.Multisample, %struct.RegisterCombiners, %struct.ArrayRange, %struct.ArrayRange, [3 x %struct.PipelineProgramState], %struct.ArrayRange, %struct.TransformFeedback, i32*, %struct.FixedFunction, [3 x i32], [3 x i32] }>
+ %struct.StencilTest = type { [3 x { i32, i32, i16, i16, i16, i16 }], i32, [4 x i8] }
+ %struct.TextureCoordGen = type { { i16, i16, %struct.IColor4, %struct.IColor4 }, { i16, i16, %struct.IColor4, %struct.IColor4 }, { i16, i16, %struct.IColor4, %struct.IColor4 }, { i16, i16, %struct.IColor4, %struct.IColor4 }, i8, i8, i8, i8 }
+ %struct.TextureGeomState = type { i16, i16, i16, i16, i16, i8, i8, i8, i8, i16, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, [6 x i16], [6 x i16] }
+ %struct.TextureImageMode = type { float }
+ %struct.TextureLevel = type { i32, i32, i16, i16, i16, i8, i8, i16, i16, i16, i16, i8* }
+ %struct.TextureMode = type { %struct.IColor4, i32, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, float, float, i16, i16, i16, i16, i16, i16, [4 x i16], i8, i8, i8, i8, [3 x float], [4 x float], float, float }
+ %struct.TextureParamState = type { i16, i16, i16, i16, i16, i16, %struct.IColor4, float, float, float, float, i16, i16, i16, i16, float, i16, i8, i8, i32, i8* }
+ %struct.TextureRec = type { [4 x float], %struct.TextureState*, %struct.Mipmaplevel*, %struct.Mipmaplevel*, float, float, float, float, i8, i8, i8, i8, i16, i16, i16, i16, i32, float, [2 x %struct.PPStreamToken] }
+ %struct.TextureState = type { i16, i8, i8, i16, i16, float, i32, %struct.SWRSurfaceRec*, %struct.TextureParamState, %struct.TextureGeomState, [0 x i32], i8*, i32, %struct.TextureLevel, [1 x [15 x %struct.TextureLevel]] }
+ %struct.Transform = type <{ [24 x [16 x float]], [24 x [16 x float]], [16 x float], float, float, float, float, float, i8, i8, i8, i8, i32, i32, i32, i16, i16, i8, i8, i8, i8, i32 }>
+ %struct.TransformFeedback = type { i8, i8, i8, i8, [0 x i32], [16 x i32], [16 x i32] }
+ %struct.Viewport = type { float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, double, double, i32, i32, i32, i32, float, float, float, float }
+ %struct.IColor4 = type { float, float, float, float }
+ %struct.TCoord2 = type { float, float }
+ %struct.VMGPStack = type { [6 x <4 x float>*], <4 x float>*, i32, i32, <4 x float>*, <4 x float>**, i32, i32, i32, i32, i32, i32 }
+ %struct.VMTextures = type { [16 x %struct.TextureRec*] }
+ %struct.PPStreamToken = type { { i16, i16, i32 } }
+ %struct._VMConstants = type { <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, float, float, float, float, float, float, float, float, float, float, float, float, [256 x float], [528 x i8], { void (i8*, i8*, i32, i8*)*, float (float)*, float (float)*, float (float)*, i32 (float)* } }
+
+define i32 @foo(%struct.State* %dst, <4 x float>* %prgrm, <4 x float>** %buffs, %struct._VMConstants* %cnstn, %struct.PPStreamToken* %pstrm, %struct.PluginBufferData* %gpctx, %struct.VMTextures* %txtrs, %struct.VMGPStack* %gpstk) nounwind {
+bb266.i:
+ getelementptr <4 x float>* null, i32 11 ; <<4 x float>*>:0 [#uses=1]
+ load <4 x float>* %0, align 16 ; <<4 x float>>:1 [#uses=1]
+ shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> < i32 0, i32 1, i32 1, i32 1 > ; <<4 x float>>:2 [#uses=1]
+ shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>>:3 [#uses=1]
+ shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>>:4 [#uses=1]
+ shufflevector <4 x float> %4, <4 x float> %3, <4 x i32> < i32 6, i32 7, i32 2, i32 3 > ; <<4 x float>>:5 [#uses=1]
+ fmul <4 x float> %5, zeroinitializer ; <<4 x float>>:6 [#uses=2]
+ fmul <4 x float> %6, %6 ; <<4 x float>>:7 [#uses=1]
+ fadd <4 x float> zeroinitializer, %7 ; <<4 x float>>:8 [#uses=1]
+ call <4 x float> @llvm.x86.sse.max.ps( <4 x float> zeroinitializer, <4 x float> %8 ) nounwind readnone ; <<4 x float>>:9 [#uses=1]
+ %phitmp40 = bitcast <4 x float> %9 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp4109.i = and <4 x i32> %phitmp40, < i32 8388607, i32 8388607, i32 8388607, i32 8388607 > ; <<4 x i32>> [#uses=1]
+ %tmp4116.i = or <4 x i32> %tmp4109.i, < i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216 > ; <<4 x i32>> [#uses=1]
+ %tmp4117.i = bitcast <4 x i32> %tmp4116.i to <4 x float> ; <<4 x float>> [#uses=1]
+ fadd <4 x float> %tmp4117.i, zeroinitializer ; <<4 x float>>:10 [#uses=1]
+ fmul <4 x float> %10, < float 5.000000e-01, float 5.000000e-01, float 5.000000e-01, float 5.000000e-01 > ; <<4 x float>>:11 [#uses=1]
+ call <4 x float> @llvm.x86.sse.max.ps( <4 x float> %11, <4 x float> zeroinitializer ) nounwind readnone ; <<4 x float>>:12 [#uses=1]
+ call <4 x float> @llvm.x86.sse.min.ps( <4 x float> %12, <4 x float> zeroinitializer ) nounwind readnone ; <<4 x float>>:13 [#uses=1]
+ %tmp4170.i = call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %13, <4 x float> zeroinitializer, i8 2 ) nounwind ; <<4 x float>> [#uses=1]
+ bitcast <4 x float> %tmp4170.i to <16 x i8> ; <<16 x i8>>:14 [#uses=1]
+ call i32 @llvm.x86.sse2.pmovmskb.128( <16 x i8> %14 ) nounwind readnone ; <i32>:15 [#uses=1]
+ icmp eq i32 %15, 0 ; <i1>:16 [#uses=1]
+ br i1 %16, label %bb5574.i, label %bb4521.i
+
+bb4521.i: ; preds = %bb266.i
+ unreachable
+
+bb5574.i: ; preds = %bb266.i
+ unreachable
+}
+
+declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
+
+declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
+
+declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
+
+declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
diff --git a/test/Transforms/InstCombine/shufflevec-constant.ll b/test/Transforms/InstCombine/shufflevec-constant.ll
new file mode 100644
index 0000000..29ae5a7
--- /dev/null
+++ b/test/Transforms/InstCombine/shufflevec-constant.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep {ret <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0x7FF0000000000000, float 0x7FF0000000000000>}
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9"
+
+define <4 x float> @__inff4() nounwind readnone {
+entry:
+ %tmp14 = extractelement <1 x double> bitcast (<2 x float> <float 0x7FF0000000000000, float 0x7FF0000000000000> to <1 x double>), i32 0 ; <double> [#uses=1]
+ %tmp4 = bitcast double %tmp14 to i64 ; <i64> [#uses=1]
+ %tmp3 = bitcast i64 %tmp4 to <2 x float> ; <<2 x float>> [#uses=1]
+ %tmp8 = shufflevector <2 x float> %tmp3, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef> ; <<4 x float>> [#uses=1]
+ %tmp9 = shufflevector <4 x float> zeroinitializer, <4 x float> %tmp8, <4 x i32> <i32 0, i32 1, i32 4, i32 5> ; <<4 x float>> [#uses=0]
+ ret <4 x float> %tmp9
+}
diff --git a/test/Transforms/InstCombine/signed-comparison.ll b/test/Transforms/InstCombine/signed-comparison.ll
new file mode 100644
index 0000000..9a08c64
--- /dev/null
+++ b/test/Transforms/InstCombine/signed-comparison.ll
@@ -0,0 +1,28 @@
+; RUN: opt < %s -instcombine -S > %t
+; RUN: not grep zext %t
+; RUN: not grep slt %t
+; RUN: grep {icmp ult} %t
+
+; Instcombine should convert the zext+slt into a simple ult.
+
+define void @foo(double* %p) nounwind {
+entry:
+ br label %bb
+
+bb:
+ %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %bb ]
+ %t0 = and i64 %indvar, 65535
+ %t1 = getelementptr double* %p, i64 %t0
+ %t2 = load double* %t1, align 8
+ %t3 = fmul double %t2, 2.2
+ store double %t3, double* %t1, align 8
+ %i.04 = trunc i64 %indvar to i16
+ %t4 = add i16 %i.04, 1
+ %t5 = zext i16 %t4 to i32
+ %t6 = icmp slt i32 %t5, 500
+ %indvar.next = add i64 %indvar, 1
+ br i1 %t6, label %bb, label %return
+
+return:
+ ret void
+}
diff --git a/test/Transforms/InstCombine/signext.ll b/test/Transforms/InstCombine/signext.ll
new file mode 100644
index 0000000..49384d6
--- /dev/null
+++ b/test/Transforms/InstCombine/signext.ll
@@ -0,0 +1,87 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128:n8:16:32:64"
+
+define i32 @test1(i32 %x) {
+ %tmp.1 = and i32 %x, 65535 ; <i32> [#uses=1]
+ %tmp.2 = xor i32 %tmp.1, -32768 ; <i32> [#uses=1]
+ %tmp.3 = add i32 %tmp.2, 32768 ; <i32> [#uses=1]
+ ret i32 %tmp.3
+; CHECK: @test1
+; CHECK: %sext = shl i32 %x, 16
+; CHECK: %tmp.3 = ashr i32 %sext, 16
+; CHECK: ret i32 %tmp.3
+}
+
+define i32 @test2(i32 %x) {
+ %tmp.1 = and i32 %x, 65535 ; <i32> [#uses=1]
+ %tmp.2 = xor i32 %tmp.1, 32768 ; <i32> [#uses=1]
+ %tmp.3 = add i32 %tmp.2, -32768 ; <i32> [#uses=1]
+ ret i32 %tmp.3
+; CHECK: @test2
+; CHECK: %sext = shl i32 %x, 16
+; CHECK: %tmp.3 = ashr i32 %sext, 16
+; CHECK: ret i32 %tmp.3
+}
+
+define i32 @test3(i16 %P) {
+ %tmp.1 = zext i16 %P to i32 ; <i32> [#uses=1]
+ %tmp.4 = xor i32 %tmp.1, 32768 ; <i32> [#uses=1]
+ %tmp.5 = add i32 %tmp.4, -32768 ; <i32> [#uses=1]
+ ret i32 %tmp.5
+; CHECK: @test3
+; CHECK: %tmp.5 = sext i16 %P to i32
+; CHECK: ret i32 %tmp.5
+}
+
+define i32 @test4(i16 %P) {
+ %tmp.1 = zext i16 %P to i32 ; <i32> [#uses=1]
+ %tmp.4 = xor i32 %tmp.1, 32768 ; <i32> [#uses=1]
+ %tmp.5 = add i32 %tmp.4, -32768 ; <i32> [#uses=1]
+ ret i32 %tmp.5
+; CHECK: @test4
+; CHECK: %tmp.5 = sext i16 %P to i32
+; CHECK: ret i32 %tmp.5
+}
+
+define i32 @test5(i32 %x) {
+ %tmp.1 = and i32 %x, 255 ; <i32> [#uses=1]
+ %tmp.2 = xor i32 %tmp.1, 128 ; <i32> [#uses=1]
+ %tmp.3 = add i32 %tmp.2, -128 ; <i32> [#uses=1]
+ ret i32 %tmp.3
+; CHECK: @test5
+; CHECK: %sext = shl i32 %x, 24
+; CHECK: %tmp.3 = ashr i32 %sext, 24
+; CHECK: ret i32 %tmp.3
+}
+
+define i32 @test6(i32 %x) {
+ %tmp.2 = shl i32 %x, 16 ; <i32> [#uses=1]
+ %tmp.4 = ashr i32 %tmp.2, 16 ; <i32> [#uses=1]
+ ret i32 %tmp.4
+; CHECK: @test6
+; CHECK: %tmp.2 = shl i32 %x, 16
+; CHECK: %tmp.4 = ashr i32 %tmp.2, 16
+; CHECK: ret i32 %tmp.4
+}
+
+define i32 @test7(i16 %P) {
+ %tmp.1 = zext i16 %P to i32 ; <i32> [#uses=1]
+ %sext1 = shl i32 %tmp.1, 16 ; <i32> [#uses=1]
+ %tmp.5 = ashr i32 %sext1, 16 ; <i32> [#uses=1]
+ ret i32 %tmp.5
+; CHECK: @test7
+; CHECK: %tmp.5 = sext i16 %P to i32
+; CHECK: ret i32 %tmp.5
+}
+
+define i32 @test8(i32 %x) nounwind readnone {
+entry:
+ %shr = lshr i32 %x, 5 ; <i32> [#uses=1]
+ %xor = xor i32 %shr, 67108864 ; <i32> [#uses=1]
+ %sub = add i32 %xor, -67108864 ; <i32> [#uses=1]
+ ret i32 %sub
+; CHECK: @test8
+; CHECK: %sub = ashr i32 %x, 5
+; CHECK: ret i32 %sub
+}
diff --git a/test/Transforms/InstCombine/simplify-demanded-bits-pointer.ll b/test/Transforms/InstCombine/simplify-demanded-bits-pointer.ll
new file mode 100644
index 0000000..6d2193f
--- /dev/null
+++ b/test/Transforms/InstCombine/simplify-demanded-bits-pointer.ll
@@ -0,0 +1,84 @@
+; RUN: opt < %s -instcombine -disable-output
+
+; SimplifyDemandedBits should cope with pointer types.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+target triple = "x86_64-unknown-linux-gnu"
+ %struct.VEC_rtx_base = type { i32, i32, [1 x %struct.rtx_def*] }
+ %struct.VEC_rtx_gc = type { %struct.VEC_rtx_base }
+ %struct.block_symbol = type { [3 x %struct.rtunion], %struct.object_block*, i64 }
+ %struct.object_block = type { %struct.section*, i32, i64, %struct.VEC_rtx_gc*, %struct.VEC_rtx_gc* }
+ %struct.omp_clause_subcode = type { i32 }
+ %struct.rtunion = type { i8* }
+ %struct.rtx_def = type { i16, i8, i8, %struct.u }
+ %struct.section = type { %struct.unnamed_section }
+ %struct.u = type { %struct.block_symbol }
+ %struct.unnamed_section = type { %struct.omp_clause_subcode, void (i8*)*, i8*, %struct.section* }
+
+define fastcc void @cse_insn(%struct.rtx_def* %insn, %struct.rtx_def* %libcall_insn) nounwind {
+entry:
+ br i1 undef, label %bb43, label %bb88
+
+bb43: ; preds = %entry
+ br label %bb88
+
+bb88: ; preds = %bb43, %entry
+ br i1 undef, label %bb95, label %bb107
+
+bb95: ; preds = %bb88
+ unreachable
+
+bb107: ; preds = %bb88
+ %0 = load i16* undef, align 8 ; <i16> [#uses=1]
+ %1 = icmp eq i16 %0, 38 ; <i1> [#uses=1]
+ %src_eqv_here.0 = select i1 %1, %struct.rtx_def* null, %struct.rtx_def* null ; <%struct.rtx_def*> [#uses=1]
+ br i1 undef, label %bb127, label %bb125
+
+bb125: ; preds = %bb107
+ br i1 undef, label %bb127, label %bb126
+
+bb126: ; preds = %bb125
+ br i1 undef, label %bb129, label %bb133
+
+bb127: ; preds = %bb125, %bb107
+ unreachable
+
+bb129: ; preds = %bb126
+ br label %bb133
+
+bb133: ; preds = %bb129, %bb126
+ br i1 undef, label %bb134, label %bb146
+
+bb134: ; preds = %bb133
+ unreachable
+
+bb146: ; preds = %bb133
+ br i1 undef, label %bb180, label %bb186
+
+bb180: ; preds = %bb146
+ %2 = icmp eq %struct.rtx_def* null, null ; <i1> [#uses=1]
+ %3 = zext i1 %2 to i8 ; <i8> [#uses=1]
+ %4 = icmp ne %struct.rtx_def* %src_eqv_here.0, null ; <i1> [#uses=1]
+ %5 = zext i1 %4 to i8 ; <i8> [#uses=1]
+ %toBool181 = icmp ne i8 %3, 0 ; <i1> [#uses=1]
+ %toBool182 = icmp ne i8 %5, 0 ; <i1> [#uses=1]
+ %6 = and i1 %toBool181, %toBool182 ; <i1> [#uses=1]
+ %7 = zext i1 %6 to i8 ; <i8> [#uses=1]
+ %toBool183 = icmp ne i8 %7, 0 ; <i1> [#uses=1]
+ br i1 %toBool183, label %bb184, label %bb186
+
+bb184: ; preds = %bb180
+ br i1 undef, label %bb185, label %bb186
+
+bb185: ; preds = %bb184
+ br label %bb186
+
+bb186: ; preds = %bb185, %bb184, %bb180, %bb146
+ br i1 undef, label %bb190, label %bb195
+
+bb190: ; preds = %bb186
+ unreachable
+
+bb195: ; preds = %bb186
+ unreachable
+}
diff --git a/test/Transforms/InstCombine/sink_instruction.ll b/test/Transforms/InstCombine/sink_instruction.ll
new file mode 100644
index 0000000..e521de2
--- /dev/null
+++ b/test/Transforms/InstCombine/sink_instruction.ll
@@ -0,0 +1,56 @@
+; RUN: opt -instcombine %s -S | FileCheck %s
+
+;; This tests that the instructions in the entry block are sunk into each
+;; arm of the 'if'.
+
+define i32 @test1(i1 %C, i32 %A, i32 %B) {
+; CHECK: @test1
+entry:
+ %tmp.2 = sdiv i32 %A, %B ; <i32> [#uses=1]
+ %tmp.9 = add i32 %B, %A ; <i32> [#uses=1]
+ br i1 %C, label %then, label %endif
+
+then: ; preds = %entry
+ ret i32 %tmp.9
+
+endif: ; preds = %entry
+; CHECK: sdiv i32
+; CHECK-NEXT: ret i32
+ ret i32 %tmp.2
+}
+
+
+;; PHI use, sink divide before call.
+define i32 @test2(i32 %x) nounwind ssp {
+; CHECK: @test2
+; CHECK-NOT: sdiv i32
+entry:
+ br label %bb
+
+bb: ; preds = %bb2, %entry
+ %x_addr.17 = phi i32 [ %x, %entry ], [ %x_addr.0, %bb2 ] ; <i32> [#uses=4]
+ %i.06 = phi i32 [ 0, %entry ], [ %4, %bb2 ] ; <i32> [#uses=1]
+ %0 = add nsw i32 %x_addr.17, 1 ; <i32> [#uses=1]
+ %1 = sdiv i32 %0, %x_addr.17 ; <i32> [#uses=1]
+ %2 = icmp eq i32 %x_addr.17, 0 ; <i1> [#uses=1]
+ br i1 %2, label %bb1, label %bb2
+
+bb1: ; preds = %bb
+; CHECK: bb1:
+; CHECK-NEXT: add nsw i32 %x_addr.17, 1
+; CHECK-NEXT: sdiv i32
+; CHECK-NEXT: tail call i32 @bar()
+ %3 = tail call i32 @bar() nounwind ; <i32> [#uses=0]
+ br label %bb2
+
+bb2: ; preds = %bb, %bb1
+ %x_addr.0 = phi i32 [ %1, %bb1 ], [ %x_addr.17, %bb ] ; <i32> [#uses=2]
+ %4 = add nsw i32 %i.06, 1 ; <i32> [#uses=2]
+ %exitcond = icmp eq i32 %4, 1000000 ; <i1> [#uses=1]
+ br i1 %exitcond, label %bb4, label %bb
+
+bb4: ; preds = %bb2
+ ret i32 %x_addr.0
+}
+
+declare i32 @bar()
diff --git a/test/Transforms/InstCombine/sitofp.ll b/test/Transforms/InstCombine/sitofp.ll
new file mode 100644
index 0000000..bd31b89
--- /dev/null
+++ b/test/Transforms/InstCombine/sitofp.ll
@@ -0,0 +1,55 @@
+; RUN: opt < %s -instcombine -S | not grep itofp
+
+define i1 @test1(i8 %A) {
+ %B = sitofp i8 %A to double
+ %C = fcmp ult double %B, 128.0
+ ret i1 %C ; True!
+}
+define i1 @test2(i8 %A) {
+ %B = sitofp i8 %A to double
+ %C = fcmp ugt double %B, -128.1
+ ret i1 %C ; True!
+}
+
+define i1 @test3(i8 %A) {
+ %B = sitofp i8 %A to double
+ %C = fcmp ule double %B, 127.0
+ ret i1 %C ; true!
+}
+
+define i1 @test4(i8 %A) {
+ %B = sitofp i8 %A to double
+ %C = fcmp ult double %B, 127.0
+ ret i1 %C ; A != 127
+}
+
+define i32 @test5(i32 %A) {
+ %B = sitofp i32 %A to double
+ %C = fptosi double %B to i32
+ %D = uitofp i32 %C to double
+ %E = fptoui double %D to i32
+ ret i32 %E
+}
+
+define i32 @test6(i32 %A) {
+ %B = and i32 %A, 7 ; <i32> [#uses=1]
+ %C = and i32 %A, 32 ; <i32> [#uses=1]
+ %D = sitofp i32 %B to double ; <double> [#uses=1]
+ %E = sitofp i32 %C to double ; <double> [#uses=1]
+ %F = fadd double %D, %E ; <double> [#uses=1]
+ %G = fptosi double %F to i32 ; <i32> [#uses=1]
+ ret i32 %G
+}
+
+define i32 @test7(i32 %a) nounwind {
+ %b = sitofp i32 %a to double ; <double> [#uses=1]
+ %c = fptoui double %b to i32 ; <i32> [#uses=1]
+ ret i32 %c
+}
+
+define i32 @test8(i32 %a) nounwind {
+ %b = uitofp i32 %a to double ; <double> [#uses=1]
+ %c = fptosi double %b to i32 ; <i32> [#uses=1]
+ ret i32 %c
+}
+
diff --git a/test/Transforms/InstCombine/srem-simplify-bug.ll b/test/Transforms/InstCombine/srem-simplify-bug.ll
new file mode 100644
index 0000000..af824a4
--- /dev/null
+++ b/test/Transforms/InstCombine/srem-simplify-bug.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {ret i1 false}
+; PR2276
+
+define i1 @f(i32 %x) {
+ %A = or i32 %x, 1
+ %B = srem i32 %A, 1
+ %C = icmp ne i32 %B, 0
+ ret i1 %C
+}
diff --git a/test/Transforms/InstCombine/srem.ll b/test/Transforms/InstCombine/srem.ll
new file mode 100644
index 0000000..beefe4f
--- /dev/null
+++ b/test/Transforms/InstCombine/srem.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep srem
+
+define i64 @foo(i64 %x1, i64 %y2) {
+ %r = sdiv i64 %x1, %y2
+ %r7 = mul i64 %r, %y2
+ %r8 = sub i64 %x1, %r7
+ ret i64 %r8
+}
diff --git a/test/Transforms/InstCombine/srem1.ll b/test/Transforms/InstCombine/srem1.ll
new file mode 100644
index 0000000..f18690c
--- /dev/null
+++ b/test/Transforms/InstCombine/srem1.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine
+; PR2670
+
+@g_127 = external global i32 ; <i32*> [#uses=1]
+
+define i32 @func_56(i32 %p_58, i32 %p_59, i32 %p_61, i16 signext %p_62) nounwind {
+entry:
+ %call = call i32 (...)* @rshift_s_s( i32 %p_61, i32 1 ) ; <i32> [#uses=1]
+ %conv = sext i32 %call to i64 ; <i64> [#uses=1]
+ %or = or i64 -1734012817166602727, %conv ; <i64> [#uses=1]
+ %rem = srem i64 %or, 1 ; <i64> [#uses=1]
+ %cmp = icmp eq i64 %rem, 1 ; <i1> [#uses=1]
+ %cmp.ext = zext i1 %cmp to i32 ; <i32> [#uses=1]
+ store i32 %cmp.ext, i32* @g_127
+ ret i32 undef
+}
+
+declare i32 @rshift_s_s(...)
diff --git a/test/Transforms/InstCombine/stack-overalign.ll b/test/Transforms/InstCombine/stack-overalign.ll
new file mode 100644
index 0000000..88b4114
--- /dev/null
+++ b/test/Transforms/InstCombine/stack-overalign.ll
@@ -0,0 +1,29 @@
+; RUN: opt < %s -instcombine -S | grep {align 32} | count 1
+
+; It's tempting to have an instcombine in which the src pointer of a
+; memcpy is aligned up to the alignment of the destination, however
+; there are pitfalls. If the src is an alloca, aligning it beyond what
+; the target's stack pointer is aligned at will require dynamic
+; stack realignment, which can require functions that don't otherwise
+; need a frame pointer to need one.
+;
+; Abstaining from this transform is not the only way to approach this
+; issue. Some late phase could be smart enough to reduce alloca
+; alignments when they are greater than they need to be. Or, codegen
+; could do dynamic alignment for just the one alloca, and leave the
+; main stack pointer at its standard alignment.
+
+@dst = global [1024 x i8] zeroinitializer, align 32
+
+define void @foo() nounwind {
+entry:
+ %src = alloca [1024 x i8], align 1
+ %src1 = getelementptr [1024 x i8]* %src, i32 0, i32 0
+ call void @llvm.memcpy.i32(i8* getelementptr ([1024 x i8]* @dst, i32 0, i32 0), i8* %src1, i32 1024, i32 1)
+ call void @frob(i8* %src1) nounwind
+ ret void
+}
+
+declare void @llvm.memcpy.i32(i8* nocapture, i8* nocapture, i32, i32) nounwind
+
+declare void @frob(i8*)
diff --git a/test/Transforms/InstCombine/stacksaverestore.ll b/test/Transforms/InstCombine/stacksaverestore.ll
new file mode 100644
index 0000000..0fcaefa
--- /dev/null
+++ b/test/Transforms/InstCombine/stacksaverestore.ll
@@ -0,0 +1,56 @@
+; RUN: opt < %s -instcombine -S | grep {call.*stackrestore} | count 1
+
+declare i8* @llvm.stacksave()
+declare void @llvm.stackrestore(i8*)
+
+;; Test that llvm.stackrestore is removed when possible.
+define i32* @test1(i32 %P) {
+ %tmp = call i8* @llvm.stacksave( )
+ call void @llvm.stackrestore( i8* %tmp ) ;; not restoring anything
+ %A = alloca i32, i32 %P
+ ret i32* %A
+}
+
+define void @test2(i8* %X) {
+ call void @llvm.stackrestore( i8* %X ) ;; no allocas before return.
+ ret void
+}
+
+define void @foo(i32 %size) nounwind {
+entry:
+ %tmp118124 = icmp sgt i32 %size, 0 ; <i1> [#uses=1]
+ br i1 %tmp118124, label %bb.preheader, label %return
+
+bb.preheader: ; preds = %entry
+ %tmp25 = add i32 %size, -1 ; <i32> [#uses=1]
+ %tmp125 = icmp slt i32 %size, 1 ; <i1> [#uses=1]
+ %smax = select i1 %tmp125, i32 1, i32 %size ; <i32> [#uses=1]
+ br label %bb
+
+bb: ; preds = %bb, %bb.preheader
+ %i.0.reg2mem.0 = phi i32 [ 0, %bb.preheader ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
+ %tmp = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
+ %tmp23 = alloca i8, i32 %size ; <i8*> [#uses=2]
+ %tmp27 = getelementptr i8* %tmp23, i32 %tmp25 ; <i8*> [#uses=1]
+ store i8 0, i8* %tmp27, align 1
+ %tmp28 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
+ %tmp52 = alloca i8, i32 %size ; <i8*> [#uses=1]
+ %tmp53 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
+ %tmp77 = alloca i8, i32 %size ; <i8*> [#uses=1]
+ %tmp78 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
+ %tmp102 = alloca i8, i32 %size ; <i8*> [#uses=1]
+ call void @bar( i32 %i.0.reg2mem.0, i8* %tmp23, i8* %tmp52, i8* %tmp77, i8* %tmp102, i32 %size ) nounwind
+ call void @llvm.stackrestore( i8* %tmp78 )
+ call void @llvm.stackrestore( i8* %tmp53 )
+ call void @llvm.stackrestore( i8* %tmp28 )
+ call void @llvm.stackrestore( i8* %tmp )
+ %indvar.next = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
+ %exitcond = icmp eq i32 %indvar.next, %smax ; <i1> [#uses=1]
+ br i1 %exitcond, label %return, label %bb
+
+return: ; preds = %bb, %entry
+ ret void
+}
+
+declare void @bar(i32, i8*, i8*, i8*, i8*, i32)
+
diff --git a/test/Transforms/InstCombine/store.ll b/test/Transforms/InstCombine/store.ll
new file mode 100644
index 0000000..64460d7
--- /dev/null
+++ b/test/Transforms/InstCombine/store.ll
@@ -0,0 +1,85 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define void @test1(i32* %P) {
+ store i32 undef, i32* %P
+ store i32 123, i32* undef
+ store i32 124, i32* null
+ ret void
+; CHECK: @test1(
+; CHECK-NEXT: store i32 123, i32* undef
+; CHECK-NEXT: store i32 undef, i32* null
+; CHECK-NEXT: ret void
+}
+
+define void @test2(i32* %P) {
+ %X = load i32* %P ; <i32> [#uses=1]
+ %Y = add i32 %X, 0 ; <i32> [#uses=1]
+ store i32 %Y, i32* %P
+ ret void
+; CHECK: @test2
+; CHECK-NEXT: ret void
+}
+
+;; Simple sinking tests
+
+; "if then else"
+define i32 @test3(i1 %C) {
+ %A = alloca i32
+ br i1 %C, label %Cond, label %Cond2
+
+Cond:
+ store i32 -987654321, i32* %A
+ br label %Cont
+
+Cond2:
+ store i32 47, i32* %A
+ br label %Cont
+
+Cont:
+ %V = load i32* %A
+ ret i32 %V
+; CHECK: @test3
+; CHECK-NOT: alloca
+; CHECK: Cont:
+; CHECK-NEXT: %storemerge = phi i32 [ 47, %Cond2 ], [ -987654321, %Cond ]
+; CHECK-NEXT: ret i32 %storemerge
+}
+
+; "if then"
+define i32 @test4(i1 %C) {
+ %A = alloca i32
+ store i32 47, i32* %A
+ br i1 %C, label %Cond, label %Cont
+
+Cond:
+ store i32 -987654321, i32* %A
+ br label %Cont
+
+Cont:
+ %V = load i32* %A
+ ret i32 %V
+; CHECK: @test4
+; CHECK-NOT: alloca
+; CHECK: Cont:
+; CHECK-NEXT: %storemerge = phi i32 [ -987654321, %Cond ], [ 47, %0 ]
+; CHECK-NEXT: ret i32 %storemerge
+}
+
+; "if then"
+define void @test5(i1 %C, i32* %P) {
+ store i32 47, i32* %P, align 1
+ br i1 %C, label %Cond, label %Cont
+
+Cond:
+ store i32 -987654321, i32* %P, align 1
+ br label %Cont
+
+Cont:
+ ret void
+; CHECK: @test5
+; CHECK: Cont:
+; CHECK-NEXT: %storemerge = phi i32
+; CHECK-NEXT: store i32 %storemerge, i32* %P, align 1
+; CHECK-NEXT: ret void
+}
+
diff --git a/test/Transforms/InstCombine/sub.ll b/test/Transforms/InstCombine/sub.ll
new file mode 100644
index 0000000..29bd7be
--- /dev/null
+++ b/test/Transforms/InstCombine/sub.ll
@@ -0,0 +1,283 @@
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+; Optimize subtracts.
+;
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @test1(i32 %A) {
+ %B = sub i32 %A, %A
+ ret i32 %B
+; CHECK: @test1
+; CHECK: ret i32 0
+}
+
+define i32 @test2(i32 %A) {
+ %B = sub i32 %A, 0
+ ret i32 %B
+; CHECK: @test2
+; CHECK: ret i32 %A
+}
+
+define i32 @test3(i32 %A) {
+ %B = sub i32 0, %A
+ %C = sub i32 0, %B
+ ret i32 %C
+; CHECK: @test3
+; CHECK: ret i32 %A
+}
+
+define i32 @test4(i32 %A, i32 %x) {
+ %B = sub i32 0, %A
+ %C = sub i32 %x, %B
+ ret i32 %C
+; CHECK: @test4
+; CHECK: %C = add i32 %x, %A
+; CHECK: ret i32 %C
+}
+
+define i32 @test5(i32 %A, i32 %B, i32 %C) {
+ %D = sub i32 %B, %C
+ %E = sub i32 %A, %D
+ ret i32 %E
+; CHECK: @test5
+; CHECK: %D = sub i32 %C, %B
+; CHECK: %E = add
+; CHECK: ret i32 %E
+}
+
+define i32 @test6(i32 %A, i32 %B) {
+ %C = and i32 %A, %B
+ %D = sub i32 %A, %C
+ ret i32 %D
+; CHECK: @test6
+; CHECK-NEXT: xor i32 %B, -1
+; CHECK-NEXT: %D = and i32
+; CHECK-NEXT: ret i32 %D
+}
+
+define i32 @test7(i32 %A) {
+ %B = sub i32 -1, %A
+ ret i32 %B
+; CHECK: @test7
+; CHECK: %B = xor i32 %A, -1
+; CHECK: ret i32 %B
+}
+
+define i32 @test8(i32 %A) {
+ %B = mul i32 9, %A
+ %C = sub i32 %B, %A
+ ret i32 %C
+; CHECK: @test8
+; CHECK: %C = shl i32 %A, 3
+; CHECK: ret i32 %C
+}
+
+define i32 @test9(i32 %A) {
+ %B = mul i32 3, %A
+ %C = sub i32 %A, %B
+ ret i32 %C
+; CHECK: @test9
+; CHECK: %C = mul i32 %A, -2
+; CHECK: ret i32 %C
+}
+
+define i32 @test10(i32 %A, i32 %B) {
+ %C = sub i32 0, %A
+ %D = sub i32 0, %B
+ %E = mul i32 %C, %D
+ ret i32 %E
+; CHECK: @test10
+; CHECK: %E = mul i32 %A, %B
+; CHECK: ret i32 %E
+}
+
+define i32 @test10a(i32 %A) {
+ %C = sub i32 0, %A
+ %E = mul i32 %C, 7
+ ret i32 %E
+; CHECK: @test10a
+; CHECK: %E = mul i32 %A, -7
+; CHECK: ret i32 %E
+}
+
+define i1 @test11(i8 %A, i8 %B) {
+ %C = sub i8 %A, %B
+ %cD = icmp ne i8 %C, 0
+ ret i1 %cD
+; CHECK: @test11
+; CHECK: %cD = icmp ne i8 %A, %B
+; CHECK: ret i1 %cD
+}
+
+define i32 @test12(i32 %A) {
+ %B = ashr i32 %A, 31
+ %C = sub i32 0, %B
+ ret i32 %C
+; CHECK: @test12
+; CHECK: %C = lshr i32 %A, 31
+; CHECK: ret i32 %C
+}
+
+define i32 @test13(i32 %A) {
+ %B = lshr i32 %A, 31
+ %C = sub i32 0, %B
+ ret i32 %C
+; CHECK: @test13
+; CHECK: %C = ashr i32 %A, 31
+; CHECK: ret i32 %C
+}
+
+define i32 @test14(i32 %A) {
+ %B = lshr i32 %A, 31
+ %C = bitcast i32 %B to i32
+ %D = sub i32 0, %C
+ ret i32 %D
+; CHECK: @test14
+; CHECK: %D = ashr i32 %A, 31
+; CHECK: ret i32 %D
+}
+
+define i32 @test15(i32 %A, i32 %B) {
+ %C = sub i32 0, %A
+ %D = srem i32 %B, %C
+ ret i32 %D
+; CHECK: @test15
+; CHECK: %D = srem i32 %B, %A
+; CHECK: ret i32 %D
+}
+
+define i32 @test16(i32 %A) {
+ %X = sdiv i32 %A, 1123
+ %Y = sub i32 0, %X
+ ret i32 %Y
+; CHECK: @test16
+; CHECK: %Y = sdiv i32 %A, -1123
+; CHECK: ret i32 %Y
+}
+
+; Can't fold the subtract here because the negation might overflow.
+; PR3142
+define i32 @test17(i32 %A) {
+ %B = sub i32 0, %A
+ %C = sdiv i32 %B, 1234
+ ret i32 %C
+; CHECK: @test17
+; CHECK: %B = sub i32 0, %A
+; CHECK: %C = sdiv i32 %B, 1234
+; CHECK: ret i32 %C
+}
+
+define i64 @test18(i64 %Y) {
+ %tmp.4 = shl i64 %Y, 2
+ %tmp.12 = shl i64 %Y, 2
+ %tmp.8 = sub i64 %tmp.4, %tmp.12
+ ret i64 %tmp.8
+; CHECK: @test18
+; CHECK: ret i64 0
+}
+
+define i32 @test19(i32 %X, i32 %Y) {
+ %Z = sub i32 %X, %Y
+ %Q = add i32 %Z, %Y
+ ret i32 %Q
+; CHECK: @test19
+; CHECK: ret i32 %X
+}
+
+define i1 @test20(i32 %g, i32 %h) {
+ %tmp.2 = sub i32 %g, %h
+ %tmp.4 = icmp ne i32 %tmp.2, %g
+ ret i1 %tmp.4
+; CHECK: @test20
+; CHECK: %tmp.4 = icmp ne i32 %h, 0
+; CHECK: ret i1 %tmp.4
+}
+
+define i1 @test21(i32 %g, i32 %h) {
+ %tmp.2 = sub i32 %g, %h
+ %tmp.4 = icmp ne i32 %tmp.2, %g
+ ret i1 %tmp.4
+; CHECK: @test21
+; CHECK: %tmp.4 = icmp ne i32 %h, 0
+; CHECK: ret i1 %tmp.4
+}
+
+; PR2298
+define i1 @test22(i32 %a, i32 %b) zeroext nounwind {
+ %tmp2 = sub i32 0, %a
+ %tmp4 = sub i32 0, %b
+ %tmp5 = icmp eq i32 %tmp2, %tmp4
+ ret i1 %tmp5
+; CHECK: @test22
+; CHECK: %tmp5 = icmp eq i32 %a, %b
+; CHECK: ret i1 %tmp5
+}
+
+; rdar://7362831
+define i32 @test23(i8* %P, i64 %A){
+ %B = getelementptr inbounds i8* %P, i64 %A
+ %C = ptrtoint i8* %B to i64
+ %D = trunc i64 %C to i32
+ %E = ptrtoint i8* %P to i64
+ %F = trunc i64 %E to i32
+ %G = sub i32 %D, %F
+ ret i32 %G
+; CHECK: @test23
+; CHECK-NEXT: = trunc i64 %A to i32
+; CHECK-NEXT: ret i32
+}
+
+define i64 @test24(i8* %P, i64 %A){
+ %B = getelementptr inbounds i8* %P, i64 %A
+ %C = ptrtoint i8* %B to i64
+ %E = ptrtoint i8* %P to i64
+ %G = sub i64 %C, %E
+ ret i64 %G
+; CHECK: @test24
+; CHECK-NEXT: ret i64 %A
+}
+
+define i64 @test24a(i8* %P, i64 %A){
+ %B = getelementptr inbounds i8* %P, i64 %A
+ %C = ptrtoint i8* %B to i64
+ %E = ptrtoint i8* %P to i64
+ %G = sub i64 %E, %C
+ ret i64 %G
+; CHECK: @test24a
+; CHECK-NEXT: sub i64 0, %A
+; CHECK-NEXT: ret i64
+}
+
+@Arr = external global [42 x i16]
+
+define i64 @test24b(i8* %P, i64 %A){
+ %B = getelementptr inbounds [42 x i16]* @Arr, i64 0, i64 %A
+ %C = ptrtoint i16* %B to i64
+ %G = sub i64 %C, ptrtoint ([42 x i16]* @Arr to i64)
+ ret i64 %G
+; CHECK: @test24b
+; CHECK-NEXT: shl i64 %A, 1
+; CHECK-NEXT: ret i64
+}
+
+
+define i64 @test25(i8* %P, i64 %A){
+ %B = getelementptr inbounds [42 x i16]* @Arr, i64 0, i64 %A
+ %C = ptrtoint i16* %B to i64
+ %G = sub i64 %C, ptrtoint (i16* getelementptr ([42 x i16]* @Arr, i64 1, i64 0) to i64)
+ ret i64 %G
+; CHECK: @test25
+; CHECK-NEXT: shl i64 %A, 1
+; CHECK-NEXT: add i64 {{.*}}, -84
+; CHECK-NEXT: ret i64
+}
+
+define i32 @test26(i32 %x) {
+ %shl = shl i32 3, %x
+ %neg = sub i32 0, %shl
+ ret i32 %neg
+; CHECK: @test26
+; CHECK-NEXT: shl i32 -3
+; CHECK-NEXT: ret i32
+}
+
diff --git a/test/Transforms/InstCombine/trunc-mask-ext.ll b/test/Transforms/InstCombine/trunc-mask-ext.ll
new file mode 100644
index 0000000..93e3753
--- /dev/null
+++ b/test/Transforms/InstCombine/trunc-mask-ext.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -instcombine -S > %t
+; RUN: not grep zext %t
+; RUN: not grep sext %t
+
+; Instcombine should be able to eliminate all of these ext casts.
+
+declare void @use(i32)
+
+define i64 @foo(i64 %a) {
+ %b = trunc i64 %a to i32
+ %c = and i32 %b, 15
+ %d = zext i32 %c to i64
+ call void @use(i32 %b)
+ ret i64 %d
+}
+define i64 @bar(i64 %a) {
+ %b = trunc i64 %a to i32
+ %c = shl i32 %b, 4
+ %q = ashr i32 %c, 4
+ %d = sext i32 %q to i64
+ call void @use(i32 %b)
+ ret i64 %d
+}
+define i64 @goo(i64 %a) {
+ %b = trunc i64 %a to i32
+ %c = and i32 %b, 8
+ %d = zext i32 %c to i64
+ call void @use(i32 %b)
+ ret i64 %d
+}
+define i64 @hoo(i64 %a) {
+ %b = trunc i64 %a to i32
+ %c = and i32 %b, 8
+ %x = xor i32 %c, 8
+ %d = zext i32 %x to i64
+ call void @use(i32 %b)
+ ret i64 %d
+}
diff --git a/test/Transforms/InstCombine/udiv-simplify-bug-0.ll b/test/Transforms/InstCombine/udiv-simplify-bug-0.ll
new file mode 100644
index 0000000..bfdd98c
--- /dev/null
+++ b/test/Transforms/InstCombine/udiv-simplify-bug-0.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep {ret i64 0} | count 2
+
+define i64 @foo(i32 %x) nounwind {
+ %y = lshr i32 %x, 1
+ %r = udiv i32 %y, -1
+ %z = sext i32 %r to i64
+ ret i64 %z
+}
+define i64 @bar(i32 %x) nounwind {
+ %y = lshr i32 %x, 31
+ %r = udiv i32 %y, 3
+ %z = sext i32 %r to i64
+ ret i64 %z
+}
diff --git a/test/Transforms/InstCombine/udiv-simplify-bug-1.ll b/test/Transforms/InstCombine/udiv-simplify-bug-1.ll
new file mode 100644
index 0000000..d95e8f8
--- /dev/null
+++ b/test/Transforms/InstCombine/udiv-simplify-bug-1.ll
@@ -0,0 +1,20 @@
+; RUN: opt < %s -instcombine -S > %t1.ll
+; RUN: grep udiv %t1.ll | count 2
+; RUN: grep zext %t1.ll | count 2
+; PR2274
+
+; The udiv instructions shouldn't be optimized away, and the
+; sext instructions should be optimized to zext.
+
+define i64 @bar(i32 %x) nounwind {
+ %y = lshr i32 %x, 30
+ %r = udiv i32 %y, 3
+ %z = sext i32 %r to i64
+ ret i64 %z
+}
+define i64 @qux(i32 %x, i32 %v) nounwind {
+ %y = lshr i32 %x, 31
+ %r = udiv i32 %y, %v
+ %z = sext i32 %r to i64
+ ret i64 %z
+}
diff --git a/test/Transforms/InstCombine/udiv_select_to_select_shift.ll b/test/Transforms/InstCombine/udiv_select_to_select_shift.ll
new file mode 100644
index 0000000..9b059a6
--- /dev/null
+++ b/test/Transforms/InstCombine/udiv_select_to_select_shift.ll
@@ -0,0 +1,17 @@
+; Test that this transform works:
+; udiv X, (Select Cond, C1, C2) --> Select Cond, (shr X, C1), (shr X, C2)
+;
+; RUN: opt < %s -instcombine -S -o %t
+; RUN: not grep select %t
+; RUN: grep lshr %t | count 2
+; RUN: not grep udiv %t
+
+define i64 @test(i64 %X, i1 %Cond ) {
+entry:
+ %divisor1 = select i1 %Cond, i64 16, i64 8
+ %quotient1 = udiv i64 %X, %divisor1
+ %divisor2 = select i1 %Cond, i64 8, i64 0
+ %quotient2 = udiv i64 %X, %divisor2
+ %sum = add i64 %quotient1, %quotient2
+ ret i64 %sum
+}
diff --git a/test/Transforms/InstCombine/udivrem-change-width.ll b/test/Transforms/InstCombine/udivrem-change-width.ll
new file mode 100644
index 0000000..9983944
--- /dev/null
+++ b/test/Transforms/InstCombine/udivrem-change-width.ll
@@ -0,0 +1,21 @@
+; RUN: opt < %s -instcombine -S | not grep zext
+; PR4548
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+define i8 @udiv_i8(i8 %a, i8 %b) nounwind {
+ %conv = zext i8 %a to i32
+ %conv2 = zext i8 %b to i32
+ %div = udiv i32 %conv, %conv2
+ %conv3 = trunc i32 %div to i8
+ ret i8 %conv3
+}
+
+define i8 @urem_i8(i8 %a, i8 %b) nounwind {
+ %conv = zext i8 %a to i32
+ %conv2 = zext i8 %b to i32
+ %div = urem i32 %conv, %conv2
+ %conv3 = trunc i32 %div to i8
+ ret i8 %conv3
+}
+
diff --git a/test/Transforms/InstCombine/urem-simplify-bug.ll b/test/Transforms/InstCombine/urem-simplify-bug.ll
new file mode 100644
index 0000000..7c2b4b0
--- /dev/null
+++ b/test/Transforms/InstCombine/urem-simplify-bug.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -instcombine -S | grep {= or i32 %x, -5 }
+
+@.str = internal constant [5 x i8] c"foo\0A\00" ; <[5 x i8]*> [#uses=1]
+@.str1 = internal constant [5 x i8] c"bar\0A\00" ; <[5 x i8]*> [#uses=1]
+
+define i32 @main() nounwind {
+entry:
+ %x = call i32 @func_11( ) nounwind ; <i32> [#uses=1]
+ %tmp3 = or i32 %x, -5 ; <i32> [#uses=1]
+ %tmp5 = urem i32 251, %tmp3 ; <i32> [#uses=1]
+ %tmp6 = icmp ne i32 %tmp5, 0 ; <i1> [#uses=1]
+ %tmp67 = zext i1 %tmp6 to i32 ; <i32> [#uses=1]
+ %tmp9 = urem i32 %tmp67, 95 ; <i32> [#uses=1]
+ %tmp10 = and i32 %tmp9, 1 ; <i32> [#uses=1]
+ %tmp12 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
+ br i1 %tmp12, label %bb14, label %bb
+
+bb: ; preds = %entry
+ br label %bb15
+
+bb14: ; preds = %entry
+ br label %bb15
+
+bb15: ; preds = %bb14, %bb
+ %iftmp.0.0 = phi i8* [ getelementptr ([5 x i8]* @.str1, i32 0, i32 0), %bb14 ], [ getelementptr ([5 x i8]* @.str, i32 0, i32 0), %bb ] ; <i8*> [#uses=1]
+ %tmp17 = call i32 (i8*, ...)* @printf( i8* %iftmp.0.0 ) nounwind ; <i32> [#uses=0]
+ ret i32 0
+}
+
+declare i32 @func_11()
+
+declare i32 @printf(i8*, ...) nounwind
diff --git a/test/Transforms/InstCombine/urem.ll b/test/Transforms/InstCombine/urem.ll
new file mode 100644
index 0000000..5108422
--- /dev/null
+++ b/test/Transforms/InstCombine/urem.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep urem
+
+define i64 @rem_unsigned(i64 %x1, i64 %y2) {
+ %r = udiv i64 %x1, %y2
+ %r7 = mul i64 %r, %y2
+ %r8 = sub i64 %x1, %r7
+ ret i64 %r8
+}
diff --git a/test/Transforms/InstCombine/vec_demanded_elts-2.ll b/test/Transforms/InstCombine/vec_demanded_elts-2.ll
new file mode 100644
index 0000000..4159361
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_demanded_elts-2.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine -S | not grep extractelement
+
+define void @get_image() nounwind {
+entry:
+ %0 = call i32 @fgetc(i8* null) nounwind ; <i32> [#uses=1]
+ %1 = trunc i32 %0 to i8 ; <i8> [#uses=1]
+ %tmp2 = insertelement <100 x i8> zeroinitializer, i8 %1, i32 1 ; <<100 x i8>> [#uses=1]
+ %tmp1 = extractelement <100 x i8> %tmp2, i32 0 ; <i8> [#uses=1]
+ %2 = icmp eq i8 %tmp1, 80 ; <i1> [#uses=1]
+ br i1 %2, label %bb2, label %bb3
+
+bb2: ; preds = %entry
+ br label %bb3
+
+bb3: ; preds = %bb2, %entry
+ unreachable
+}
+
+declare i32 @fgetc(i8*)
diff --git a/test/Transforms/InstCombine/vec_demanded_elts-3.ll b/test/Transforms/InstCombine/vec_demanded_elts-3.ll
new file mode 100644
index 0000000..62e4370
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_demanded_elts-3.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | not grep load
+; PR4340
+
+define void @vac(<4 x float>* nocapture %a) nounwind {
+entry:
+ %tmp1 = load <4 x float>* %a ; <<4 x float>> [#uses=1]
+ %vecins = insertelement <4 x float> %tmp1, float 0.000000e+00, i32 0 ; <<4 x float>> [#uses=1]
+ %vecins4 = insertelement <4 x float> %vecins, float 0.000000e+00, i32 1; <<4 x float>> [#uses=1]
+ %vecins6 = insertelement <4 x float> %vecins4, float 0.000000e+00, i32 2; <<4 x float>> [#uses=1]
+ %vecins8 = insertelement <4 x float> %vecins6, float 0.000000e+00, i32 3; <<4 x float>> [#uses=1]
+ store <4 x float> %vecins8, <4 x float>* %a
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/vec_demanded_elts.ll b/test/Transforms/InstCombine/vec_demanded_elts.ll
new file mode 100644
index 0000000..2009a77
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -0,0 +1,47 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {fadd float}
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {fmul float}
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep {insertelement.*0.00}
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep {call.*llvm.x86.sse.mul}
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep {call.*llvm.x86.sse.sub}
+; END.
+
+define i16 @test1(float %f) {
+entry:
+ %tmp = insertelement <4 x float> undef, float %f, i32 0 ; <<4 x float>> [#uses=1]
+ %tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
+ %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
+ %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
+ %tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %tmp12, <4 x float> < float 1.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
+ %tmp37 = tail call <4 x float> @llvm.x86.sse.mul.ss( <4 x float> %tmp28, <4 x float> < float 5.000000e-01, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
+ %tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp37, <4 x float> < float 6.553500e+04, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
+ %tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> zeroinitializer ) ; <<4 x float>> [#uses=1]
+ %tmp.upgrd.1 = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 ) ; <i32> [#uses=1]
+ %tmp69 = trunc i32 %tmp.upgrd.1 to i16 ; <i16> [#uses=1]
+ ret i16 %tmp69
+}
+
+define i32 @test2(float %f) {
+ %tmp5 = fmul float %f, %f
+ %tmp9 = insertelement <4 x float> undef, float %tmp5, i32 0
+ %tmp10 = insertelement <4 x float> %tmp9, float 0.000000e+00, i32 1
+ %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2
+ %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3
+ %tmp19 = bitcast <4 x float> %tmp12 to <4 x i32>
+ %tmp21 = extractelement <4 x i32> %tmp19, i32 0
+ ret i32 %tmp21
+}
+
+declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
+
+declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>)
+
+declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
+
+declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>)
+
+declare i32 @llvm.x86.sse.cvttss2si(<4 x float>)
diff --git a/test/Transforms/InstCombine/vec_extract_elt.ll b/test/Transforms/InstCombine/vec_extract_elt.ll
new file mode 100644
index 0000000..63e4ee2
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_extract_elt.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | not grep extractelement
+
+define i32 @test(float %f) {
+ %tmp7 = insertelement <4 x float> undef, float %f, i32 0 ; <<4 x float>> [#uses=1]
+ %tmp17 = bitcast <4 x float> %tmp7 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp19 = extractelement <4 x i32> %tmp17, i32 0 ; <i32> [#uses=1]
+ ret i32 %tmp19
+}
+
diff --git a/test/Transforms/InstCombine/vec_insertelt.ll b/test/Transforms/InstCombine/vec_insertelt.ll
new file mode 100644
index 0000000..eedf882
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_insertelt.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -S | grep {ret <4 x i32> %A}
+
+; PR1286
+define <4 x i32> @test1(<4 x i32> %A) {
+ %B = insertelement <4 x i32> %A, i32 undef, i32 1
+ ret <4 x i32> %B
+}
diff --git a/test/Transforms/InstCombine/vec_narrow.ll b/test/Transforms/InstCombine/vec_narrow.ll
new file mode 100644
index 0000000..daf7bcf
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_narrow.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {add float}
+
+ %V = type <4 x float>
+
+define float @test(%V %A, %V %B, float %f) {
+ %C = insertelement %V %A, float %f, i32 0 ; <%V> [#uses=1]
+ %D = fadd %V %C, %B ; <%V> [#uses=1]
+ %E = extractelement %V %D, i32 0 ; <float> [#uses=1]
+ ret float %E
+}
+
diff --git a/test/Transforms/InstCombine/vec_shuffle.ll b/test/Transforms/InstCombine/vec_shuffle.ll
new file mode 100644
index 0000000..29adc1e
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_shuffle.ll
@@ -0,0 +1,89 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+%T = type <4 x float>
+
+
+define %T @test1(%T %v1) {
+; CHECK: @test1
+; CHECK: ret %T %v1
+ %v2 = shufflevector %T %v1, %T undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret %T %v2
+}
+
+define %T @test2(%T %v1) {
+; CHECK: @test2
+; CHECK: ret %T %v1
+ %v2 = shufflevector %T %v1, %T %v1, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret %T %v2
+}
+
+define float @test3(%T %A, %T %B, float %f) {
+; CHECK: @test3
+; CHECK: ret float %f
+ %C = insertelement %T %A, float %f, i32 0
+ %D = shufflevector %T %C, %T %B, <4 x i32> <i32 5, i32 0, i32 2, i32 7>
+ %E = extractelement %T %D, i32 1
+ ret float %E
+}
+
+define i32 @test4(<4 x i32> %X) {
+; CHECK: @test4
+; CHECK-NEXT: extractelement
+; CHECK-NEXT: ret
+ %tmp152.i53899.i = shufflevector <4 x i32> %X, <4 x i32> undef, <4 x i32> zeroinitializer
+ %tmp34 = extractelement <4 x i32> %tmp152.i53899.i, i32 0
+ ret i32 %tmp34
+}
+
+define i32 @test5(<4 x i32> %X) {
+; CHECK: @test5
+; CHECK-NEXT: extractelement
+; CHECK-NEXT: ret
+ %tmp152.i53899.i = shufflevector <4 x i32> %X, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 undef, i32 undef>
+ %tmp34 = extractelement <4 x i32> %tmp152.i53899.i, i32 0
+ ret i32 %tmp34
+}
+
+define float @test6(<4 x float> %X) {
+; CHECK: @test6
+; CHECK-NEXT: extractelement
+; CHECK-NEXT: ret
+ %X1 = bitcast <4 x float> %X to <4 x i32>
+ %tmp152.i53899.i = shufflevector <4 x i32> %X1, <4 x i32> undef, <4 x i32> zeroinitializer
+ %tmp152.i53900.i = bitcast <4 x i32> %tmp152.i53899.i to <4 x float>
+ %tmp34 = extractelement <4 x float> %tmp152.i53900.i, i32 0
+ ret float %tmp34
+}
+
+define <4 x float> @test7(<4 x float> %tmp45.i) {
+; CHECK: @test7
+; CHECK-NEXT: ret %T %tmp45.i
+ %tmp1642.i = shufflevector <4 x float> %tmp45.i, <4 x float> undef, <4 x i32> < i32 0, i32 1, i32 6, i32 7 >
+ ret <4 x float> %tmp1642.i
+}
+
+; This should turn into a single shuffle.
+define <4 x float> @test8(<4 x float> %tmp, <4 x float> %tmp1) {
+; CHECK: @test8
+; CHECK-NEXT: shufflevector
+; CHECK-NEXT: ret
+ %tmp4 = extractelement <4 x float> %tmp, i32 1
+ %tmp2 = extractelement <4 x float> %tmp, i32 3
+ %tmp1.upgrd.1 = extractelement <4 x float> %tmp1, i32 0
+ %tmp128 = insertelement <4 x float> undef, float %tmp4, i32 0
+ %tmp130 = insertelement <4 x float> %tmp128, float undef, i32 1
+ %tmp132 = insertelement <4 x float> %tmp130, float %tmp2, i32 2
+ %tmp134 = insertelement <4 x float> %tmp132, float %tmp1.upgrd.1, i32 3
+ ret <4 x float> %tmp134
+}
+
+; Test fold of two shuffles where the first shuffle vectors inputs are a
+; different length then the second.
+define <4 x i8> @test9(<16 x i8> %tmp6) nounwind {
+; CHECK: @test9
+; CHECK-NEXT: shufflevector
+; CHECK-NEXT: ret
+ %tmp7 = shufflevector <16 x i8> %tmp6, <16 x i8> undef, <4 x i32> < i32 13, i32 9, i32 4, i32 13 > ; <<4 x i8>> [#uses=1]
+ %tmp9 = shufflevector <4 x i8> %tmp7, <4 x i8> undef, <4 x i32> < i32 3, i32 1, i32 2, i32 0 > ; <<4 x i8>> [#uses=1]
+ ret <4 x i8> %tmp9
+} \ No newline at end of file
diff --git a/test/Transforms/InstCombine/vector-casts.ll b/test/Transforms/InstCombine/vector-casts.ll
new file mode 100644
index 0000000..470d485
--- /dev/null
+++ b/test/Transforms/InstCombine/vector-casts.ll
@@ -0,0 +1,107 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; This turns into a&1 != 0
+define <2 x i1> @test1(<2 x i64> %a) {
+ %t = trunc <2 x i64> %a to <2 x i1>
+ ret <2 x i1> %t
+
+; CHECK: @test1
+; CHECK: and <2 x i64> %a, <i64 1, i64 1>
+; CHECK: icmp ne <2 x i64> %tmp, zeroinitializer
+}
+
+; The ashr turns into an lshr.
+define <2 x i64> @test2(<2 x i64> %a) {
+ %b = and <2 x i64> %a, <i64 65535, i64 65535>
+ %t = ashr <2 x i64> %b, <i64 1, i64 1>
+ ret <2 x i64> %t
+
+; CHECK: @test2
+; CHECK: and <2 x i64> %a, <i64 65535, i64 65535>
+; CHECK: lshr <2 x i64> %b, <i64 1, i64 1>
+}
+
+
+
+define <2 x i64> @test3(<4 x float> %a, <4 x float> %b) nounwind readnone {
+entry:
+ %cmp = fcmp ord <4 x float> %a, zeroinitializer
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %cmp4 = fcmp ord <4 x float> %b, zeroinitializer
+ %sext5 = sext <4 x i1> %cmp4 to <4 x i32>
+ %and = and <4 x i32> %sext, %sext5
+ %conv = bitcast <4 x i32> %and to <2 x i64>
+ ret <2 x i64> %conv
+
+; CHECK: @test3
+; CHECK: fcmp ord <4 x float> %a, %b
+}
+
+define <2 x i64> @test4(<4 x float> %a, <4 x float> %b) nounwind readnone {
+entry:
+ %cmp = fcmp uno <4 x float> %a, zeroinitializer
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %cmp4 = fcmp uno <4 x float> %b, zeroinitializer
+ %sext5 = sext <4 x i1> %cmp4 to <4 x i32>
+ %or = or <4 x i32> %sext, %sext5
+ %conv = bitcast <4 x i32> %or to <2 x i64>
+ ret <2 x i64> %conv
+; CHECK: @test4
+; CHECK: fcmp uno <4 x float> %a, %b
+}
+
+
+
+define void @convert(<2 x i32>* %dst.addr, <2 x i64> %src) nounwind {
+entry:
+ %val = trunc <2 x i64> %src to <2 x i32>
+ %add = add <2 x i32> %val, <i32 1, i32 1>
+ store <2 x i32> %add, <2 x i32>* %dst.addr
+ ret void
+}
+
+define <2 x i65> @foo(<2 x i64> %t) {
+ %a = trunc <2 x i64> %t to <2 x i32>
+ %b = zext <2 x i32> %a to <2 x i65>
+ ret <2 x i65> %b
+}
+define <2 x i64> @bar(<2 x i65> %t) {
+ %a = trunc <2 x i65> %t to <2 x i32>
+ %b = zext <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %b
+}
+define <2 x i65> @foos(<2 x i64> %t) {
+ %a = trunc <2 x i64> %t to <2 x i32>
+ %b = sext <2 x i32> %a to <2 x i65>
+ ret <2 x i65> %b
+}
+define <2 x i64> @bars(<2 x i65> %t) {
+ %a = trunc <2 x i65> %t to <2 x i32>
+ %b = sext <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %b
+}
+define <2 x i64> @quxs(<2 x i64> %t) {
+ %a = trunc <2 x i64> %t to <2 x i32>
+ %b = sext <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %b
+}
+define <2 x i64> @quxt(<2 x i64> %t) {
+ %a = shl <2 x i64> %t, <i64 32, i64 32>
+ %b = ashr <2 x i64> %a, <i64 32, i64 32>
+ ret <2 x i64> %b
+}
+define <2 x double> @fa(<2 x double> %t) {
+ %a = fptrunc <2 x double> %t to <2 x float>
+ %b = fpext <2 x float> %a to <2 x double>
+ ret <2 x double> %b
+}
+define <2 x double> @fb(<2 x double> %t) {
+ %a = fptoui <2 x double> %t to <2 x i64>
+ %b = uitofp <2 x i64> %a to <2 x double>
+ ret <2 x double> %b
+}
+define <2 x double> @fc(<2 x double> %t) {
+ %a = fptosi <2 x double> %t to <2 x i64>
+ %b = sitofp <2 x i64> %a to <2 x double>
+ ret <2 x double> %b
+}
diff --git a/test/Transforms/InstCombine/vector-srem.ll b/test/Transforms/InstCombine/vector-srem.ll
new file mode 100644
index 0000000..acb11c5
--- /dev/null
+++ b/test/Transforms/InstCombine/vector-srem.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {srem <4 x i32>}
+
+define <4 x i32> @foo(<4 x i32> %t, <4 x i32> %u)
+{
+ %k = sdiv <4 x i32> %t, %u
+ %l = mul <4 x i32> %k, %u
+ %m = sub <4 x i32> %t, %l
+ ret <4 x i32> %m
+}
diff --git a/test/Transforms/InstCombine/volatile_store.ll b/test/Transforms/InstCombine/volatile_store.ll
new file mode 100644
index 0000000..5316bd7
--- /dev/null
+++ b/test/Transforms/InstCombine/volatile_store.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep {volatile store}
+; RUN: opt < %s -instcombine -S | grep {volatile load}
+
+@x = weak global i32 0 ; <i32*> [#uses=2]
+
+define void @self_assign_1() {
+entry:
+ %tmp = volatile load i32* @x ; <i32> [#uses=1]
+ volatile store i32 %tmp, i32* @x
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
diff --git a/test/Transforms/InstCombine/xor-undef.ll b/test/Transforms/InstCombine/xor-undef.ll
new file mode 100644
index 0000000..cf72955
--- /dev/null
+++ b/test/Transforms/InstCombine/xor-undef.ll
@@ -0,0 +1,6 @@
+; RUN: opt < %s -instcombine -S | grep zeroinitializer
+
+define <2 x i64> @f() {
+ %tmp = xor <2 x i64> undef, undef
+ ret <2 x i64> %tmp
+}
diff --git a/test/Transforms/InstCombine/xor.ll b/test/Transforms/InstCombine/xor.ll
new file mode 100644
index 0000000..a7bcdac
--- /dev/null
+++ b/test/Transforms/InstCombine/xor.ll
@@ -0,0 +1,193 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep {xor }
+; END.
+@G1 = global i32 0 ; <i32*> [#uses=1]
+@G2 = global i32 0 ; <i32*> [#uses=1]
+
+define i1 @test0(i1 %A) {
+ %B = xor i1 %A, false ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i32 @test1(i32 %A) {
+ %B = xor i32 %A, 0 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i1 @test2(i1 %A) {
+ %B = xor i1 %A, %A ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i32 @test3(i32 %A) {
+ %B = xor i32 %A, %A ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test4(i32 %A) {
+ %NotA = xor i32 -1, %A ; <i32> [#uses=1]
+ %B = xor i32 %A, %NotA ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test5(i32 %A) {
+ %t1 = or i32 %A, 123 ; <i32> [#uses=1]
+ %r = xor i32 %t1, 123 ; <i32> [#uses=1]
+ ret i32 %r
+}
+
+define i8 @test6(i8 %A) {
+ %B = xor i8 %A, 17 ; <i8> [#uses=1]
+ %C = xor i8 %B, 17 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i32 @test7(i32 %A, i32 %B) {
+ %A1 = and i32 %A, 7 ; <i32> [#uses=1]
+ %B1 = and i32 %B, 128 ; <i32> [#uses=1]
+ %C1 = xor i32 %A1, %B1 ; <i32> [#uses=1]
+ ret i32 %C1
+}
+
+define i8 @test8(i1 %c) {
+ %d = xor i1 %c, true ; <i1> [#uses=1]
+ br i1 %d, label %True, label %False
+
+True: ; preds = %0
+ ret i8 1
+
+False: ; preds = %0
+ ret i8 3
+}
+
+define i1 @test9(i8 %A) {
+ %B = xor i8 %A, 123 ; <i8> [#uses=1]
+ %C = icmp eq i8 %B, 34 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i8 @test10(i8 %A) {
+ %B = and i8 %A, 3 ; <i8> [#uses=1]
+ %C = xor i8 %B, 4 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i8 @test11(i8 %A) {
+ %B = or i8 %A, 12 ; <i8> [#uses=1]
+ %C = xor i8 %B, 4 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i1 @test12(i8 %A) {
+ %B = xor i8 %A, 4 ; <i8> [#uses=1]
+ %c = icmp ne i8 %B, 0 ; <i1> [#uses=1]
+ ret i1 %c
+}
+
+define i1 @test13(i8 %A, i8 %B) {
+ %C = icmp ult i8 %A, %B ; <i1> [#uses=1]
+ %D = icmp ugt i8 %A, %B ; <i1> [#uses=1]
+ %E = xor i1 %C, %D ; <i1> [#uses=1]
+ ret i1 %E
+}
+
+define i1 @test14(i8 %A, i8 %B) {
+ %C = icmp eq i8 %A, %B ; <i1> [#uses=1]
+ %D = icmp ne i8 %B, %A ; <i1> [#uses=1]
+ %E = xor i1 %C, %D ; <i1> [#uses=1]
+ ret i1 %E
+}
+
+define i32 @test15(i32 %A) {
+ %B = add i32 %A, -1 ; <i32> [#uses=1]
+ %C = xor i32 %B, -1 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test16(i32 %A) {
+ %B = add i32 %A, 123 ; <i32> [#uses=1]
+ %C = xor i32 %B, -1 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test17(i32 %A) {
+ %B = sub i32 123, %A ; <i32> [#uses=1]
+ %C = xor i32 %B, -1 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test18(i32 %A) {
+ %B = xor i32 %A, -1 ; <i32> [#uses=1]
+ %C = sub i32 123, %B ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test19(i32 %A, i32 %B) {
+ %C = xor i32 %A, %B ; <i32> [#uses=1]
+ %D = xor i32 %C, %A ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define void @test20(i32 %A, i32 %B) {
+ %tmp.2 = xor i32 %B, %A ; <i32> [#uses=2]
+ %tmp.5 = xor i32 %tmp.2, %B ; <i32> [#uses=2]
+ %tmp.8 = xor i32 %tmp.5, %tmp.2 ; <i32> [#uses=1]
+ store i32 %tmp.8, i32* @G1
+ store i32 %tmp.5, i32* @G2
+ ret void
+}
+
+define i32 @test21(i1 %C, i32 %A, i32 %B) {
+ %C2 = xor i1 %C, true ; <i1> [#uses=1]
+ %D = select i1 %C2, i32 %A, i32 %B ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test22(i1 %X) {
+ %Y = xor i1 %X, true ; <i1> [#uses=1]
+ %Z = zext i1 %Y to i32 ; <i32> [#uses=1]
+ %Q = xor i32 %Z, 1 ; <i32> [#uses=1]
+ ret i32 %Q
+}
+
+define i1 @test23(i32 %a, i32 %b) {
+ %tmp.2 = xor i32 %b, %a ; <i32> [#uses=1]
+ %tmp.4 = icmp eq i32 %tmp.2, %a ; <i1> [#uses=1]
+ ret i1 %tmp.4
+}
+
+define i1 @test24(i32 %c, i32 %d) {
+ %tmp.2 = xor i32 %d, %c ; <i32> [#uses=1]
+ %tmp.4 = icmp ne i32 %tmp.2, %c ; <i1> [#uses=1]
+ ret i1 %tmp.4
+}
+
+define i32 @test25(i32 %g, i32 %h) {
+ %h2 = xor i32 %h, -1 ; <i32> [#uses=1]
+ %tmp2 = and i32 %h2, %g ; <i32> [#uses=1]
+ %tmp4 = xor i32 %tmp2, %g ; <i32> [#uses=1]
+ ret i32 %tmp4
+}
+
+define i32 @test26(i32 %a, i32 %b) {
+ %b2 = xor i32 %b, -1 ; <i32> [#uses=1]
+ %tmp2 = xor i32 %a, %b2 ; <i32> [#uses=1]
+ %tmp4 = and i32 %tmp2, %a ; <i32> [#uses=1]
+ ret i32 %tmp4
+}
+
+define i32 @test27(i32 %b, i32 %c, i32 %d) {
+ %tmp2 = xor i32 %d, %b ; <i32> [#uses=1]
+ %tmp5 = xor i32 %d, %c ; <i32> [#uses=1]
+ %tmp = icmp eq i32 %tmp2, %tmp5 ; <i1> [#uses=1]
+ %tmp6 = zext i1 %tmp to i32 ; <i32> [#uses=1]
+ ret i32 %tmp6
+}
+
+define i32 @test28(i32 %indvar) {
+ %tmp7 = add i32 %indvar, -2147483647 ; <i32> [#uses=1]
+ %tmp214 = xor i32 %tmp7, -2147483648 ; <i32> [#uses=1]
+ ret i32 %tmp214
+}
diff --git a/test/Transforms/InstCombine/xor2.ll b/test/Transforms/InstCombine/xor2.ll
new file mode 100644
index 0000000..de3d65d
--- /dev/null
+++ b/test/Transforms/InstCombine/xor2.ll
@@ -0,0 +1,53 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; PR1253
+define i1 @test0(i32 %A) {
+; CHECK: @test0
+; CHECK: %C = icmp slt i32 %A, 0
+ %B = xor i32 %A, -2147483648
+ %C = icmp sgt i32 %B, -1
+ ret i1 %C
+}
+
+define i1 @test1(i32 %A) {
+; CHECK: @test1
+; CHECK: %C = icmp slt i32 %A, 0
+ %B = xor i32 %A, 12345
+ %C = icmp slt i32 %B, 0
+ ret i1 %C
+}
+
+; PR1014
+define i32 @test2(i32 %tmp1) {
+; CHECK: @test2
+; CHECK-NEXT: or i32 %tmp1, 8
+; CHECK-NEXT: and i32
+; CHECK-NEXT: ret i32
+ %ovm = and i32 %tmp1, 32
+ %ov3 = add i32 %ovm, 145
+ %ov110 = xor i32 %ov3, 153
+ ret i32 %ov110
+}
+
+define i32 @test3(i32 %tmp1) {
+; CHECK: @test3
+; CHECK-NEXT: or i32 %tmp1, 8
+; CHECK-NEXT: and i32
+; CHECK-NEXT: ret i32
+ %ovm = or i32 %tmp1, 145
+ %ov31 = and i32 %ovm, 177
+ %ov110 = xor i32 %ov31, 153
+ ret i32 %ov110
+}
+
+define i32 @test4(i32 %A, i32 %B) {
+ %1 = xor i32 %A, -1
+ %2 = ashr i32 %1, %B
+ %3 = xor i32 %2, -1
+ ret i32 %3
+; CHECK: @test4
+; CHECK: %1 = ashr i32 %A, %B
+; CHECK: ret i32 %1
+}
diff --git a/test/Transforms/InstCombine/zero-point-zero-add.ll b/test/Transforms/InstCombine/zero-point-zero-add.ll
new file mode 100644
index 0000000..d07a9f4
--- /dev/null
+++ b/test/Transforms/InstCombine/zero-point-zero-add.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine -S | grep 0.0 | count 1
+
+declare double @abs(double)
+
+define double @test(double %X) {
+ %Y = fadd double %X, 0.0 ;; Should be a single add x, 0.0
+ %Z = fadd double %Y, 0.0
+ ret double %Z
+}
+
+define double @test1(double %X) {
+ %Y = call double @abs(double %X)
+ %Z = fadd double %Y, 0.0
+ ret double %Z
+}
diff --git a/test/Transforms/InstCombine/zeroext-and-reduce.ll b/test/Transforms/InstCombine/zeroext-and-reduce.ll
new file mode 100644
index 0000000..592b8a1
--- /dev/null
+++ b/test/Transforms/InstCombine/zeroext-and-reduce.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {and i32 %Y, 8}
+
+define i32 @test1(i8 %X) {
+ %Y = zext i8 %X to i32 ; <i32> [#uses=1]
+ %Z = and i32 %Y, 65544 ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+
diff --git a/test/Transforms/InstCombine/zext-bool-add-sub.ll b/test/Transforms/InstCombine/zext-bool-add-sub.ll
new file mode 100644
index 0000000..1164273
--- /dev/null
+++ b/test/Transforms/InstCombine/zext-bool-add-sub.ll
@@ -0,0 +1,29 @@
+; RUN: opt < %s -instcombine -S | not grep zext
+
+define i32 @a(i1 %x) {
+entry:
+ %y = zext i1 %x to i32
+ %res = add i32 %y, 1
+ ret i32 %res
+}
+
+define i32 @b(i1 %x) {
+entry:
+ %y = zext i1 %x to i32
+ %res = add i32 %y, -1
+ ret i32 %res
+}
+
+define i32 @c(i1 %x) {
+entry:
+ %y = zext i1 %x to i32
+ %res = sub i32 0, %y
+ ret i32 %res
+}
+
+define i32 @d(i1 %x) {
+entry:
+ %y = zext i1 %x to i32
+ %res = sub i32 3, %y
+ ret i32 %res
+}
diff --git a/test/Transforms/InstCombine/zext-fold.ll b/test/Transforms/InstCombine/zext-fold.ll
new file mode 100644
index 0000000..9521101
--- /dev/null
+++ b/test/Transforms/InstCombine/zext-fold.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | grep {zext } | count 1
+; PR1570
+
+define i32 @test2(float %X, float %Y) {
+entry:
+ %tmp3 = fcmp uno float %X, %Y ; <i1> [#uses=1]
+ %tmp34 = zext i1 %tmp3 to i8 ; <i8> [#uses=1]
+ %tmp = xor i8 %tmp34, 1 ; <i8> [#uses=1]
+ %toBoolnot5 = zext i8 %tmp to i32 ; <i32> [#uses=1]
+ ret i32 %toBoolnot5
+}
+
diff --git a/test/Transforms/InstCombine/zext-or-icmp.ll b/test/Transforms/InstCombine/zext-or-icmp.ll
new file mode 100644
index 0000000..969c301
--- /dev/null
+++ b/test/Transforms/InstCombine/zext-or-icmp.ll
@@ -0,0 +1,35 @@
+; RUN: opt < %s -instcombine -S | grep icmp | count 1
+
+ %struct.FooBar = type <{ i8, i8, [2 x i8], i8, i8, i8, i8, i16, i16, [4 x i8], [8 x %struct.Rock] }>
+ %struct.Rock = type { i16, i16 }
+@some_idx = internal constant [4 x i8] c"\0A\0B\0E\0F" ; <[4 x i8]*> [#uses=1]
+
+define i8 @t(%struct.FooBar* %up, i8 zeroext %intra_flag, i32 %blk_i) zeroext nounwind {
+entry:
+ %tmp2 = lshr i32 %blk_i, 1 ; <i32> [#uses=1]
+ %tmp3 = and i32 %tmp2, 2 ; <i32> [#uses=1]
+ %tmp5 = and i32 %blk_i, 1 ; <i32> [#uses=1]
+ %tmp6 = or i32 %tmp3, %tmp5 ; <i32> [#uses=1]
+ %tmp8 = getelementptr %struct.FooBar* %up, i32 0, i32 7 ; <i16*> [#uses=1]
+ %tmp9 = load i16* %tmp8, align 1 ; <i16> [#uses=1]
+ %tmp910 = zext i16 %tmp9 to i32 ; <i32> [#uses=1]
+ %tmp12 = getelementptr [4 x i8]* @some_idx, i32 0, i32 %tmp6 ; <i8*> [#uses=1]
+ %tmp13 = load i8* %tmp12, align 1 ; <i8> [#uses=1]
+ %tmp1314 = zext i8 %tmp13 to i32 ; <i32> [#uses=1]
+ %tmp151 = lshr i32 %tmp910, %tmp1314 ; <i32> [#uses=1]
+ %tmp1516 = trunc i32 %tmp151 to i8 ; <i8> [#uses=1]
+ %tmp18 = getelementptr %struct.FooBar* %up, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp19 = load i8* %tmp18, align 1 ; <i8> [#uses=1]
+ %tmp22 = and i8 %tmp1516, %tmp19 ; <i8> [#uses=1]
+ %tmp24 = getelementptr %struct.FooBar* %up, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp25 = load i8* %tmp24, align 1 ; <i8> [#uses=1]
+ %tmp26.mask = and i8 %tmp25, 1 ; <i8> [#uses=1]
+ %toBool = icmp eq i8 %tmp26.mask, 0 ; <i1> [#uses=1]
+ %toBool.not = xor i1 %toBool, true ; <i1> [#uses=1]
+ %toBool33 = icmp eq i8 %intra_flag, 0 ; <i1> [#uses=1]
+ %bothcond = or i1 %toBool.not, %toBool33 ; <i1> [#uses=1]
+ %iftmp.1.0 = select i1 %bothcond, i8 0, i8 1 ; <i8> [#uses=1]
+ %tmp40 = or i8 %tmp22, %iftmp.1.0 ; <i8> [#uses=1]
+ %tmp432 = and i8 %tmp40, 1 ; <i8> [#uses=1]
+ ret i8 %tmp432
+}
diff --git a/test/Transforms/InstCombine/zext.ll b/test/Transforms/InstCombine/zext.ll
new file mode 100644
index 0000000..10eabf7
--- /dev/null
+++ b/test/Transforms/InstCombine/zext.ll
@@ -0,0 +1,11 @@
+; Tests to make sure elimination of casts is working correctly
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i64 @test_sext_zext(i16 %A) {
+ %c1 = zext i16 %A to i32 ; <i32> [#uses=1]
+ %c2 = sext i32 %c1 to i64 ; <i64> [#uses=1]
+ ret i64 %c2
+; CHECK-NOT: %c1
+; CHECK: %c2 = zext i16 %A to i64
+; CHECK: ret i64 %c2
+}