author    Dan Gohman <gohman@apple.com>  2009-06-04 22:49:04 +0000
committer Dan Gohman <gohman@apple.com>  2009-06-04 22:49:04 +0000
commit    7ce405e7aa026ef683da9eb8cc39cce87fcfa1d6 (patch)
tree      768333097a76cc105813c7c636daf6259e6a0fc7 /test/Transforms
parent    66bd777e9009fd6a606532f9ed96745e86f3937c (diff)
Split the Add, Sub, and Mul instruction opcodes into separate integer and floating-point opcodes, introducing FAdd, FSub, and FMul.

For now, the AsmParser, BitcodeReader, and IRBuilder all preserve backwards compatibility, and the Core LLVM APIs preserve backwards compatibility for IR producers. Most front-ends won't need to change immediately.

This implements the first step of the plan outlined here:
http://nondot.org/sabre/LLVMNotes/IntegerOverflow.txt

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@72897 91177308-0d34-0410-b5e6-96231b3b80d8
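As a minimal sketch of what the opcode split means for IR producers (the value names %t, %x, %y, %v, %w are illustrative, not taken from the patch), the same floating-point operations are now written with an explicit f-prefixed opcode, while integer add/sub/mul keep their old spelling:

    ; before                           ; after
    %t = add double %x, %y        ->   %t = fadd double %x, %y
    %t = sub float %x, %y         ->   %t = fsub float %x, %y
    %t = mul <4 x float> %v, %w   ->   %t = fmul <4 x float> %v, %w

Only add, sub, and mul on floating-point and floating-point-vector types move to the new opcodes; operations on integer types are unaffected. The test updates below apply exactly this renaming.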
Diffstat (limited to 'test/Transforms')
-rw-r--r-- test/Transforms/ConstProp/calls.ll | 6
-rw-r--r-- test/Transforms/DeadStoreElimination/2006-06-27-AST-Remove.ll | 2
-rw-r--r-- test/Transforms/GVNPRE/2007-06-18-ConstantInPhi.ll | 4
-rw-r--r-- test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll | 2
-rw-r--r-- test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll | 4
-rw-r--r-- test/Transforms/GlobalOpt/constantexpr-dangle.ll | 2
-rw-r--r-- test/Transforms/IndVarSimplify/2006-12-10-BitCast.ll | 2
-rw-r--r-- test/Transforms/IndVarSimplify/2008-11-03-Floating.ll | 8
-rw-r--r-- test/Transforms/IndVarSimplify/2008-11-17-Floating.ll | 4
-rw-r--r-- test/Transforms/IndVarSimplify/2008-11-25-APFloatAssert.ll | 2
-rw-r--r-- test/Transforms/IndVarSimplify/2009-04-27-Floating.ll | 2
-rw-r--r-- test/Transforms/IndVarSimplify/iv-zext.ll | 6
-rw-r--r-- test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll | 4
-rw-r--r-- test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll | 4
-rw-r--r-- test/Transforms/InstCombine/2008-07-16-fsub.ll | 2
-rw-r--r-- test/Transforms/InstCombine/add-sitofp.ll | 2
-rw-r--r-- test/Transforms/InstCombine/dce-iterate.ll | 2
-rw-r--r-- test/Transforms/InstCombine/fpextend.ll | 4
-rw-r--r-- test/Transforms/InstCombine/mul.ll | 4
-rw-r--r-- test/Transforms/InstCombine/multi-use-or.ll | 2
-rw-r--r-- test/Transforms/InstCombine/shufflemask-undef.ll | 10
-rw-r--r-- test/Transforms/InstCombine/signed-comparison.ll | 2
-rw-r--r-- test/Transforms/InstCombine/sitofp.ll | 2
-rw-r--r-- test/Transforms/InstCombine/vec_demanded_elts.ll | 6
-rw-r--r-- test/Transforms/InstCombine/vec_narrow.ll | 2
-rw-r--r-- test/Transforms/InstCombine/zero-point-zero-add.ll | 6
-rw-r--r-- test/Transforms/LCSSA/2007-07-12-LICM-2.ll | 8
-rw-r--r-- test/Transforms/LCSSA/2007-07-12-LICM-3.ll | 8
-rw-r--r-- test/Transforms/LCSSA/2007-07-12-LICM.ll | 2
-rw-r--r-- test/Transforms/LoopIndexSplit/2007-09-24-UpdateIterationSpace.ll | 10
-rw-r--r-- test/Transforms/LoopIndexSplit/2007-09-25-UpdateIterationSpace-2.ll | 10
-rw-r--r-- test/Transforms/Mem2Reg/PromoteMemToRegister.ll | 2
-rw-r--r-- test/Transforms/MemCpyOpt/memcpy.ll | 2
-rw-r--r-- test/Transforms/MemCpyOpt/sret.ll | 2
-rw-r--r-- test/Transforms/PruneEH/2008-09-05-CGUpdate.ll | 10
-rw-r--r-- test/Transforms/Reassociate/2006-04-27-ReassociateVector.ll | 4
-rw-r--r-- test/Transforms/SCCP/2006-12-04-PackedType.ll | 4
-rw-r--r-- test/Transforms/SCCP/apint-ipsccp4.ll | 4
-rw-r--r-- test/Transforms/ScalarRepl/2009-03-17-CleanUp.ll | 64
-rw-r--r-- test/Transforms/ScalarRepl/copy-aggregate.ll | 2
-rw-r--r-- test/Transforms/ScalarRepl/memcpy-from-global.ll | 16
-rw-r--r-- test/Transforms/ScalarRepl/vector_promote.ll | 12
-rw-r--r-- test/Transforms/SimplifyCFG/2006-10-29-InvokeCrash.ll | 14
-rw-r--r-- test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll | 2
-rw-r--r-- test/Transforms/SimplifyCFG/2008-04-27-MultipleReturnCrash.ll | 2
-rw-r--r-- test/Transforms/SimplifyCFG/2009-05-12-externweak.ll | 2
-rw-r--r-- test/Transforms/SimplifyLibCalls/half-powr.ll | 2
47 files changed, 139 insertions, 139 deletions
diff --git a/test/Transforms/ConstProp/calls.ll b/test/Transforms/ConstProp/calls.ll
index 126db4c..c573e56 100644
--- a/test/Transforms/ConstProp/calls.ll
+++ b/test/Transforms/ConstProp/calls.ll
@@ -13,11 +13,11 @@ declare i1 @llvm.isunordered.f64(double, double)
define double @T() {
%A = call double @cos( double 0.000000e+00 ) ; <double> [#uses=1]
%B = call double @sin( double 0.000000e+00 ) ; <double> [#uses=1]
- %a = add double %A, %B ; <double> [#uses=1]
+ %a = fadd double %A, %B ; <double> [#uses=1]
%C = call double @tan( double 0.000000e+00 ) ; <double> [#uses=1]
- %b = add double %a, %C ; <double> [#uses=1]
+ %b = fadd double %a, %C ; <double> [#uses=1]
%D = call double @sqrt( double 4.000000e+00 ) ; <double> [#uses=1]
- %c = add double %b, %D ; <double> [#uses=1]
+ %c = fadd double %b, %D ; <double> [#uses=1]
ret double %c
}
diff --git a/test/Transforms/DeadStoreElimination/2006-06-27-AST-Remove.ll b/test/Transforms/DeadStoreElimination/2006-06-27-AST-Remove.ll
index 50dcf32..3b3f8ad 100644
--- a/test/Transforms/DeadStoreElimination/2006-06-27-AST-Remove.ll
+++ b/test/Transforms/DeadStoreElimination/2006-06-27-AST-Remove.ll
@@ -601,7 +601,7 @@ entry:
%tmp21362 = icmp eq i32 0, 0 ; <i1> [#uses=2]
%tmp216 = sitofp i32 %pn_restart.0.ph to float ; <float> [#uses=1]
%tmp216.upgrd.177 = fpext float %tmp216 to double ; <double> [#uses=1]
- %tmp217 = add double %tmp216.upgrd.177, 1.000000e+00 ; <double> [#uses=1]
+ %tmp217 = fadd double %tmp216.upgrd.177, 1.000000e+00 ; <double> [#uses=1]
%tmp835 = icmp sgt i32 %pn_restart.0.ph, 9 ; <i1> [#uses=0]
store i32 0, i32* @nodes
store i32 0, i32* @qnodes
diff --git a/test/Transforms/GVNPRE/2007-06-18-ConstantInPhi.ll b/test/Transforms/GVNPRE/2007-06-18-ConstantInPhi.ll
index b4cb517..180105a 100644
--- a/test/Transforms/GVNPRE/2007-06-18-ConstantInPhi.ll
+++ b/test/Transforms/GVNPRE/2007-06-18-ConstantInPhi.ll
@@ -10,8 +10,8 @@ bb.nph: ; preds = %entry
bb34: ; preds = %bb34, %bb.nph
%p.1 = phi float [ 0x3FE6A09E60000000, %bb.nph ], [ %tmp48, %bb34 ] ; <float> [#uses=1]
%tmp44 = load float* null ; <float> [#uses=1]
- %tmp46 = sub float %tmp44, 0.000000e+00 ; <float> [#uses=1]
- %tmp48 = mul float %tmp46, %p.1 ; <float> [#uses=1]
+ %tmp46 = fsub float %tmp44, 0.000000e+00 ; <float> [#uses=1]
+ %tmp48 = fmul float %tmp46, %p.1 ; <float> [#uses=1]
br i1 false, label %bb57, label %bb34
bb57: ; preds = %bb34
diff --git a/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll b/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
index 0a8dd49..779e7fb 100644
--- a/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
+++ b/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
@@ -4,6 +4,6 @@
define double @foo() nounwind {
entry:
%tmp1 = volatile load double* @t0.1441, align 8 ; <double> [#uses=2]
- %tmp4 = mul double %tmp1, %tmp1 ; <double> [#uses=1]
+ %tmp4 = fmul double %tmp1, %tmp1 ; <double> [#uses=1]
ret double %tmp4
}
diff --git a/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll b/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll
index 3464be9..8a0b5b3 100644
--- a/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll
+++ b/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll
@@ -26,7 +26,7 @@ define double @test2() {
%V1 = load double* getelementptr (%T* @G, i32 0, i32 0), align 16
%V2 = load double* getelementptr (%T* @G, i32 0, i32 1), align 8
%V3 = load double* getelementptr (%T* @G, i32 0, i32 2), align 16
- %R = add double %V1, %V2
- %R2 = add double %R, %V3
+ %R = fadd double %V1, %V2
+ %R2 = fadd double %R, %V3
ret double %R2
}
diff --git a/test/Transforms/GlobalOpt/constantexpr-dangle.ll b/test/Transforms/GlobalOpt/constantexpr-dangle.ll
index 6e33ae0..6fa139b 100644
--- a/test/Transforms/GlobalOpt/constantexpr-dangle.ll
+++ b/test/Transforms/GlobalOpt/constantexpr-dangle.ll
@@ -7,7 +7,7 @@ define internal float @foo() {
define float @bar() {
%tmp1 = call float (...)* bitcast (float ()* @foo to float (...)*)( )
- %tmp2 = mul float %tmp1, 1.000000e+01 ; <float> [#uses=1]
+ %tmp2 = fmul float %tmp1, 1.000000e+01 ; <float> [#uses=1]
ret float %tmp2
}
diff --git a/test/Transforms/IndVarSimplify/2006-12-10-BitCast.ll b/test/Transforms/IndVarSimplify/2006-12-10-BitCast.ll
index 903e81d..b2f8258 100644
--- a/test/Transforms/IndVarSimplify/2006-12-10-BitCast.ll
+++ b/test/Transforms/IndVarSimplify/2006-12-10-BitCast.ll
@@ -18,7 +18,7 @@ cond_true52: ; preds = %cond_true27
cond_next182.i: ; preds = %cond_next182.i, %cond_true52
%decay.i.0 = phi i32 [ %tmp195.i.upgrd.1, %cond_next182.i ], [ %tmp152.i, %cond_true52 ] ; <i32> [#uses=1]
%tmp194.i53 = bitcast i32 %decay.i.0 to float ; <float> [#uses=1]
- %tmp195.i = sub float %tmp194.i53, 8.000000e+00 ; <float> [#uses=1]
+ %tmp195.i = fsub float %tmp194.i53, 8.000000e+00 ; <float> [#uses=1]
%tmp195.i.upgrd.1 = bitcast float %tmp195.i to i32 ; <i32> [#uses=1]
br i1 false, label %cond_next182.i, label %bb418.i.preheader
diff --git a/test/Transforms/IndVarSimplify/2008-11-03-Floating.ll b/test/Transforms/IndVarSimplify/2008-11-03-Floating.ll
index 6fc065f..be8b36f 100644
--- a/test/Transforms/IndVarSimplify/2008-11-03-Floating.ll
+++ b/test/Transforms/IndVarSimplify/2008-11-03-Floating.ll
@@ -6,7 +6,7 @@ entry:
bb: ; preds = %bb, %entry
%x.0.reg2mem.0 = phi double [ 0.000000e+00, %entry ], [ %1, %bb ] ; <double> [#uses=2]
%0 = tail call i32 @foo(double %x.0.reg2mem.0) nounwind ; <i32> [#uses=0]
- %1 = add double %x.0.reg2mem.0, 1.000000e+00 ; <double> [#uses=2]
+ %1 = fadd double %x.0.reg2mem.0, 1.000000e+00 ; <double> [#uses=2]
%2 = fcmp olt double %1, 1.000000e+04 ; <i1> [#uses=1]
br i1 %2, label %bb, label %return
@@ -23,7 +23,7 @@ entry:
bb: ; preds = %bb, %entry
%x.0.reg2mem.0 = phi double [ -10.000000e+00, %entry ], [ %1, %bb ] ; <double> [#uses=2]
%0 = tail call i32 @foo(double %x.0.reg2mem.0) nounwind ; <i32> [#uses=0]
- %1 = add double %x.0.reg2mem.0, 2.000000e+00 ; <double> [#uses=2]
+ %1 = fadd double %x.0.reg2mem.0, 2.000000e+00 ; <double> [#uses=2]
%2 = fcmp olt double %1, -1.000000e+00 ; <i1> [#uses=1]
br i1 %2, label %bb, label %return
@@ -39,7 +39,7 @@ entry:
bb: ; preds = %bb, %entry
%x.0.reg2mem.0 = phi double [ 0.000000e+00, %entry ], [ %1, %bb ] ; <double> [#uses=2]
%0 = tail call i32 @foo(double %x.0.reg2mem.0) nounwind ; <i32> [#uses=0]
- %1 = add double %x.0.reg2mem.0, 1.000000e+00 ; <double> [#uses=2]
+ %1 = fadd double %x.0.reg2mem.0, 1.000000e+00 ; <double> [#uses=2]
%2 = fcmp olt double %1, -1.000000e+00 ; <i1> [#uses=1]
br i1 %2, label %bb, label %return
@@ -54,7 +54,7 @@ entry:
bb: ; preds = %bb, %entry
%x.0.reg2mem.0 = phi double [ 40.000000e+00, %entry ], [ %1, %bb ] ; <double> [#uses=2]
%0 = tail call i32 @foo(double %x.0.reg2mem.0) nounwind ; <i32> [#uses=0]
- %1 = add double %x.0.reg2mem.0, -1.000000e+00 ; <double> [#uses=2]
+ %1 = fadd double %x.0.reg2mem.0, -1.000000e+00 ; <double> [#uses=2]
%2 = fcmp olt double %1, 1.000000e+00 ; <i1> [#uses=1]
br i1 %2, label %bb, label %return
diff --git a/test/Transforms/IndVarSimplify/2008-11-17-Floating.ll b/test/Transforms/IndVarSimplify/2008-11-17-Floating.ll
index faf1da3..c947d3b 100644
--- a/test/Transforms/IndVarSimplify/2008-11-17-Floating.ll
+++ b/test/Transforms/IndVarSimplify/2008-11-17-Floating.ll
@@ -9,7 +9,7 @@ entry:
bb: ; preds = %bb, %entry
%x.0.reg2mem.0 = phi double [ 0.000000e+00, %entry ], [ %1, %bb ] ; <double> [#uses=2]
%0 = tail call i32 @foo(double %x.0.reg2mem.0) nounwind ; <i32> [#uses=0]
- %1 = add double %x.0.reg2mem.0, 1.0e+0 ; <double> [#uses=2]
+ %1 = fadd double %x.0.reg2mem.0, 1.0e+0 ; <double> [#uses=2]
%2 = fcmp olt double %1, 2147483646.0e+0 ; <i1> [#uses=1]
br i1 %2, label %bb, label %return
@@ -24,7 +24,7 @@ entry:
bb: ; preds = %bb, %entry
%x.0.reg2mem.0 = phi double [ 0.000000e+00, %entry ], [ %1, %bb ] ; <double> [#uses=2]
%0 = tail call i32 @foo(double %x.0.reg2mem.0) nounwind ; <i32> [#uses=0]
- %1 = add double %x.0.reg2mem.0, 1.0e+0 ; <double> [#uses=2]
+ %1 = fadd double %x.0.reg2mem.0, 1.0e+0 ; <double> [#uses=2]
%2 = fcmp olt double %1, 2147483647.0e+0 ; <i1> [#uses=1]
br i1 %2, label %bb, label %return
diff --git a/test/Transforms/IndVarSimplify/2008-11-25-APFloatAssert.ll b/test/Transforms/IndVarSimplify/2008-11-25-APFloatAssert.ll
index 9fd0eb9..e611b1f 100644
--- a/test/Transforms/IndVarSimplify/2008-11-25-APFloatAssert.ll
+++ b/test/Transforms/IndVarSimplify/2008-11-25-APFloatAssert.ll
@@ -6,6 +6,6 @@ entry:
bb23.i91: ; preds = %bb23.i91, %entry
%result.0.i89 = phi ppc_fp128 [ 0xM00000000000000000000000000000000, %entry ], [ %0, %bb23.i91 ] ; <ppc_fp128> [#uses=2]
- %0 = mul ppc_fp128 %result.0.i89, %result.0.i89 ; <ppc_fp128> [#uses=1]
+ %0 = fmul ppc_fp128 %result.0.i89, %result.0.i89 ; <ppc_fp128> [#uses=1]
br label %bb23.i91
}
diff --git a/test/Transforms/IndVarSimplify/2009-04-27-Floating.ll b/test/Transforms/IndVarSimplify/2009-04-27-Floating.ll
index 700f294..e70d577 100644
--- a/test/Transforms/IndVarSimplify/2009-04-27-Floating.ll
+++ b/test/Transforms/IndVarSimplify/2009-04-27-Floating.ll
@@ -9,7 +9,7 @@ entry:
loop_body:
%i = phi float [ %nexti, %loop_body ], [ 0.0, %entry ]
tail call void @foo()
- %nexti = add float %i, 1.0
+ %nexti = fadd float %i, 1.0
%less = fcmp olt float %nexti, 2.0
br i1 %less, label %loop_body, label %done
diff --git a/test/Transforms/IndVarSimplify/iv-zext.ll b/test/Transforms/IndVarSimplify/iv-zext.ll
index 76d48de..d7eb7bd 100644
--- a/test/Transforms/IndVarSimplify/iv-zext.ll
+++ b/test/Transforms/IndVarSimplify/iv-zext.ll
@@ -13,16 +13,16 @@ loop:
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double* %d, i64 %indvar.i8
%t1 = load double* %t0
- %t2 = mul double %t1, 0.1
+ %t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double* %d, i64 %indvar.i24
%t4 = load double* %t3
- %t5 = mul double %t4, 2.3
+ %t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double* %d, i64 %indvar
%t7 = load double* %t6
- %t8 = mul double %t7, 4.5
+ %t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, 10
diff --git a/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll b/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll
index 5ad0af4..c7cf0dd 100644
--- a/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll
+++ b/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll
@@ -2,8 +2,8 @@
; RUN: grep mul | count 2
define <4 x float> @test(<4 x float> %V) {
- %Y = mul <4 x float> %V, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
- %Z = mul <4 x float> %Y, < float 1.000000e+00, float 2.000000e+05, float -3.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
+ %Y = fmul <4 x float> %V, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
+ %Z = fmul <4 x float> %Y, < float 1.000000e+00, float 2.000000e+05, float -3.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
ret <4 x float> %Z
}
diff --git a/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll b/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll
index 60ee503..eaf10a3 100644
--- a/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll
+++ b/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll
@@ -3,7 +3,7 @@
define <4 x float> @test(<4 x float> %tmp26, <4 x float> %tmp53) {
; (X+Y)-Y != X for fp vectors.
- %tmp64 = add <4 x float> %tmp26, %tmp53 ; <<4 x float>> [#uses=1]
- %tmp75 = sub <4 x float> %tmp64, %tmp53 ; <<4 x float>> [#uses=1]
+ %tmp64 = fadd <4 x float> %tmp26, %tmp53 ; <<4 x float>> [#uses=1]
+ %tmp75 = fsub <4 x float> %tmp64, %tmp53 ; <<4 x float>> [#uses=1]
ret <4 x float> %tmp75
}
diff --git a/test/Transforms/InstCombine/2008-07-16-fsub.ll b/test/Transforms/InstCombine/2008-07-16-fsub.ll
index 1d0554d..ca4174d 100644
--- a/test/Transforms/InstCombine/2008-07-16-fsub.ll
+++ b/test/Transforms/InstCombine/2008-07-16-fsub.ll
@@ -3,6 +3,6 @@
define double @test(double %X) nounwind {
; fsub of self can't be optimized away.
- %Y = sub double %X, %X
+ %Y = fsub double %X, %X
ret double %Y
}
diff --git a/test/Transforms/InstCombine/add-sitofp.ll b/test/Transforms/InstCombine/add-sitofp.ll
index 35c6567..298b9a1 100644
--- a/test/Transforms/InstCombine/add-sitofp.ll
+++ b/test/Transforms/InstCombine/add-sitofp.ll
@@ -4,6 +4,6 @@ define double @x(i32 %a, i32 %b) nounwind {
%m = lshr i32 %a, 24
%n = and i32 %m, %b
%o = sitofp i32 %n to double
- %p = add double %o, 1.0
+ %p = fadd double %o, 1.0
ret double %p
}
diff --git a/test/Transforms/InstCombine/dce-iterate.ll b/test/Transforms/InstCombine/dce-iterate.ll
index e222970..faefa8a 100644
--- a/test/Transforms/InstCombine/dce-iterate.ll
+++ b/test/Transforms/InstCombine/dce-iterate.ll
@@ -18,7 +18,7 @@ entry:
%c = lshr i960 %sz101112.ins, 320 ; <i960> [#uses=1]
%d = trunc i960 %c to i64 ; <i64> [#uses=1]
%e = bitcast i64 %d to double ; <double> [#uses=1]
- %f = add double %b, %e
+ %f = fadd double %b, %e
ret double %e
}
diff --git a/test/Transforms/InstCombine/fpextend.ll b/test/Transforms/InstCombine/fpextend.ll
index 5971080..c212128 100644
--- a/test/Transforms/InstCombine/fpextend.ll
+++ b/test/Transforms/InstCombine/fpextend.ll
@@ -6,7 +6,7 @@ define void @test() nounwind {
entry:
%tmp = load float* @X, align 4 ; <float> [#uses=1]
%tmp1 = fpext float %tmp to double ; <double> [#uses=1]
- %tmp3 = add double %tmp1, 0.000000e+00 ; <double> [#uses=1]
+ %tmp3 = fadd double %tmp1, 0.000000e+00 ; <double> [#uses=1]
%tmp34 = fptrunc double %tmp3 to float ; <float> [#uses=1]
store float %tmp34, float* @X, align 4
ret void
@@ -28,7 +28,7 @@ define void @test4() nounwind {
entry:
%tmp = load float* @X, align 4 ; <float> [#uses=1]
%tmp1 = fpext float %tmp to double ; <double> [#uses=1]
- %tmp2 = sub double -0.000000e+00, %tmp1 ; <double> [#uses=1]
+ %tmp2 = fsub double -0.000000e+00, %tmp1 ; <double> [#uses=1]
%tmp34 = fptrunc double %tmp2 to float ; <float> [#uses=1]
store float %tmp34, float* @X, align 4
ret void
diff --git a/test/Transforms/InstCombine/mul.ll b/test/Transforms/InstCombine/mul.ll
index 0455a3f..9b5f7a5 100644
--- a/test/Transforms/InstCombine/mul.ll
+++ b/test/Transforms/InstCombine/mul.ll
@@ -20,7 +20,7 @@ define i32 @test3(i32 %A) {
define double @test4(double %A) {
; This is safe for FP
- %B = mul double 1.000000e+00, %A ; <double> [#uses=1]
+ %B = fmul double 1.000000e+00, %A ; <double> [#uses=1]
ret double %B
}
@@ -79,7 +79,7 @@ define i32 @test12(i8 %a, i32 %b) {
; PR2642
define internal void @test13(<4 x float>*) {
load <4 x float>* %0, align 1
- mul <4 x float> %2, < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >
+ fmul <4 x float> %2, < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >
store <4 x float> %3, <4 x float>* %0, align 1
ret void
}
diff --git a/test/Transforms/InstCombine/multi-use-or.ll b/test/Transforms/InstCombine/multi-use-or.ll
index 85a8b34..4804967 100644
--- a/test/Transforms/InstCombine/multi-use-or.ll
+++ b/test/Transforms/InstCombine/multi-use-or.ll
@@ -17,7 +17,7 @@ entry:
%c = lshr i192 %sy222324.ins, 128 ; <i192> [#uses=1]
%d = trunc i192 %c to i64 ; <i64> [#uses=1]
%e = bitcast i64 %d to double ; <double> [#uses=1]
- %f = add double %b, %e
+ %f = fadd double %b, %e
; ret double %e
ret double %f
diff --git a/test/Transforms/InstCombine/shufflemask-undef.ll b/test/Transforms/InstCombine/shufflemask-undef.ll
index 2438417..a9e8d34 100644
--- a/test/Transforms/InstCombine/shufflemask-undef.ll
+++ b/test/Transforms/InstCombine/shufflemask-undef.ll
@@ -75,16 +75,16 @@ bb266.i:
shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>>:3 [#uses=1]
shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>>:4 [#uses=1]
shufflevector <4 x float> %4, <4 x float> %3, <4 x i32> < i32 6, i32 7, i32 2, i32 3 > ; <<4 x float>>:5 [#uses=1]
- mul <4 x float> %5, zeroinitializer ; <<4 x float>>:6 [#uses=2]
- mul <4 x float> %6, %6 ; <<4 x float>>:7 [#uses=1]
- add <4 x float> zeroinitializer, %7 ; <<4 x float>>:8 [#uses=1]
+ fmul <4 x float> %5, zeroinitializer ; <<4 x float>>:6 [#uses=2]
+ fmul <4 x float> %6, %6 ; <<4 x float>>:7 [#uses=1]
+ fadd <4 x float> zeroinitializer, %7 ; <<4 x float>>:8 [#uses=1]
call <4 x float> @llvm.x86.sse.max.ps( <4 x float> zeroinitializer, <4 x float> %8 ) nounwind readnone ; <<4 x float>>:9 [#uses=1]
%phitmp40 = bitcast <4 x float> %9 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp4109.i = and <4 x i32> %phitmp40, < i32 8388607, i32 8388607, i32 8388607, i32 8388607 > ; <<4 x i32>> [#uses=1]
%tmp4116.i = or <4 x i32> %tmp4109.i, < i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216 > ; <<4 x i32>> [#uses=1]
%tmp4117.i = bitcast <4 x i32> %tmp4116.i to <4 x float> ; <<4 x float>> [#uses=1]
- add <4 x float> %tmp4117.i, zeroinitializer ; <<4 x float>>:10 [#uses=1]
- mul <4 x float> %10, < float 5.000000e-01, float 5.000000e-01, float 5.000000e-01, float 5.000000e-01 > ; <<4 x float>>:11 [#uses=1]
+ fadd <4 x float> %tmp4117.i, zeroinitializer ; <<4 x float>>:10 [#uses=1]
+ fmul <4 x float> %10, < float 5.000000e-01, float 5.000000e-01, float 5.000000e-01, float 5.000000e-01 > ; <<4 x float>>:11 [#uses=1]
call <4 x float> @llvm.x86.sse.max.ps( <4 x float> %11, <4 x float> zeroinitializer ) nounwind readnone ; <<4 x float>>:12 [#uses=1]
call <4 x float> @llvm.x86.sse.min.ps( <4 x float> %12, <4 x float> zeroinitializer ) nounwind readnone ; <<4 x float>>:13 [#uses=1]
%tmp4170.i = call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %13, <4 x float> zeroinitializer, i8 2 ) nounwind ; <<4 x float>> [#uses=1]
diff --git a/test/Transforms/InstCombine/signed-comparison.ll b/test/Transforms/InstCombine/signed-comparison.ll
index fdf150f..86e07ec7 100644
--- a/test/Transforms/InstCombine/signed-comparison.ll
+++ b/test/Transforms/InstCombine/signed-comparison.ll
@@ -14,7 +14,7 @@ bb:
%t0 = and i64 %indvar, 65535
%t1 = getelementptr double* %p, i64 %t0
%t2 = load double* %t1, align 8
- %t3 = mul double %t2, 2.2
+ %t3 = fmul double %t2, 2.2
store double %t3, double* %t1, align 8
%i.04 = trunc i64 %indvar to i16
%t4 = add i16 %i.04, 1
diff --git a/test/Transforms/InstCombine/sitofp.ll b/test/Transforms/InstCombine/sitofp.ll
index c26c351..2bf7385 100644
--- a/test/Transforms/InstCombine/sitofp.ll
+++ b/test/Transforms/InstCombine/sitofp.ll
@@ -36,7 +36,7 @@ define i32 @test6(i32 %A) {
%C = and i32 %A, 32 ; <i32> [#uses=1]
%D = sitofp i32 %B to double ; <double> [#uses=1]
%E = sitofp i32 %C to double ; <double> [#uses=1]
- %F = add double %D, %E ; <double> [#uses=1]
+ %F = fadd double %D, %E ; <double> [#uses=1]
%G = fptosi double %F to i32 ; <i32> [#uses=1]
ret i32 %G
}
diff --git a/test/Transforms/InstCombine/vec_demanded_elts.ll b/test/Transforms/InstCombine/vec_demanded_elts.ll
index 03e070f..95df8c6 100644
--- a/test/Transforms/InstCombine/vec_demanded_elts.ll
+++ b/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -1,7 +1,7 @@
; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
-; RUN: grep {sub float}
+; RUN: grep {fadd float}
; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
-; RUN: grep {mul float}
+; RUN: grep {fmul float}
; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
; RUN: not grep {insertelement.*0.00}
; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
@@ -26,7 +26,7 @@ entry:
}
define i32 @test2(float %f) {
- %tmp5 = mul float %f, %f
+ %tmp5 = fmul float %f, %f
%tmp9 = insertelement <4 x float> undef, float %tmp5, i32 0
%tmp10 = insertelement <4 x float> %tmp9, float 0.000000e+00, i32 1
%tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2
diff --git a/test/Transforms/InstCombine/vec_narrow.ll b/test/Transforms/InstCombine/vec_narrow.ll
index 9063148..e444c2a 100644
--- a/test/Transforms/InstCombine/vec_narrow.ll
+++ b/test/Transforms/InstCombine/vec_narrow.ll
@@ -5,7 +5,7 @@
define float @test(%V %A, %V %B, float %f) {
%C = insertelement %V %A, float %f, i32 0 ; <%V> [#uses=1]
- %D = add %V %C, %B ; <%V> [#uses=1]
+ %D = fadd %V %C, %B ; <%V> [#uses=1]
%E = extractelement %V %D, i32 0 ; <float> [#uses=1]
ret float %E
}
diff --git a/test/Transforms/InstCombine/zero-point-zero-add.ll b/test/Transforms/InstCombine/zero-point-zero-add.ll
index bae60d9..adb28e4 100644
--- a/test/Transforms/InstCombine/zero-point-zero-add.ll
+++ b/test/Transforms/InstCombine/zero-point-zero-add.ll
@@ -3,13 +3,13 @@
declare double @abs(double)
define double @test(double %X) {
- %Y = add double %X, 0.0 ;; Should be a single add x, 0.0
- %Z = add double %Y, 0.0
+ %Y = fadd double %X, 0.0 ;; Should be a single add x, 0.0
+ %Z = fadd double %Y, 0.0
ret double %Z
}
define double @test1(double %X) {
%Y = call double @abs(double %X)
- %Z = add double %Y, 0.0
+ %Z = fadd double %Y, 0.0
ret double %Z
}
diff --git a/test/Transforms/LCSSA/2007-07-12-LICM-2.ll b/test/Transforms/LCSSA/2007-07-12-LICM-2.ll
index 58bb19d..e8dc391 100644
--- a/test/Transforms/LCSSA/2007-07-12-LICM-2.ll
+++ b/test/Transforms/LCSSA/2007-07-12-LICM-2.ll
@@ -5,10 +5,10 @@ entry:
bb7: ; preds = %bb7, %entry
%tmp39 = load <4 x float>* null ; <<4 x float>> [#uses=1]
- %tmp40 = add <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=1]
- %tmp43 = add <4 x float> %tmp40, < float 1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 2.000000e+00 > ; <<4 x float>> [#uses=1]
- %tmp46 = add <4 x float> %tmp43, < float 3.000000e+00, float 0.000000e+00, float 2.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
- %tmp49 = add <4 x float> %tmp46, < float 0.000000e+00, float 4.000000e+00, float 6.000000e+00, float 1.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp40 = fadd <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp43 = fadd <4 x float> %tmp40, < float 1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 2.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp46 = fadd <4 x float> %tmp43, < float 3.000000e+00, float 0.000000e+00, float 2.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp49 = fadd <4 x float> %tmp46, < float 0.000000e+00, float 4.000000e+00, float 6.000000e+00, float 1.000000e+00 > ; <<4 x float>> [#uses=1]
store <4 x float> %tmp49, <4 x float>* null
br i1 false, label %bb7, label %bb56
diff --git a/test/Transforms/LCSSA/2007-07-12-LICM-3.ll b/test/Transforms/LCSSA/2007-07-12-LICM-3.ll
index 79370ee..72cebed 100644
--- a/test/Transforms/LCSSA/2007-07-12-LICM-3.ll
+++ b/test/Transforms/LCSSA/2007-07-12-LICM-3.ll
@@ -9,10 +9,10 @@ bb: ; preds = %bb56, %entry
bb7: ; preds = %bb7, %bb
%tmp39 = load <4 x float>* null ; <<4 x float>> [#uses=1]
- %tmp40 = add <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=1]
- %tmp43 = add <4 x float> %tmp40, < float 1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 2.000000e+00 > ; <<4 x float>> [#uses=1]
- %tmp46 = add <4 x float> %tmp43, < float 3.000000e+00, float 0.000000e+00, float 2.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
- %tmp49 = add <4 x float> %tmp46, < float 0.000000e+00, float 4.000000e+00, float 6.000000e+00, float 1.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp40 = fadd <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp43 = fadd <4 x float> %tmp40, < float 1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 2.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp46 = fadd <4 x float> %tmp43, < float 3.000000e+00, float 0.000000e+00, float 2.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp49 = fadd <4 x float> %tmp46, < float 0.000000e+00, float 4.000000e+00, float 6.000000e+00, float 1.000000e+00 > ; <<4 x float>> [#uses=1]
store <4 x float> %tmp49, <4 x float>* null
br i1 false, label %bb7, label %bb56
diff --git a/test/Transforms/LCSSA/2007-07-12-LICM.ll b/test/Transforms/LCSSA/2007-07-12-LICM.ll
index 1c9830e..0c433c3 100644
--- a/test/Transforms/LCSSA/2007-07-12-LICM.ll
+++ b/test/Transforms/LCSSA/2007-07-12-LICM.ll
@@ -5,7 +5,7 @@ entry:
bb7: ; preds = %bb7, %entry
%tmp39 = load <4 x float>* null ; <<4 x float>> [#uses=1]
- %tmp40 = add <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=0]
+ %tmp40 = fadd <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* null
br i1 false, label %bb7, label %bb56
diff --git a/test/Transforms/LoopIndexSplit/2007-09-24-UpdateIterationSpace.ll b/test/Transforms/LoopIndexSplit/2007-09-24-UpdateIterationSpace.ll
index ec29847..928fd95 100644
--- a/test/Transforms/LoopIndexSplit/2007-09-24-UpdateIterationSpace.ll
+++ b/test/Transforms/LoopIndexSplit/2007-09-24-UpdateIterationSpace.ll
@@ -8,7 +8,7 @@ entry:
bb.preheader: ; preds = %entry
%tmp3031 = fpext float %contribution to double ; <double> [#uses=1]
- %tmp32 = mul double %tmp3031, 5.000000e-01 ; <double> [#uses=1]
+ %tmp32 = fmul double %tmp3031, 5.000000e-01 ; <double> [#uses=1]
%tmp3839 = fpext float %sigmal to double ; <double> [#uses=1]
br label %bb
@@ -22,19 +22,19 @@ bb: ; preds = %bb.preheader, %cond_next45
cond_true9: ; preds = %bb
%tmp12 = getelementptr float* %x, i32 %i.01.0 ; <float*> [#uses=1]
%tmp13 = load float* %tmp12, align 4 ; <float> [#uses=1]
- %tmp15 = sub float %xcen, %tmp13 ; <float> [#uses=1]
+ %tmp15 = fsub float %xcen, %tmp13 ; <float> [#uses=1]
%tmp16 = tail call float @fabsf( float %tmp15 ) ; <float> [#uses=1]
%tmp18 = fdiv float %tmp16, %sigmal ; <float> [#uses=1]
%tmp21 = load float** %y, align 4 ; <float*> [#uses=2]
%tmp27 = getelementptr float* %tmp21, i32 %i.01.0 ; <float*> [#uses=1]
%tmp28 = load float* %tmp27, align 4 ; <float> [#uses=1]
%tmp2829 = fpext float %tmp28 to double ; <double> [#uses=1]
- %tmp34 = sub float -0.000000e+00, %tmp18 ; <float> [#uses=1]
+ %tmp34 = fsub float -0.000000e+00, %tmp18 ; <float> [#uses=1]
%tmp3435 = fpext float %tmp34 to double ; <double> [#uses=1]
%tmp36 = tail call double @exp( double %tmp3435 ) ; <double> [#uses=1]
- %tmp37 = mul double %tmp32, %tmp36 ; <double> [#uses=1]
+ %tmp37 = fmul double %tmp32, %tmp36 ; <double> [#uses=1]
%tmp40 = fdiv double %tmp37, %tmp3839 ; <double> [#uses=1]
- %tmp41 = add double %tmp2829, %tmp40 ; <double> [#uses=1]
+ %tmp41 = fadd double %tmp2829, %tmp40 ; <double> [#uses=1]
%tmp4142 = fptrunc double %tmp41 to float ; <float> [#uses=1]
%tmp44 = getelementptr float* %tmp21, i32 %i.01.0 ; <float*> [#uses=1]
store float %tmp4142, float* %tmp44, align 4
diff --git a/test/Transforms/LoopIndexSplit/2007-09-25-UpdateIterationSpace-2.ll b/test/Transforms/LoopIndexSplit/2007-09-25-UpdateIterationSpace-2.ll
index 7d93785..6619c7d 100644
--- a/test/Transforms/LoopIndexSplit/2007-09-25-UpdateIterationSpace-2.ll
+++ b/test/Transforms/LoopIndexSplit/2007-09-25-UpdateIterationSpace-2.ll
@@ -9,7 +9,7 @@ entry:
bb.preheader: ; preds = %entry
%tmp3031 = fpext float %contribution to double ; <double> [#uses=1]
- %tmp32 = mul double %tmp3031, 5.000000e-01 ; <double> [#uses=1]
+ %tmp32 = fmul double %tmp3031, 5.000000e-01 ; <double> [#uses=1]
%tmp3839 = fpext float %sigmal to double ; <double> [#uses=1]
br label %bb
@@ -24,19 +24,19 @@ bb: ; preds = %cond_next45, %bb.preheader
cond_true9: ; preds = %bb
%tmp12 = getelementptr float* %x, i32 %i.01.0 ; <float*> [#uses=1]
%tmp13 = load float* %tmp12, align 4 ; <float> [#uses=1]
- %tmp15 = sub float %xcen, %tmp13 ; <float> [#uses=1]
+ %tmp15 = fsub float %xcen, %tmp13 ; <float> [#uses=1]
%tmp16 = tail call float @fabsf(float %tmp15) ; <float> [#uses=1]
%tmp18 = fdiv float %tmp16, %sigmal ; <float> [#uses=1]
%tmp21 = load float** %y, align 4 ; <float*> [#uses=2]
%tmp27 = getelementptr float* %tmp21, i32 %k.06.0 ; <float*> [#uses=1]
%tmp28 = load float* %tmp27, align 4 ; <float> [#uses=1]
%tmp2829 = fpext float %tmp28 to double ; <double> [#uses=1]
- %tmp34 = sub float -0.000000e+00, %tmp18 ; <float> [#uses=1]
+ %tmp34 = fsub float -0.000000e+00, %tmp18 ; <float> [#uses=1]
%tmp3435 = fpext float %tmp34 to double ; <double> [#uses=1]
%tmp36 = tail call double @exp(double %tmp3435) ; <double> [#uses=1]
- %tmp37 = mul double %tmp32, %tmp36 ; <double> [#uses=1]
+ %tmp37 = fmul double %tmp32, %tmp36 ; <double> [#uses=1]
%tmp40 = fdiv double %tmp37, %tmp3839 ; <double> [#uses=1]
- %tmp41 = add double %tmp2829, %tmp40 ; <double> [#uses=1]
+ %tmp41 = fadd double %tmp2829, %tmp40 ; <double> [#uses=1]
%tmp4142 = fptrunc double %tmp41 to float ; <float> [#uses=1]
%tmp44 = getelementptr float* %tmp21, i32 %k.06.0 ; <float*> [#uses=1]
store float %tmp4142, float* %tmp44, align 4
diff --git a/test/Transforms/Mem2Reg/PromoteMemToRegister.ll b/test/Transforms/Mem2Reg/PromoteMemToRegister.ll
index fdc33fb..63b8c78 100644
--- a/test/Transforms/Mem2Reg/PromoteMemToRegister.ll
+++ b/test/Transforms/Mem2Reg/PromoteMemToRegister.ll
@@ -12,7 +12,7 @@ define double @testfunc(i32 %i, double %j) {
%t3 = load i32* %I ; <i32> [#uses=1]
%t4 = sitofp i32 %t3 to double ; <double> [#uses=1]
%t5 = load double* %J ; <double> [#uses=1]
- %t6 = mul double %t4, %t5 ; <double> [#uses=1]
+ %t6 = fmul double %t4, %t5 ; <double> [#uses=1]
ret double %t6
}
diff --git a/test/Transforms/MemCpyOpt/memcpy.ll b/test/Transforms/MemCpyOpt/memcpy.ll
index c5cdc29..94daee0 100644
--- a/test/Transforms/MemCpyOpt/memcpy.ll
+++ b/test/Transforms/MemCpyOpt/memcpy.ll
@@ -7,7 +7,7 @@ define void @ccosl({ x86_fp80, x86_fp80 }* sret %agg.result, x86_fp80 %z.0, x86
entry:
%tmp2 = alloca { x86_fp80, x86_fp80 } ; <{ x86_fp80, x86_fp80 }*> [#uses=1]
%memtmp = alloca { x86_fp80, x86_fp80 }, align 16 ; <{ x86_fp80, x86_fp80 }*> [#uses=2]
- %tmp5 = sub x86_fp80 0xK80000000000000000000, %z.1 ; <x86_fp80> [#uses=1]
+ %tmp5 = fsub x86_fp80 0xK80000000000000000000, %z.1 ; <x86_fp80> [#uses=1]
call void @ccoshl( { x86_fp80, x86_fp80 }* sret %memtmp, x86_fp80 %tmp5, x86_fp80 %z.0 ) nounwind
%tmp219 = bitcast { x86_fp80, x86_fp80 }* %tmp2 to i8* ; <i8*> [#uses=2]
%memtmp20 = bitcast { x86_fp80, x86_fp80 }* %memtmp to i8* ; <i8*> [#uses=1]
diff --git a/test/Transforms/MemCpyOpt/sret.ll b/test/Transforms/MemCpyOpt/sret.ll
index 1ac11aa..ad9fb1b 100644
--- a/test/Transforms/MemCpyOpt/sret.ll
+++ b/test/Transforms/MemCpyOpt/sret.ll
@@ -9,7 +9,7 @@ entry:
%memtmp = alloca { x86_fp80, x86_fp80 }, align 16 ; <{ x86_fp80, x86_fp80 }*> [#uses=2]
%tmp1 = getelementptr { x86_fp80, x86_fp80 }* %z, i32 0, i32 1 ; <x86_fp80*> [#uses=1]
%tmp2 = load x86_fp80* %tmp1, align 16 ; <x86_fp80> [#uses=1]
- %tmp3 = sub x86_fp80 0xK80000000000000000000, %tmp2 ; <x86_fp80> [#uses=1]
+ %tmp3 = fsub x86_fp80 0xK80000000000000000000, %tmp2 ; <x86_fp80> [#uses=1]
%tmp4 = getelementptr { x86_fp80, x86_fp80 }* %iz, i32 0, i32 1 ; <x86_fp80*> [#uses=1]
%real = getelementptr { x86_fp80, x86_fp80 }* %iz, i32 0, i32 0 ; <x86_fp80*> [#uses=1]
%tmp7 = getelementptr { x86_fp80, x86_fp80 }* %z, i32 0, i32 0 ; <x86_fp80*> [#uses=1]
diff --git a/test/Transforms/PruneEH/2008-09-05-CGUpdate.ll b/test/Transforms/PruneEH/2008-09-05-CGUpdate.ll
index c3600ab..74434f4 100644
--- a/test/Transforms/PruneEH/2008-09-05-CGUpdate.ll
+++ b/test/Transforms/PruneEH/2008-09-05-CGUpdate.ll
@@ -477,12 +477,12 @@ invcont3: ; preds = %bb2
unreachable
bb4: ; preds = %invcont
- %3 = mul x86_fp80 %0, 0xK40008000000000000000 ; <x86_fp80> [#uses=1]
+ %3 = fmul x86_fp80 %0, 0xK40008000000000000000 ; <x86_fp80> [#uses=1]
%4 = fcmp ult x86_fp80 %3, 0xKC0068000000000000000 ; <i1> [#uses=1]
br i1 %4, label %bb8, label %bb6
bb6: ; preds = %bb4
- %5 = mul x86_fp80 %0, 0xK40008000000000000000 ; <x86_fp80> [#uses=1]
+ %5 = fmul x86_fp80 %0, 0xK40008000000000000000 ; <x86_fp80> [#uses=1]
%6 = fcmp ugt x86_fp80 %5, 0xK4005FE00000000000000 ; <i1> [#uses=1]
br i1 %6, label %bb8, label %bb10
@@ -494,16 +494,16 @@ invcont9: ; preds = %bb8
unreachable
bb10: ; preds = %bb6
- %7 = mul x86_fp80 %0, 0xK40008000000000000000 ; <x86_fp80> [#uses=3]
+ %7 = fmul x86_fp80 %0, 0xK40008000000000000000 ; <x86_fp80> [#uses=3]
%8 = fcmp ult x86_fp80 %7, 0xK00000000000000000000 ; <i1> [#uses=1]
br i1 %8, label %bb13, label %bb12
bb12: ; preds = %bb10
- %9 = add x86_fp80 %7, 0xK3FFDFFFFFFFFFFFFFFFF ; <x86_fp80> [#uses=1]
+ %9 = fadd x86_fp80 %7, 0xK3FFDFFFFFFFFFFFFFFFF ; <x86_fp80> [#uses=1]
br label %bb14
bb13: ; preds = %bb10
- %10 = sub x86_fp80 %7, 0xK3FFDFFFFFFFFFFFFFFFF ; <x86_fp80> [#uses=1]
+ %10 = fsub x86_fp80 %7, 0xK3FFDFFFFFFFFFFFFFFFF ; <x86_fp80> [#uses=1]
br label %bb14
bb14: ; preds = %bb13, %bb12
diff --git a/test/Transforms/Reassociate/2006-04-27-ReassociateVector.ll b/test/Transforms/Reassociate/2006-04-27-ReassociateVector.ll
index 05d6103..3662e09 100644
--- a/test/Transforms/Reassociate/2006-04-27-ReassociateVector.ll
+++ b/test/Transforms/Reassociate/2006-04-27-ReassociateVector.ll
@@ -1,8 +1,8 @@
; RUN: llvm-as < %s | opt -reassociate -disable-output
define void @foo() {
- %tmp162 = sub <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>> [#uses=1]
- %tmp164 = mul <4 x float> zeroinitializer, %tmp162 ; <<4 x float>> [#uses=0]
+ %tmp162 = fsub <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>> [#uses=1]
+ %tmp164 = fmul <4 x float> zeroinitializer, %tmp162 ; <<4 x float>> [#uses=0]
ret void
}
diff --git a/test/Transforms/SCCP/2006-12-04-PackedType.ll b/test/Transforms/SCCP/2006-12-04-PackedType.ll
index b7a7880..0e268c2 100644
--- a/test/Transforms/SCCP/2006-12-04-PackedType.ll
+++ b/test/Transforms/SCCP/2006-12-04-PackedType.ll
@@ -112,7 +112,7 @@ cond_true93: ; preds = %entry
%tmp.upgrd.1 = getelementptr %struct.GLDContextRec* %ctx, i32 0, i32 31, i32 14 ; <i32*> [#uses=1]
%tmp95 = load i32* %tmp.upgrd.1 ; <i32> [#uses=1]
%tmp95.upgrd.2 = sitofp i32 %tmp95 to float ; <float> [#uses=1]
- %tmp108 = mul float undef, %tmp95.upgrd.2 ; <float> [#uses=1]
+ %tmp108 = fmul float undef, %tmp95.upgrd.2 ; <float> [#uses=1]
br label %cond_next116
cond_next116: ; preds = %cond_true93, %entry
%point_size.2 = phi float [ %tmp108, %cond_true93 ], [ undef, %entry ] ; <float> [#uses=2]
@@ -130,7 +130,7 @@ cond_true462: ; preds = %cond_true458
cond_true467: ; preds = %cond_true462
ret void
cond_next484: ; preds = %cond_next116
- %tmp486 = mul float %point_size.2, 5.000000e-01 ; <float> [#uses=1]
+ %tmp486 = fmul float %point_size.2, 5.000000e-01 ; <float> [#uses=1]
br label %cond_next487
cond_next487: ; preds = %cond_next484, %cond_true462, %cond_true458
%radius.0 = phi float [ %tmp486, %cond_next484 ], [ 5.000000e-01, %cond_true458 ], [ 5.000000e-01, %cond_true462 ] ; <float> [#uses=2]
diff --git a/test/Transforms/SCCP/apint-ipsccp4.ll b/test/Transforms/SCCP/apint-ipsccp4.ll
index de355d1..a0656b7 100644
--- a/test/Transforms/SCCP/apint-ipsccp4.ll
+++ b/test/Transforms/SCCP/apint-ipsccp4.ll
@@ -35,10 +35,10 @@ define float @All()
%B = fcmp oge float %A, 1.0
br i1 %B, label %T, label %F
T:
- %C = add float %A, 1.0
+ %C = fadd float %A, 1.0
br label %exit
F:
- %D = add float %A, 2.0
+ %D = fadd float %A, 2.0
br label %exit
exit:
%E = phi float [%C, %T], [%D, %F]
diff --git a/test/Transforms/ScalarRepl/2009-03-17-CleanUp.ll b/test/Transforms/ScalarRepl/2009-03-17-CleanUp.ll
index 13055ea..facb7c1 100644
--- a/test/Transforms/ScalarRepl/2009-03-17-CleanUp.ll
+++ b/test/Transforms/ScalarRepl/2009-03-17-CleanUp.ll
@@ -1766,7 +1766,7 @@ _ZL13random_doublev.exit: ; preds = %bb.i, %bb7
call void @llvm.dbg.stoppoint(i32 75, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to %0*))
%22 = load i32* @_ZZL13random_doublevE4seed, align 4 ; <i32> [#uses=2]
%23 = sitofp i32 %22 to double ; <double> [#uses=1]
- %24 = mul double %23, 0x3E340000002813D9 ; <double> [#uses=1]
+ %24 = fmul double %23, 0x3E340000002813D9 ; <double> [#uses=1]
call void @llvm.dbg.stoppoint(i32 76, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to %0*))
%25 = xor i32 %22, 123459876 ; <i32> [#uses=1]
store i32 %25, i32* @_ZZL13random_doublevE4seed, align 4
@@ -1803,7 +1803,7 @@ bb8: ; preds = %bb.i1, %_ZL13random_doublev.exit
call void @llvm.dbg.stoppoint(i32 75, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to %0*))
%38 = load i32* @_ZZL13random_doublevE4seed, align 4 ; <i32> [#uses=2]
%39 = sitofp i32 %38 to double ; <double> [#uses=1]
- %40 = mul double %39, 0x3E340000002813D9 ; <double> [#uses=1]
+ %40 = fmul double %39, 0x3E340000002813D9 ; <double> [#uses=1]
call void @llvm.dbg.stoppoint(i32 76, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to %0*))
%41 = xor i32 %38, 123459876 ; <i32> [#uses=1]
store i32 %41, i32* @_ZZL13random_doublevE4seed, align 4
@@ -2110,16 +2110,16 @@ entry:
%real7 = load double* %real6, align 8 ; <double> [#uses=4]
%imag8 = getelementptr %1* %memtmp1, i32 0, i32 1 ; <double*> [#uses=1]
%imag9 = load double* %imag8, align 8 ; <double> [#uses=4]
- %21 = mul double %real3, %real7 ; <double> [#uses=1]
- %22 = mul double %imag5, %imag9 ; <double> [#uses=1]
- %23 = add double %21, %22 ; <double> [#uses=1]
- %24 = mul double %real7, %real7 ; <double> [#uses=1]
- %25 = mul double %imag9, %imag9 ; <double> [#uses=1]
- %26 = add double %24, %25 ; <double> [#uses=2]
+ %21 = fmul double %real3, %real7 ; <double> [#uses=1]
+ %22 = fmul double %imag5, %imag9 ; <double> [#uses=1]
+ %23 = fadd double %21, %22 ; <double> [#uses=1]
+ %24 = fmul double %real7, %real7 ; <double> [#uses=1]
+ %25 = fmul double %imag9, %imag9 ; <double> [#uses=1]
+ %26 = fadd double %24, %25 ; <double> [#uses=2]
%27 = fdiv double %23, %26 ; <double> [#uses=1]
- %28 = mul double %imag5, %real7 ; <double> [#uses=1]
- %29 = mul double %real3, %imag9 ; <double> [#uses=1]
- %30 = sub double %28, %29 ; <double> [#uses=1]
+ %28 = fmul double %imag5, %real7 ; <double> [#uses=1]
+ %29 = fmul double %real3, %imag9 ; <double> [#uses=1]
+ %30 = fsub double %28, %29 ; <double> [#uses=1]
%31 = fdiv double %30, %26 ; <double> [#uses=1]
%real10 = getelementptr %1* %0, i32 0, i32 0 ; <double*> [#uses=1]
store double %27, double* %real10, align 8
@@ -2227,12 +2227,12 @@ entry:
%real9 = load double* %real8, align 8 ; <double> [#uses=2]
%imag10 = getelementptr %1* %memtmp3, i32 0, i32 1 ; <double*> [#uses=1]
%imag11 = load double* %imag10, align 8 ; <double> [#uses=2]
- %27 = mul double %real5, %real9 ; <double> [#uses=1]
- %28 = mul double %imag7, %imag11 ; <double> [#uses=1]
- %29 = sub double %27, %28 ; <double> [#uses=1]
- %30 = mul double %real5, %imag11 ; <double> [#uses=1]
- %31 = mul double %real9, %imag7 ; <double> [#uses=1]
- %32 = add double %30, %31 ; <double> [#uses=1]
+ %27 = fmul double %real5, %real9 ; <double> [#uses=1]
+ %28 = fmul double %imag7, %imag11 ; <double> [#uses=1]
+ %29 = fsub double %27, %28 ; <double> [#uses=1]
+ %30 = fmul double %real5, %imag11 ; <double> [#uses=1]
+ %31 = fmul double %real9, %imag7 ; <double> [#uses=1]
+ %32 = fadd double %30, %31 ; <double> [#uses=1]
%real12 = getelementptr %1* %0, i32 0, i32 0 ; <double*> [#uses=1]
store double %29, double* %real12, align 8
%imag13 = getelementptr %1* %0, i32 0, i32 1 ; <double*> [#uses=1]
@@ -2384,10 +2384,10 @@ entry:
call void @llvm.dbg.stoppoint(i32 444, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit5 to %0*))
%0 = call double* @_ZNKSt7complexIdE4imagEv(%"struct.std::complex<double>"* %__x) nounwind ; <double*> [#uses=1]
%1 = load double* %0, align 8 ; <double> [#uses=1]
- %2 = sub double -0.000000e+00, %1 ; <double> [#uses=1]
+ %2 = fsub double -0.000000e+00, %1 ; <double> [#uses=1]
%3 = call double* @_ZNKSt7complexIdE4realEv(%"struct.std::complex<double>"* %__x) nounwind ; <double*> [#uses=1]
%4 = load double* %3, align 8 ; <double> [#uses=1]
- %5 = sub double -0.000000e+00, %4 ; <double> [#uses=1]
+ %5 = fsub double -0.000000e+00, %4 ; <double> [#uses=1]
call void @_ZNSt7complexIdEC1Edd(%"struct.std::complex<double>"* %agg.result, double %5, double %2) nounwind
call void @llvm.dbg.region.end(%0* bitcast (%llvm.dbg.subprogram.type* @llvm.dbg.subprogram576 to %0*))
ret void
@@ -2497,16 +2497,16 @@ entry:
%real9 = load double* %real8, align 8 ; <double> [#uses=4]
%imag10 = getelementptr %1* %memtmp3, i32 0, i32 1 ; <double*> [#uses=1]
%imag11 = load double* %imag10, align 8 ; <double> [#uses=4]
- %27 = mul double %real5, %real9 ; <double> [#uses=1]
- %28 = mul double %imag7, %imag11 ; <double> [#uses=1]
- %29 = add double %27, %28 ; <double> [#uses=1]
- %30 = mul double %real9, %real9 ; <double> [#uses=1]
- %31 = mul double %imag11, %imag11 ; <double> [#uses=1]
- %32 = add double %30, %31 ; <double> [#uses=2]
+ %27 = fmul double %real5, %real9 ; <double> [#uses=1]
+ %28 = fmul double %imag7, %imag11 ; <double> [#uses=1]
+ %29 = fadd double %27, %28 ; <double> [#uses=1]
+ %30 = fmul double %real9, %real9 ; <double> [#uses=1]
+ %31 = fmul double %imag11, %imag11 ; <double> [#uses=1]
+ %32 = fadd double %30, %31 ; <double> [#uses=2]
%33 = fdiv double %29, %32 ; <double> [#uses=1]
- %34 = mul double %imag7, %real9 ; <double> [#uses=1]
- %35 = mul double %real5, %imag11 ; <double> [#uses=1]
- %36 = sub double %34, %35 ; <double> [#uses=1]
+ %34 = fmul double %imag7, %real9 ; <double> [#uses=1]
+ %35 = fmul double %real5, %imag11 ; <double> [#uses=1]
+ %36 = fsub double %34, %35 ; <double> [#uses=1]
%37 = fdiv double %36, %32 ; <double> [#uses=1]
%real12 = getelementptr %1* %0, i32 0, i32 0 ; <double*> [#uses=1]
store double %33, double* %real12, align 8
@@ -2554,7 +2554,7 @@ entry:
%1 = load double* %0, align 4 ; <double> [#uses=1]
%2 = call double* @_ZNKSt7complexIdE4realEv(%"struct.std::complex<double>"* %__z) nounwind ; <double*> [#uses=1]
%3 = load double* %2, align 8 ; <double> [#uses=1]
- %4 = add double %1, %3 ; <double> [#uses=1]
+ %4 = fadd double %1, %3 ; <double> [#uses=1]
%5 = getelementptr %"struct.std::complex<double>"* %this, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
store double %4, double* %5, align 4
call void @llvm.dbg.stoppoint(i32 1271, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit5 to %0*))
@@ -2562,7 +2562,7 @@ entry:
%7 = load double* %6, align 4 ; <double> [#uses=1]
%8 = call double* @_ZNKSt7complexIdE4imagEv(%"struct.std::complex<double>"* %__z) nounwind ; <double*> [#uses=1]
%9 = load double* %8, align 8 ; <double> [#uses=1]
- %10 = add double %7, %9 ; <double> [#uses=1]
+ %10 = fadd double %7, %9 ; <double> [#uses=1]
%11 = getelementptr %"struct.std::complex<double>"* %this, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
store double %10, double* %11, align 4
call void @llvm.dbg.stoppoint(i32 1272, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit5 to %0*))
@@ -2599,7 +2599,7 @@ entry:
%1 = load double* %0, align 4 ; <double> [#uses=1]
%2 = call double* @_ZNKSt7complexIdE4realEv(%"struct.std::complex<double>"* %__z) nounwind ; <double*> [#uses=1]
%3 = load double* %2, align 8 ; <double> [#uses=1]
- %4 = sub double %1, %3 ; <double> [#uses=1]
+ %4 = fsub double %1, %3 ; <double> [#uses=1]
%5 = getelementptr %"struct.std::complex<double>"* %this, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
store double %4, double* %5, align 4
call void @llvm.dbg.stoppoint(i32 1280, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit5 to %0*))
@@ -2607,7 +2607,7 @@ entry:
%7 = load double* %6, align 4 ; <double> [#uses=1]
%8 = call double* @_ZNKSt7complexIdE4imagEv(%"struct.std::complex<double>"* %__z) nounwind ; <double*> [#uses=1]
%9 = load double* %8, align 8 ; <double> [#uses=1]
- %10 = sub double %7, %9 ; <double> [#uses=1]
+ %10 = fsub double %7, %9 ; <double> [#uses=1]
%11 = getelementptr %"struct.std::complex<double>"* %this, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
store double %10, double* %11, align 4
call void @llvm.dbg.stoppoint(i32 1281, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit5 to %0*))
diff --git a/test/Transforms/ScalarRepl/copy-aggregate.ll b/test/Transforms/ScalarRepl/copy-aggregate.ll
index 4ab17ae..a1ad3f9 100644
--- a/test/Transforms/ScalarRepl/copy-aggregate.ll
+++ b/test/Transforms/ScalarRepl/copy-aggregate.ll
@@ -25,7 +25,7 @@ define float @test2(i128 %V) nounwind {
%B = getelementptr {[4 x float]}* %X, i32 0, i32 0, i32 3
%a = load float* %A
%b = load float* %B
- %c = add float %a, %b
+ %c = fadd float %a, %b
ret float %c
}
diff --git a/test/Transforms/ScalarRepl/memcpy-from-global.ll b/test/Transforms/ScalarRepl/memcpy-from-global.ll
index ee77e1f..e62ccc2 100644
--- a/test/Transforms/ScalarRepl/memcpy-from-global.ll
+++ b/test/Transforms/ScalarRepl/memcpy-from-global.ll
@@ -10,23 +10,23 @@ entry:
%tmp5 = and i32 %tmp3, 124 ; <i32> [#uses=4]
%tmp753 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp5 ; <float*> [#uses=1]
%tmp9 = load float* %tmp753 ; <float> [#uses=1]
- %tmp11 = mul float %tmp9, %x ; <float> [#uses=1]
- %tmp13 = add float %tmp11, 0.000000e+00 ; <float> [#uses=1]
+ %tmp11 = fmul float %tmp9, %x ; <float> [#uses=1]
+ %tmp13 = fadd float %tmp11, 0.000000e+00 ; <float> [#uses=1]
%tmp17.sum52 = or i32 %tmp5, 1 ; <i32> [#uses=1]
%tmp1851 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp17.sum52 ; <float*> [#uses=1]
%tmp19 = load float* %tmp1851 ; <float> [#uses=1]
- %tmp21 = mul float %tmp19, %y ; <float> [#uses=1]
- %tmp23 = add float %tmp21, %tmp13 ; <float> [#uses=1]
+ %tmp21 = fmul float %tmp19, %y ; <float> [#uses=1]
+ %tmp23 = fadd float %tmp21, %tmp13 ; <float> [#uses=1]
%tmp27.sum50 = or i32 %tmp5, 2 ; <i32> [#uses=1]
%tmp2849 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp27.sum50 ; <float*> [#uses=1]
%tmp29 = load float* %tmp2849 ; <float> [#uses=1]
- %tmp31 = mul float %tmp29, %z ; <float> [#uses=1]
- %tmp33 = add float %tmp31, %tmp23 ; <float> [#uses=1]
+ %tmp31 = fmul float %tmp29, %z ; <float> [#uses=1]
+ %tmp33 = fadd float %tmp31, %tmp23 ; <float> [#uses=1]
%tmp37.sum48 = or i32 %tmp5, 3 ; <i32> [#uses=1]
%tmp3847 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp37.sum48 ; <float*> [#uses=1]
%tmp39 = load float* %tmp3847 ; <float> [#uses=1]
- %tmp41 = mul float %tmp39, %w ; <float> [#uses=1]
- %tmp43 = add float %tmp41, %tmp33 ; <float> [#uses=1]
+ %tmp41 = fmul float %tmp39, %w ; <float> [#uses=1]
+ %tmp43 = fadd float %tmp41, %tmp33 ; <float> [#uses=1]
ret float %tmp43
}
diff --git a/test/Transforms/ScalarRepl/vector_promote.ll b/test/Transforms/ScalarRepl/vector_promote.ll
index a0d3317..4b6555b 100644
--- a/test/Transforms/ScalarRepl/vector_promote.ll
+++ b/test/Transforms/ScalarRepl/vector_promote.ll
@@ -5,12 +5,12 @@ define void @test(<4 x float>* %F, float %f) {
entry:
%G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=3]
%tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp3 = add <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
+ %tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
%G.upgrd.1 = getelementptr <4 x float>* %G, i32 0, i32 0 ; <float*> [#uses=1]
store float %f, float* %G.upgrd.1
%tmp4 = load <4 x float>* %G ; <<4 x float>> [#uses=2]
- %tmp6 = add <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
+ %tmp6 = fadd <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
store <4 x float> %tmp6, <4 x float>* %F
ret void
}
@@ -19,12 +19,12 @@ define void @test2(<4 x float>* %F, float %f) {
entry:
%G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=3]
%tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp3 = add <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
+ %tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
%tmp.upgrd.2 = getelementptr <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
store float %f, float* %tmp.upgrd.2
%tmp4 = load <4 x float>* %G ; <<4 x float>> [#uses=2]
- %tmp6 = add <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
+ %tmp6 = fadd <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
store <4 x float> %tmp6, <4 x float>* %F
ret void
}
@@ -33,7 +33,7 @@ define void @test3(<4 x float>* %F, float* %f) {
entry:
%G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=2]
%tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp3 = add <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
+ %tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
%tmp.upgrd.3 = getelementptr <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
%tmp.upgrd.4 = load float* %tmp.upgrd.3 ; <float> [#uses=1]
@@ -45,7 +45,7 @@ define void @test4(<4 x float>* %F, float* %f) {
entry:
%G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=2]
%tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp3 = add <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
+ %tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
%G.upgrd.5 = getelementptr <4 x float>* %G, i32 0, i32 0 ; <float*> [#uses=1]
%tmp.upgrd.6 = load float* %G.upgrd.5 ; <float> [#uses=1]
diff --git a/test/Transforms/SimplifyCFG/2006-10-29-InvokeCrash.ll b/test/Transforms/SimplifyCFG/2006-10-29-InvokeCrash.ll
index f22ca6c..6bfef02 100644
--- a/test/Transforms/SimplifyCFG/2006-10-29-InvokeCrash.ll
+++ b/test/Transforms/SimplifyCFG/2006-10-29-InvokeCrash.ll
@@ -142,11 +142,11 @@ invcont57: ; preds = %invcont51
store double %tmp64, double* %tmp62
%tmp65 = call double* @_ZN6QSizeF6rwidthEv( %struct.QPointF* %scaledPageSize ) ; <double*> [#uses=2]
%tmp67 = load double* %tmp65 ; <double> [#uses=1]
- %tmp69 = mul double %tmp67, %tmp48 ; <double> [#uses=1]
+ %tmp69 = fmul double %tmp67, %tmp48 ; <double> [#uses=1]
store double %tmp69, double* %tmp65
%tmp71 = call double* @_ZN6QSizeF7rheightEv( %struct.QPointF* %scaledPageSize ) ; <double*> [#uses=2]
%tmp73 = load double* %tmp71 ; <double> [#uses=1]
- %tmp75 = mul double %tmp73, %tmp54 ; <double> [#uses=1]
+ %tmp75 = fmul double %tmp73, %tmp54 ; <double> [#uses=1]
store double %tmp75, double* %tmp71
%tmp78 = getelementptr %struct.QPrinter* %printer, i32 0, i32 0 ; <%struct.QPaintDevice*> [#uses=1]
%tmp80 = invoke i32 @_ZNK12QPaintDevice6heightEv( %struct.QPaintDevice* %tmp78 )
@@ -188,7 +188,7 @@ invcont104: ; preds = %invcont103
to label %invcont106 unwind label %cleanup329 ; <i32> [#uses=1]
invcont106: ; preds = %invcont104
%tmp108 = sitofp i32 %tmp107 to double ; <double> [#uses=1]
- %tmp109 = mul double %tmp108, 0x3FE93264C993264C ; <double> [#uses=1]
+ %tmp109 = fmul double %tmp108, 0x3FE93264C993264C ; <double> [#uses=1]
%tmp109.upgrd.17 = fptosi double %tmp109 to i32 ; <i32> [#uses=3]
%tmp.upgrd.18 = call %struct.QTextBlockGroup* @_ZNK13QTextDocument9rootFrameEv( %struct.QAbstractTextDocumentLayout* %tmp95 ) ; <%struct.QTextBlockGroup*> [#uses=1]
invoke void @_ZNK10QTextFrame11frameFormatEv( %struct.QTextBlockFormat* sret %fmt, %struct.QTextBlockGroup* %tmp.upgrd.18 )
@@ -235,7 +235,7 @@ invcont124: ; preds = %invcont122
store double %tmp137, double* %tmp135
%tmp138 = call double @_ZNK6QRectF6heightEv( %struct.QRectF* %body ) ; <double> [#uses=1]
%tmp139 = sitofp i32 %tmp109.upgrd.17 to double ; <double> [#uses=1]
- %tmp140 = sub double %tmp138, %tmp139 ; <double> [#uses=1]
+ %tmp140 = fsub double %tmp138, %tmp139 ; <double> [#uses=1]
%tmp142 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
to label %invcont141 unwind label %cleanup192 ; <%struct.QPaintDevice*> [#uses=1]
invcont141: ; preds = %invcont124
@@ -249,7 +249,7 @@ invcont146: ; preds = %invcont144
to label %invcont148 unwind label %cleanup168 ; <i32> [#uses=1]
invcont148: ; preds = %invcont146
%tmp149.upgrd.21 = sitofp i32 %tmp149 to double ; <double> [#uses=1]
- %tmp150 = add double %tmp140, %tmp149.upgrd.21 ; <double> [#uses=1]
+ %tmp150 = fadd double %tmp140, %tmp149.upgrd.21 ; <double> [#uses=1]
%tmp152 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
to label %invcont151 unwind label %cleanup168 ; <%struct.QPaintDevice*> [#uses=1]
invcont151: ; preds = %invcont148
@@ -259,10 +259,10 @@ invcont153: ; preds = %invcont151
%tmp155 = mul i32 %tmp154, 5 ; <i32> [#uses=1]
%tmp156 = sdiv i32 %tmp155, 72 ; <i32> [#uses=1]
%tmp156.upgrd.22 = sitofp i32 %tmp156 to double ; <double> [#uses=1]
- %tmp157 = add double %tmp150, %tmp156.upgrd.22 ; <double> [#uses=1]
+ %tmp157 = fadd double %tmp150, %tmp156.upgrd.22 ; <double> [#uses=1]
%tmp158 = call double @_ZNK6QRectF5widthEv( %struct.QRectF* %body ) ; <double> [#uses=1]
%tmp159 = sitofp i32 %tmp109.upgrd.17 to double ; <double> [#uses=1]
- %tmp160 = sub double %tmp158, %tmp159 ; <double> [#uses=1]
+ %tmp160 = fsub double %tmp158, %tmp159 ; <double> [#uses=1]
call void @_ZN7QPointFC1Edd( %struct.QPointF* %tmp2, double %tmp160, double %tmp157 )
%tmp161 = getelementptr %struct.QPointF* %pageNumberPos, i32 0, i32 0 ; <double*> [#uses=1]
%tmp162 = getelementptr %struct.QPointF* %tmp2, i32 0, i32 0 ; <double*> [#uses=1]
diff --git a/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll b/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll
index 43ff690..4c9c9e8 100644
--- a/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll
+++ b/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll
@@ -15,7 +15,7 @@ entry:
br i1 %toBool, label %cond_true, label %cond_next
cond_true: ; preds = %entry
- %tmp7 = add double %tmp, %Z ; <double> [#uses=1]
+ %tmp7 = fadd double %tmp, %Z ; <double> [#uses=1]
br label %cond_next
cond_next: ; preds = %cond_true, %entry
diff --git a/test/Transforms/SimplifyCFG/2008-04-27-MultipleReturnCrash.ll b/test/Transforms/SimplifyCFG/2008-04-27-MultipleReturnCrash.ll
index a370b95..be3410c 100644
--- a/test/Transforms/SimplifyCFG/2008-04-27-MultipleReturnCrash.ll
+++ b/test/Transforms/SimplifyCFG/2008-04-27-MultipleReturnCrash.ll
@@ -21,7 +21,7 @@ bb56: ; preds = %bb48
bb174: ; preds = %bb144, %bb114
- %tmp191 = mul x86_fp80 0xK00000000000000000000, 0xK3FFE8000000000000000 ; <x86_fp80> [#uses=1]
+ %tmp191 = fmul x86_fp80 0xK00000000000000000000, 0xK3FFE8000000000000000 ; <x86_fp80> [#uses=1]
br label %bb196
bb196: ; preds = %bb174, %bb56, %bb40
diff --git a/test/Transforms/SimplifyCFG/2009-05-12-externweak.ll b/test/Transforms/SimplifyCFG/2009-05-12-externweak.ll
index 5969f27..dc0cbbe 100644
--- a/test/Transforms/SimplifyCFG/2009-05-12-externweak.ll
+++ b/test/Transforms/SimplifyCFG/2009-05-12-externweak.ll
@@ -29,7 +29,7 @@ bb3: ; preds = %bb2, %bb1
store i32 %storemerge, i32* @j
%1 = sitofp i32 %storemerge to double ; <double> [#uses=1]
%2 = call double @sin(double %1) nounwind readonly ; <double> [#uses=1]
- %3 = add double %2, %d.0 ; <double> [#uses=1]
+ %3 = fadd double %2, %d.0 ; <double> [#uses=1]
%4 = add i32 %l.0, 1 ; <i32> [#uses=1]
br label %bb4
diff --git a/test/Transforms/SimplifyLibCalls/half-powr.ll b/test/Transforms/SimplifyLibCalls/half-powr.ll
index f4e898c..890e788 100644
--- a/test/Transforms/SimplifyLibCalls/half-powr.ll
+++ b/test/Transforms/SimplifyLibCalls/half-powr.ll
@@ -11,7 +11,7 @@ bb: ; preds = %entry
bb1: ; preds = %bb, %entry
%f_addr.0 = phi float [ %1, %bb ], [ %f, %entry ] ; <float> [#uses=1]
- %2 = mul float %f_addr.0, %g ; <float> [#uses=1]
+ %2 = fmul float %f_addr.0, %g ; <float> [#uses=1]
ret float %2
}