Diffstat (limited to 'test/CodeGen/PowerPC')
-rw-r--r--  test/CodeGen/PowerPC/2006-01-11-darwin-fp-argument.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll | 88
-rw-r--r--  test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/2008-07-15-Fabs.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/2008-07-17-Fneg.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll | 40
-rw-r--r--  test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/2008-10-28-f128-i32.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/buildvec_canonicalize.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/fma.ll | 40
-rw-r--r--  test/CodeGen/PowerPC/fnabs.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/fneg.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/int-fp-conv-1.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/itofp128.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/mem-rr-addr-mode.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/multiple-return-values.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/ppcf128-1-opt.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/ppcf128-1.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/ppcf128-2.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/ppcf128-4.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/return-val-i128.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/unsafe-math.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/vec_fneg.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/vec_splat.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/vec_zero.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/vector.ll | 20
27 files changed, 137 insertions, 137 deletions
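
Every hunk below applies the same mechanical rewrite: where the integer opcodes add, sub, and mul were used on floating-point operands (float, double, ppc_fp128, and their vector forms), they are replaced with the dedicated fadd, fsub, and fmul instructions. A minimal sketch of the pattern, using hypothetical values %x and %y rather than any line taken from the tests themselves:

    ; before: integer-style opcode applied to a floating-point type
    %sum = add double %x, %y
    ; after: the dedicated floating-point opcode
    %sum = fadd double %x, %y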
diff --git a/test/CodeGen/PowerPC/2006-01-11-darwin-fp-argument.ll b/test/CodeGen/PowerPC/2006-01-11-darwin-fp-argument.ll
index e2f06f5..1b3bde8 100644
--- a/test/CodeGen/PowerPC/2006-01-11-darwin-fp-argument.ll
+++ b/test/CodeGen/PowerPC/2006-01-11-darwin-fp-argument.ll
@@ -5,6 +5,6 @@ target triple = "powerpc-apple-darwin8.2.0"
; Dead argument should reserve an FP register.
define double @bar(double %DEAD, double %X, double %Y) {
- %tmp.2 = add double %X, %Y ; <double> [#uses=1]
+ %tmp.2 = fadd double %X, %Y ; <double> [#uses=1]
ret double %tmp.2
}
diff --git a/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll b/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll
index a58cd16..7a65c00 100644
--- a/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll
+++ b/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll
@@ -9,15 +9,15 @@ define void @offset(%struct.Point* %pt, double %x, double %y, double %z) {
entry:
%tmp = getelementptr %struct.Point* %pt, i32 0, i32 0 ; <double*> [#uses=2]
%tmp.upgrd.1 = load double* %tmp ; <double> [#uses=1]
- %tmp2 = add double %tmp.upgrd.1, %x ; <double> [#uses=1]
+ %tmp2 = fadd double %tmp.upgrd.1, %x ; <double> [#uses=1]
store double %tmp2, double* %tmp
%tmp6 = getelementptr %struct.Point* %pt, i32 0, i32 1 ; <double*> [#uses=2]
%tmp7 = load double* %tmp6 ; <double> [#uses=1]
- %tmp9 = add double %tmp7, %y ; <double> [#uses=1]
+ %tmp9 = fadd double %tmp7, %y ; <double> [#uses=1]
store double %tmp9, double* %tmp6
%tmp13 = getelementptr %struct.Point* %pt, i32 0, i32 2 ; <double*> [#uses=2]
%tmp14 = load double* %tmp13 ; <double> [#uses=1]
- %tmp16 = add double %tmp14, %z ; <double> [#uses=1]
+ %tmp16 = fadd double %tmp14, %z ; <double> [#uses=1]
store double %tmp16, double* %tmp13
ret void
}
diff --git a/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll b/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll
index 04ca3bb..637208b 100644
--- a/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll
+++ b/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll
@@ -604,10 +604,10 @@ xPIF.exit: ; preds = %.critedge7898, %xOperationInitMasks.exit
shufflevector <4 x float> %583, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:589 [#uses=1]
shufflevector <4 x float> %585, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:590 [#uses=1]
shufflevector <4 x float> %588, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:591 [#uses=1]
- mul <4 x float> zeroinitializer, %589 ; <<4 x float>>:592 [#uses=0]
- mul <4 x float> zeroinitializer, %590 ; <<4 x float>>:593 [#uses=0]
- mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:594 [#uses=1]
- mul <4 x float> zeroinitializer, %591 ; <<4 x float>>:595 [#uses=0]
+ fmul <4 x float> zeroinitializer, %589 ; <<4 x float>>:592 [#uses=0]
+ fmul <4 x float> zeroinitializer, %590 ; <<4 x float>>:593 [#uses=0]
+ fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:594 [#uses=1]
+ fmul <4 x float> zeroinitializer, %591 ; <<4 x float>>:595 [#uses=0]
getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:596 [#uses=2]
load <4 x float>* %596 ; <<4 x float>>:597 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %596
@@ -621,8 +621,8 @@ xPIF.exit: ; preds = %.critedge7898, %xOperationInitMasks.exit
load <4 x float>* null ; <<4 x float>>:604 [#uses=1]
getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:605 [#uses=1]
load <4 x float>* %605 ; <<4 x float>>:606 [#uses=1]
- sub <4 x float> zeroinitializer, %604 ; <<4 x float>>:607 [#uses=2]
- sub <4 x float> zeroinitializer, %606 ; <<4 x float>>:608 [#uses=2]
+ fsub <4 x float> zeroinitializer, %604 ; <<4 x float>>:607 [#uses=2]
+ fsub <4 x float> zeroinitializer, %606 ; <<4 x float>>:608 [#uses=2]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:609 [#uses=0]
br i1 false, label %617, label %610
@@ -672,21 +672,21 @@ xST.exit400: ; preds = %633, %625, %610
load <4 x float>* null ; <<4 x float>>:638 [#uses=2]
getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:639 [#uses=0]
load <4 x float>* null ; <<4 x float>>:640 [#uses=2]
- mul <4 x float> %638, %638 ; <<4 x float>>:641 [#uses=1]
- mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:642 [#uses=0]
- mul <4 x float> %640, %640 ; <<4 x float>>:643 [#uses=2]
+ fmul <4 x float> %638, %638 ; <<4 x float>>:641 [#uses=1]
+ fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:642 [#uses=0]
+ fmul <4 x float> %640, %640 ; <<4 x float>>:643 [#uses=2]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>>:644 [#uses=0]
shufflevector <4 x float> %643, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>>:645 [#uses=1]
- add <4 x float> %645, %643 ; <<4 x float>>:646 [#uses=0]
+ fadd <4 x float> %645, %643 ; <<4 x float>>:646 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>>:647 [#uses=1]
shufflevector <4 x float> %641, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>>:648 [#uses=1]
- add <4 x float> zeroinitializer, %647 ; <<4 x float>>:649 [#uses=2]
- add <4 x float> zeroinitializer, %648 ; <<4 x float>>:650 [#uses=0]
- add <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:651 [#uses=2]
+ fadd <4 x float> zeroinitializer, %647 ; <<4 x float>>:649 [#uses=2]
+ fadd <4 x float> zeroinitializer, %648 ; <<4 x float>>:650 [#uses=0]
+ fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:651 [#uses=2]
call <4 x float> @llvm.ppc.altivec.vrsqrtefp( <4 x float> %649 ) ; <<4 x float>>:652 [#uses=1]
- mul <4 x float> %652, %649 ; <<4 x float>>:653 [#uses=1]
+ fmul <4 x float> %652, %649 ; <<4 x float>>:653 [#uses=1]
call <4 x float> @llvm.ppc.altivec.vrsqrtefp( <4 x float> %651 ) ; <<4 x float>>:654 [#uses=1]
- mul <4 x float> %654, %651 ; <<4 x float>>:655 [#uses=0]
+ fmul <4 x float> %654, %651 ; <<4 x float>>:655 [#uses=0]
icmp eq i32 0, 0 ; <i1>:656 [#uses=1]
br i1 %656, label %665, label %657
@@ -721,9 +721,9 @@ xST.exit402: ; preds = %669, %657
load <4 x float>* null ; <<4 x float>>:676 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:677 [#uses=1]
shufflevector <4 x float> %675, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:678 [#uses=1]
- mul <4 x float> zeroinitializer, %677 ; <<4 x float>>:679 [#uses=0]
- mul <4 x float> zeroinitializer, %678 ; <<4 x float>>:680 [#uses=0]
- mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:681 [#uses=1]
+ fmul <4 x float> zeroinitializer, %677 ; <<4 x float>>:679 [#uses=0]
+ fmul <4 x float> zeroinitializer, %678 ; <<4 x float>>:680 [#uses=0]
+ fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:681 [#uses=1]
icmp eq i32 0, 0 ; <i1>:682 [#uses=1]
br i1 %682, label %689, label %683
@@ -750,7 +750,7 @@ xST.exit405: ; preds = %689, %683
load <4 x float>* null ; <<4 x float>>:698 [#uses=0]
getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:699 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:700 [#uses=1]
- add <4 x float> zeroinitializer, %700 ; <<4 x float>>:701 [#uses=0]
+ fadd <4 x float> zeroinitializer, %700 ; <<4 x float>>:701 [#uses=0]
load <4 x i32>* %.sub7896 ; <<4 x i32>>:702 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %702, <4 x i32> zeroinitializer ) ; <i32>:703 [#uses=0]
getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:704 [#uses=2]
@@ -769,7 +769,7 @@ xST.exit405: ; preds = %689, %683
getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:714 [#uses=1]
load <4 x float>* %714 ; <<4 x float>>:715 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:716 [#uses=0]
- mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:717 [#uses=1]
+ fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:717 [#uses=1]
load <4 x i32>* %.sub7896 ; <<4 x i32>>:718 [#uses=0]
getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 0 ; <<4 x float>*>:719 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %719
@@ -791,10 +791,10 @@ xST.exit405: ; preds = %689, %683
load <4 x float>* %732 ; <<4 x float>>:733 [#uses=0]
getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:734 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:735 [#uses=1]
- mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:736 [#uses=1]
- mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:737 [#uses=1]
- mul <4 x float> zeroinitializer, %735 ; <<4 x float>>:738 [#uses=1]
- mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:739 [#uses=1]
+ fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:736 [#uses=1]
+ fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:737 [#uses=1]
+ fmul <4 x float> zeroinitializer, %735 ; <<4 x float>>:738 [#uses=1]
+ fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:739 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:740 [#uses=1]
icmp eq i32 %740, 0 ; <i1>:741 [#uses=0]
getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:742 [#uses=2]
@@ -821,9 +821,9 @@ xST.exit405: ; preds = %689, %683
getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:761 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:762 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:763 [#uses=1]
- add <4 x float> %757, zeroinitializer ; <<4 x float>>:764 [#uses=0]
- add <4 x float> %758, %763 ; <<4 x float>>:765 [#uses=0]
- mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:766 [#uses=1]
+ fadd <4 x float> %757, zeroinitializer ; <<4 x float>>:764 [#uses=0]
+ fadd <4 x float> %758, %763 ; <<4 x float>>:765 [#uses=0]
+ fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:766 [#uses=1]
br i1 false, label %773, label %767
; <label>:767 ; preds = %xST.exit405
@@ -841,7 +841,7 @@ xST.exit405: ; preds = %689, %683
xST.exit422: ; preds = %773, %767
%.07267 = phi <4 x float> [ %766, %767 ], [ undef, %773 ] ; <<4 x float>> [#uses=0]
getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:774 [#uses=0]
- mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:775 [#uses=0]
+ fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:775 [#uses=0]
icmp eq i32 0, 0 ; <i1>:776 [#uses=1]
br i1 %776, label %780, label %777
@@ -1295,7 +1295,7 @@ xST.exit469: ; preds = %1027, %1025, %1005
%.07489 = phi <4 x float> [ %1002, %1005 ], [ %.17490, %1027 ], [ %.17490, %1025 ] ; <<4 x float>> [#uses=1]
load <4 x float>* null ; <<4 x float>>:1029 [#uses=0]
load <4 x float>* null ; <<4 x float>>:1030 [#uses=0]
- sub <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1031 [#uses=1]
+ fsub <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1031 [#uses=1]
br i1 false, label %1037, label %1032
; <label>:1032 ; preds = %xST.exit469
@@ -1368,8 +1368,8 @@ xST.exit472: ; preds = %1050, %1048, %1032
xST.exit474: ; preds = %1059, %1058, %1051
load <4 x float>* null ; <<4 x float>>:1060 [#uses=1]
- mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1061 [#uses=1]
- mul <4 x float> %1060, zeroinitializer ; <<4 x float>>:1062 [#uses=2]
+ fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1061 [#uses=1]
+ fmul <4 x float> %1060, zeroinitializer ; <<4 x float>>:1062 [#uses=2]
br i1 false, label %1065, label %1063
; <label>:1063 ; preds = %xST.exit474
@@ -1556,8 +1556,8 @@ xST.exit489: ; preds = %1109, %1108, %1101
xST.exit492: ; preds = %1118, %1117, %1110
load <4 x float>* null ; <<4 x float>>:1119 [#uses=1]
- mul <4 x float> %1119, zeroinitializer ; <<4 x float>>:1120 [#uses=1]
- mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1121 [#uses=1]
+ fmul <4 x float> %1119, zeroinitializer ; <<4 x float>>:1120 [#uses=1]
+ fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1121 [#uses=1]
br i1 false, label %1123, label %1122
; <label>:1122 ; preds = %xST.exit492
@@ -1591,8 +1591,8 @@ xST.exit495: ; preds = %1130, %1129, %1122
%.07582 = phi <4 x float> [ %1121, %1122 ], [ %.17583, %1130 ], [ %.17583, %1129 ] ; <<4 x float>> [#uses=1]
%.07590 = phi <4 x float> [ %1120, %1122 ], [ %.17591, %1130 ], [ %.17591, %1129 ] ; <<4 x float>> [#uses=1]
load <4 x float>* null ; <<4 x float>>:1131 [#uses=1]
- add <4 x float> %1131, zeroinitializer ; <<4 x float>>:1132 [#uses=1]
- add <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1133 [#uses=1]
+ fadd <4 x float> %1131, zeroinitializer ; <<4 x float>>:1132 [#uses=1]
+ fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1133 [#uses=1]
br i1 false, label %1135, label %1134
; <label>:1134 ; preds = %xST.exit495
@@ -1633,10 +1633,10 @@ xST.exit498: ; preds = %1142, %1141, %1134
shufflevector <4 x float> %1143, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1148 [#uses=1]
shufflevector <4 x float> %1145, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1149 [#uses=1]
shufflevector <4 x float> %1147, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1150 [#uses=1]
- mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1151 [#uses=1]
- mul <4 x float> zeroinitializer, %1148 ; <<4 x float>>:1152 [#uses=1]
- mul <4 x float> zeroinitializer, %1149 ; <<4 x float>>:1153 [#uses=1]
- mul <4 x float> zeroinitializer, %1150 ; <<4 x float>>:1154 [#uses=1]
+ fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1151 [#uses=1]
+ fmul <4 x float> zeroinitializer, %1148 ; <<4 x float>>:1152 [#uses=1]
+ fmul <4 x float> zeroinitializer, %1149 ; <<4 x float>>:1153 [#uses=1]
+ fmul <4 x float> zeroinitializer, %1150 ; <<4 x float>>:1154 [#uses=1]
br i1 false, label %1156, label %1155
; <label>:1155 ; preds = %xST.exit498
@@ -1676,10 +1676,10 @@ xST.exit501: ; preds = %1163, %1162, %1155
load <4 x float>* %1165 ; <<4 x float>>:1166 [#uses=1]
getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1167 [#uses=1]
load <4 x float>* %1167 ; <<4 x float>>:1168 [#uses=1]
- add <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1169 [#uses=1]
- add <4 x float> zeroinitializer, %1164 ; <<4 x float>>:1170 [#uses=1]
- add <4 x float> zeroinitializer, %1166 ; <<4 x float>>:1171 [#uses=1]
- add <4 x float> zeroinitializer, %1168 ; <<4 x float>>:1172 [#uses=1]
+ fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1169 [#uses=1]
+ fadd <4 x float> zeroinitializer, %1164 ; <<4 x float>>:1170 [#uses=1]
+ fadd <4 x float> zeroinitializer, %1166 ; <<4 x float>>:1171 [#uses=1]
+ fadd <4 x float> zeroinitializer, %1168 ; <<4 x float>>:1172 [#uses=1]
br i1 false, label %1174, label %1173
; <label>:1173 ; preds = %xST.exit501
@@ -1714,7 +1714,7 @@ xST.exit504: ; preds = %1181, %1180, %1173
%.07726 = phi <4 x float> [ %1171, %1173 ], [ %.17727, %1181 ], [ %.17727, %1180 ] ; <<4 x float>> [#uses=1]
%.07730 = phi <4 x float> [ %1170, %1173 ], [ %.17731, %1181 ], [ %.17731, %1180 ] ; <<4 x float>> [#uses=1]
%.07734 = phi <4 x float> [ %1169, %1173 ], [ %.17735, %1181 ], [ %.17735, %1180 ] ; <<4 x float>> [#uses=1]
- add <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1182 [#uses=1]
+ fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1182 [#uses=1]
br i1 false, label %1184, label %1183
; <label>:1183 ; preds = %xST.exit504
diff --git a/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll b/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll
index 5cccd31..aca0faa 100644
--- a/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll
+++ b/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll
@@ -9,8 +9,8 @@ entry:
%input2 = load <4 x float>* null, align 16 ; <<4 x float>>
%shuffle7 = shufflevector <4 x float> %input2, <4 x float> < float 0.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>> [#uses=1]
- %mul1 = mul <4 x float> %shuffle7, zeroinitializer ; <<4 x
- %add2 = add <4 x float> %mul1, %input2 ; <<4 x float>>
+ %mul1 = fmul <4 x float> %shuffle7, zeroinitializer ; <<4 x
+ %add2 = fadd <4 x float> %mul1, %input2 ; <<4 x float>>
store <4 x float> %add2, <4 x float>* null, align 16
ret void
}
diff --git a/test/CodeGen/PowerPC/2008-07-15-Fabs.ll b/test/CodeGen/PowerPC/2008-07-15-Fabs.ll
index 7d86434..f55ffac 100644
--- a/test/CodeGen/PowerPC/2008-07-15-Fabs.ll
+++ b/test/CodeGen/PowerPC/2008-07-15-Fabs.ll
@@ -7,11 +7,11 @@ entry:
call ppc_fp128 @fabsl( ppc_fp128 %d ) nounwind readnone ; <ppc_fp128>:0 [#uses=1]
fcmp olt ppc_fp128 0xM00000000000000000000000000000000, %0 ; <i1>:1 [#uses=1]
%.pn106 = select i1 %1, ppc_fp128 %a, ppc_fp128 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1]
- %.pn = sub ppc_fp128 0xM00000000000000000000000000000000, %.pn106 ; <ppc_fp128> [#uses=1]
+ %.pn = fsub ppc_fp128 0xM00000000000000000000000000000000, %.pn106 ; <ppc_fp128> [#uses=1]
%y.0 = fdiv ppc_fp128 %.pn, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1]
- mul ppc_fp128 %y.0, 0xM3FF00000000000000000000000000000 ; <ppc_fp128>:2 [#uses=1]
- add ppc_fp128 %2, mul (ppc_fp128 0xM00000000000000000000000000000000, ppc_fp128 0xM00000000000000000000000000000000) ; <ppc_fp128>:3 [#uses=1]
- %tmpi = add ppc_fp128 %3, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1]
+ fmul ppc_fp128 %y.0, 0xM3FF00000000000000000000000000000 ; <ppc_fp128>:2 [#uses=1]
+ fadd ppc_fp128 %2, fmul (ppc_fp128 0xM00000000000000000000000000000000, ppc_fp128 0xM00000000000000000000000000000000) ; <ppc_fp128>:3 [#uses=1]
+ %tmpi = fadd ppc_fp128 %3, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmpi, ppc_fp128* null, align 16
ret i256 0
}
diff --git a/test/CodeGen/PowerPC/2008-07-17-Fneg.ll b/test/CodeGen/PowerPC/2008-07-17-Fneg.ll
index 54bb4b3..a7f8181 100644
--- a/test/CodeGen/PowerPC/2008-07-17-Fneg.ll
+++ b/test/CodeGen/PowerPC/2008-07-17-Fneg.ll
@@ -7,7 +7,7 @@ entry:
br i1 false, label %bb3, label %bb4
bb3: ; preds = %entry
- sub ppc_fp128 0xM80000000000000000000000000000000, 0xM00000000000000000000000000000000 ; <ppc_fp128>:0 [#uses=1]
+ fsub ppc_fp128 0xM80000000000000000000000000000000, 0xM00000000000000000000000000000000 ; <ppc_fp128>:0 [#uses=1]
fptoui ppc_fp128 %0 to i32 ; <i32>:1 [#uses=1]
zext i32 %1 to i64 ; <i64>:2 [#uses=1]
sub i64 0, %2 ; <i64>:3 [#uses=1]
diff --git a/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll b/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll
index c181b1c..b625ceb 100644
--- a/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll
+++ b/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll
@@ -29,10 +29,10 @@ bb2217: ; preds = %bb2326
%10 = load float* %9, align 4 ; <float> [#uses=1]
%11 = getelementptr float* null, i32 3 ; <float*> [#uses=1]
%12 = load float* %11, align 4 ; <float> [#uses=1]
- %13 = mul float %10, 6.553500e+04 ; <float> [#uses=1]
- %14 = add float %13, 5.000000e-01 ; <float> [#uses=1]
- %15 = mul float %12, 6.553500e+04 ; <float> [#uses=1]
- %16 = add float %15, 5.000000e-01 ; <float> [#uses=3]
+ %13 = fmul float %10, 6.553500e+04 ; <float> [#uses=1]
+ %14 = fadd float %13, 5.000000e-01 ; <float> [#uses=1]
+ %15 = fmul float %12, 6.553500e+04 ; <float> [#uses=1]
+ %16 = fadd float %15, 5.000000e-01 ; <float> [#uses=3]
%17 = fcmp olt float %14, 0.000000e+00 ; <i1> [#uses=0]
%18 = fcmp olt float %16, 0.000000e+00 ; <i1> [#uses=1]
br i1 %18, label %bb2265, label %bb2262
@@ -68,10 +68,10 @@ bb2265: ; preds = %bb2264, %bb2262, %bb2217
%37 = load float* %36, align 4 ; <float> [#uses=1]
%38 = getelementptr float* %36, i32 1 ; <float*> [#uses=1]
%39 = load float* %38, align 4 ; <float> [#uses=1]
- %40 = mul float %37, 6.553500e+04 ; <float> [#uses=1]
- %41 = add float %40, 5.000000e-01 ; <float> [#uses=1]
- %42 = mul float %39, 6.553500e+04 ; <float> [#uses=1]
- %43 = add float %42, 5.000000e-01 ; <float> [#uses=3]
+ %40 = fmul float %37, 6.553500e+04 ; <float> [#uses=1]
+ %41 = fadd float %40, 5.000000e-01 ; <float> [#uses=1]
+ %42 = fmul float %39, 6.553500e+04 ; <float> [#uses=1]
+ %43 = fadd float %42, 5.000000e-01 ; <float> [#uses=3]
%44 = fcmp olt float %41, 0.000000e+00 ; <i1> [#uses=0]
%45 = fcmp olt float %43, 0.000000e+00 ; <i1> [#uses=1]
br i1 %45, label %bb2277, label %bb2274
@@ -88,10 +88,10 @@ bb2277: ; preds = %bb2274, %bb2265
%50 = load float* %49, align 4 ; <float> [#uses=1]
%51 = getelementptr float* %36, i32 3 ; <float*> [#uses=1]
%52 = load float* %51, align 4 ; <float> [#uses=1]
- %53 = mul float %50, 6.553500e+04 ; <float> [#uses=1]
- %54 = add float %53, 5.000000e-01 ; <float> [#uses=1]
- %55 = mul float %52, 6.553500e+04 ; <float> [#uses=1]
- %56 = add float %55, 5.000000e-01 ; <float> [#uses=1]
+ %53 = fmul float %50, 6.553500e+04 ; <float> [#uses=1]
+ %54 = fadd float %53, 5.000000e-01 ; <float> [#uses=1]
+ %55 = fmul float %52, 6.553500e+04 ; <float> [#uses=1]
+ %56 = fadd float %55, 5.000000e-01 ; <float> [#uses=1]
%57 = fcmp olt float %54, 0.000000e+00 ; <i1> [#uses=0]
%58 = fcmp olt float %56, 0.000000e+00 ; <i1> [#uses=0]
%59 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
@@ -111,10 +111,10 @@ bb2277: ; preds = %bb2274, %bb2265
%73 = load float* %72, align 4 ; <float> [#uses=1]
%74 = getelementptr float* %72, i32 1 ; <float*> [#uses=1]
%75 = load float* %74, align 4 ; <float> [#uses=1]
- %76 = mul float %73, 6.553500e+04 ; <float> [#uses=1]
- %77 = add float %76, 5.000000e-01 ; <float> [#uses=3]
- %78 = mul float %75, 6.553500e+04 ; <float> [#uses=1]
- %79 = add float %78, 5.000000e-01 ; <float> [#uses=1]
+ %76 = fmul float %73, 6.553500e+04 ; <float> [#uses=1]
+ %77 = fadd float %76, 5.000000e-01 ; <float> [#uses=3]
+ %78 = fmul float %75, 6.553500e+04 ; <float> [#uses=1]
+ %79 = fadd float %78, 5.000000e-01 ; <float> [#uses=1]
%80 = fcmp olt float %77, 0.000000e+00 ; <i1> [#uses=1]
br i1 %80, label %bb2295, label %bb2292
@@ -134,10 +134,10 @@ bb2295: ; preds = %bb2294, %bb2292, %bb2277
%86 = load float* %85, align 4 ; <float> [#uses=1]
%87 = getelementptr float* %72, i32 3 ; <float*> [#uses=1]
%88 = load float* %87, align 4 ; <float> [#uses=1]
- %89 = mul float %86, 6.553500e+04 ; <float> [#uses=1]
- %90 = add float %89, 5.000000e-01 ; <float> [#uses=1]
- %91 = mul float %88, 6.553500e+04 ; <float> [#uses=1]
- %92 = add float %91, 5.000000e-01 ; <float> [#uses=1]
+ %89 = fmul float %86, 6.553500e+04 ; <float> [#uses=1]
+ %90 = fadd float %89, 5.000000e-01 ; <float> [#uses=1]
+ %91 = fmul float %88, 6.553500e+04 ; <float> [#uses=1]
+ %92 = fadd float %91, 5.000000e-01 ; <float> [#uses=1]
%93 = fcmp olt float %90, 0.000000e+00 ; <i1> [#uses=0]
%94 = fcmp olt float %92, 0.000000e+00 ; <i1> [#uses=0]
%95 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
diff --git a/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll b/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll
index 0283082..c760b41 100644
--- a/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll
+++ b/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll
@@ -3,9 +3,9 @@
define void @__divtc3({ ppc_fp128, ppc_fp128 }* noalias sret %agg.result, ppc_fp128 %a, ppc_fp128 %b, ppc_fp128 %c, ppc_fp128 %d) nounwind {
entry:
%imag59 = load ppc_fp128* null, align 8 ; <ppc_fp128> [#uses=1]
- %0 = mul ppc_fp128 0xM00000000000000000000000000000000, %imag59 ; <ppc_fp128> [#uses=1]
- %1 = mul ppc_fp128 0xM00000000000000000000000000000000, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1]
- %2 = add ppc_fp128 %0, %1 ; <ppc_fp128> [#uses=1]
+ %0 = fmul ppc_fp128 0xM00000000000000000000000000000000, %imag59 ; <ppc_fp128> [#uses=1]
+ %1 = fmul ppc_fp128 0xM00000000000000000000000000000000, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1]
+ %2 = fadd ppc_fp128 %0, %1 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %2, ppc_fp128* null, align 16
unreachable
}
diff --git a/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll b/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll
index 4db5773..071c788 100644
--- a/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll
+++ b/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll
@@ -6,17 +6,17 @@ entry:
br i1 %0, label %bb5, label %bb1
bb1: ; preds = %entry
- %1 = mul ppc_fp128 %a, 0xM3DF00000000000000000000000000000 ; <ppc_fp128> [#uses=1]
+ %1 = fmul ppc_fp128 %a, 0xM3DF00000000000000000000000000000 ; <ppc_fp128> [#uses=1]
%2 = fptoui ppc_fp128 %1 to i32 ; <i32> [#uses=1]
%3 = zext i32 %2 to i64 ; <i64> [#uses=1]
%4 = shl i64 %3, 32 ; <i64> [#uses=3]
%5 = uitofp i64 %4 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- %6 = sub ppc_fp128 %a, %5 ; <ppc_fp128> [#uses=3]
+ %6 = fsub ppc_fp128 %a, %5 ; <ppc_fp128> [#uses=3]
%7 = fcmp olt ppc_fp128 %6, 0xM00000000000000000000000000000000 ; <i1> [#uses=1]
br i1 %7, label %bb2, label %bb3
bb2: ; preds = %bb1
- %8 = sub ppc_fp128 0xM80000000000000000000000000000000, %6 ; <ppc_fp128> [#uses=1]
+ %8 = fsub ppc_fp128 0xM80000000000000000000000000000000, %6 ; <ppc_fp128> [#uses=1]
%9 = fptoui ppc_fp128 %8 to i32 ; <i32> [#uses=1]
%10 = zext i32 %9 to i64 ; <i64> [#uses=1]
%11 = sub i64 %4, %10 ; <i64> [#uses=1]
diff --git a/test/CodeGen/PowerPC/buildvec_canonicalize.ll b/test/CodeGen/PowerPC/buildvec_canonicalize.ll
index 66428c7..20ff3db 100644
--- a/test/CodeGen/PowerPC/buildvec_canonicalize.ll
+++ b/test/CodeGen/PowerPC/buildvec_canonicalize.ll
@@ -11,7 +11,7 @@
define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
%tmp = load <4 x float>* %P3 ; <<4 x float>> [#uses=1]
%tmp3 = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %tmp4 = mul <4 x float> %tmp, %tmp3 ; <<4 x float>> [#uses=1]
+ %tmp4 = fmul <4 x float> %tmp, %tmp3 ; <<4 x float>> [#uses=1]
store <4 x float> %tmp4, <4 x float>* %P3
store <4 x float> zeroinitializer, <4 x float>* %P1
store <4 x i32> zeroinitializer, <4 x i32>* %P2
diff --git a/test/CodeGen/PowerPC/fma.ll b/test/CodeGen/PowerPC/fma.ll
index fd9bd74..4a6fe70 100644
--- a/test/CodeGen/PowerPC/fma.ll
+++ b/test/CodeGen/PowerPC/fma.ll
@@ -2,53 +2,53 @@
; RUN: egrep {fn?madd|fn?msub} | count 8
define double @test_FMADD1(double %A, double %B, double %C) {
- %D = mul double %A, %B ; <double> [#uses=1]
- %E = add double %D, %C ; <double> [#uses=1]
+ %D = fmul double %A, %B ; <double> [#uses=1]
+ %E = fadd double %D, %C ; <double> [#uses=1]
ret double %E
}
define double @test_FMADD2(double %A, double %B, double %C) {
- %D = mul double %A, %B ; <double> [#uses=1]
- %E = add double %D, %C ; <double> [#uses=1]
+ %D = fmul double %A, %B ; <double> [#uses=1]
+ %E = fadd double %D, %C ; <double> [#uses=1]
ret double %E
}
define double @test_FMSUB(double %A, double %B, double %C) {
- %D = mul double %A, %B ; <double> [#uses=1]
- %E = sub double %D, %C ; <double> [#uses=1]
+ %D = fmul double %A, %B ; <double> [#uses=1]
+ %E = fsub double %D, %C ; <double> [#uses=1]
ret double %E
}
define double @test_FNMADD1(double %A, double %B, double %C) {
- %D = mul double %A, %B ; <double> [#uses=1]
- %E = add double %D, %C ; <double> [#uses=1]
- %F = sub double -0.000000e+00, %E ; <double> [#uses=1]
+ %D = fmul double %A, %B ; <double> [#uses=1]
+ %E = fadd double %D, %C ; <double> [#uses=1]
+ %F = fsub double -0.000000e+00, %E ; <double> [#uses=1]
ret double %F
}
define double @test_FNMADD2(double %A, double %B, double %C) {
- %D = mul double %A, %B ; <double> [#uses=1]
- %E = add double %C, %D ; <double> [#uses=1]
- %F = sub double -0.000000e+00, %E ; <double> [#uses=1]
+ %D = fmul double %A, %B ; <double> [#uses=1]
+ %E = fadd double %C, %D ; <double> [#uses=1]
+ %F = fsub double -0.000000e+00, %E ; <double> [#uses=1]
ret double %F
}
define double @test_FNMSUB1(double %A, double %B, double %C) {
- %D = mul double %A, %B ; <double> [#uses=1]
- %E = sub double %C, %D ; <double> [#uses=1]
+ %D = fmul double %A, %B ; <double> [#uses=1]
+ %E = fsub double %C, %D ; <double> [#uses=1]
ret double %E
}
define double @test_FNMSUB2(double %A, double %B, double %C) {
- %D = mul double %A, %B ; <double> [#uses=1]
- %E = sub double %D, %C ; <double> [#uses=1]
- %F = sub double -0.000000e+00, %E ; <double> [#uses=1]
+ %D = fmul double %A, %B ; <double> [#uses=1]
+ %E = fsub double %D, %C ; <double> [#uses=1]
+ %F = fsub double -0.000000e+00, %E ; <double> [#uses=1]
ret double %F
}
define float @test_FNMSUBS(float %A, float %B, float %C) {
- %D = mul float %A, %B ; <float> [#uses=1]
- %E = sub float %D, %C ; <float> [#uses=1]
- %F = sub float -0.000000e+00, %E ; <float> [#uses=1]
+ %D = fmul float %A, %B ; <float> [#uses=1]
+ %E = fsub float %D, %C ; <float> [#uses=1]
+ %F = fsub float -0.000000e+00, %E ; <float> [#uses=1]
ret float %F
}
diff --git a/test/CodeGen/PowerPC/fnabs.ll b/test/CodeGen/PowerPC/fnabs.ll
index b9517de..6c10dfb 100644
--- a/test/CodeGen/PowerPC/fnabs.ll
+++ b/test/CodeGen/PowerPC/fnabs.ll
@@ -4,7 +4,7 @@ declare double @fabs(double)
define double @test(double %X) {
%Y = call double @fabs( double %X ) ; <double> [#uses=1]
- %Z = sub double -0.000000e+00, %Y ; <double> [#uses=1]
+ %Z = fsub double -0.000000e+00, %Y ; <double> [#uses=1]
ret double %Z
}
diff --git a/test/CodeGen/PowerPC/fneg.ll b/test/CodeGen/PowerPC/fneg.ll
index a4f49f7..9579a74 100644
--- a/test/CodeGen/PowerPC/fneg.ll
+++ b/test/CodeGen/PowerPC/fneg.ll
@@ -2,10 +2,10 @@
define double @test1(double %a, double %b, double %c, double %d) {
entry:
- %tmp2 = sub double -0.000000e+00, %c ; <double> [#uses=1]
- %tmp4 = mul double %tmp2, %d ; <double> [#uses=1]
- %tmp7 = mul double %a, %b ; <double> [#uses=1]
- %tmp9 = sub double %tmp7, %tmp4 ; <double> [#uses=1]
+ %tmp2 = fsub double -0.000000e+00, %c ; <double> [#uses=1]
+ %tmp4 = fmul double %tmp2, %d ; <double> [#uses=1]
+ %tmp7 = fmul double %a, %b ; <double> [#uses=1]
+ %tmp9 = fsub double %tmp7, %tmp4 ; <double> [#uses=1]
ret double %tmp9
}
diff --git a/test/CodeGen/PowerPC/int-fp-conv-1.ll b/test/CodeGen/PowerPC/int-fp-conv-1.ll
index 3d66675..583408c 100644
--- a/test/CodeGen/PowerPC/int-fp-conv-1.ll
+++ b/test/CodeGen/PowerPC/int-fp-conv-1.ll
@@ -3,7 +3,7 @@
define i64 @__fixunstfdi(ppc_fp128 %a) nounwind {
entry:
%tmp1213 = uitofp i64 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- %tmp15 = sub ppc_fp128 %a, %tmp1213 ; <ppc_fp128> [#uses=1]
+ %tmp15 = fsub ppc_fp128 %a, %tmp1213 ; <ppc_fp128> [#uses=1]
%tmp2829 = fptoui ppc_fp128 %tmp15 to i32 ; <i32> [#uses=1]
%tmp282930 = zext i32 %tmp2829 to i64 ; <i64> [#uses=1]
%tmp32 = add i64 %tmp282930, 0 ; <i64> [#uses=1]
diff --git a/test/CodeGen/PowerPC/itofp128.ll b/test/CodeGen/PowerPC/itofp128.ll
index 91119e9..4d74511 100644
--- a/test/CodeGen/PowerPC/itofp128.ll
+++ b/test/CodeGen/PowerPC/itofp128.ll
@@ -6,7 +6,7 @@ target triple = "powerpc64-apple-darwin9.2.0"
define i128 @__fixunstfti(ppc_fp128 %a) nounwind {
entry:
%tmp1213 = uitofp i128 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- %tmp15 = sub ppc_fp128 %a, %tmp1213 ; <ppc_fp128> [#uses=1]
+ %tmp15 = fsub ppc_fp128 %a, %tmp1213 ; <ppc_fp128> [#uses=1]
%tmp2829 = fptoui ppc_fp128 %tmp15 to i64 ; <i64> [#uses=1]
%tmp282930 = zext i64 %tmp2829 to i128 ; <i128> [#uses=1]
%tmp32 = add i128 %tmp282930, 0 ; <i128> [#uses=1]
diff --git a/test/CodeGen/PowerPC/mem-rr-addr-mode.ll b/test/CodeGen/PowerPC/mem-rr-addr-mode.ll
index d5484bd..fd0e1d4 100644
--- a/test/CodeGen/PowerPC/mem-rr-addr-mode.ll
+++ b/test/CodeGen/PowerPC/mem-rr-addr-mode.ll
@@ -9,9 +9,9 @@ define void @func(<4 x float>* %a, <4 x float>* %b) {
%tmp = load <4 x float>* %tmp1 ; <<4 x float>> [#uses=1]
%tmp3 = getelementptr <4 x float>* %a, i32 1 ; <<4 x float>*> [#uses=1]
%tmp4 = load <4 x float>* %tmp3 ; <<4 x float>> [#uses=1]
- %tmp5 = mul <4 x float> %tmp, %tmp4 ; <<4 x float>> [#uses=1]
+ %tmp5 = fmul <4 x float> %tmp, %tmp4 ; <<4 x float>> [#uses=1]
%tmp8 = load <4 x float>* %b ; <<4 x float>> [#uses=1]
- %tmp9 = add <4 x float> %tmp5, %tmp8 ; <<4 x float>> [#uses=1]
+ %tmp9 = fadd <4 x float> %tmp5, %tmp8 ; <<4 x float>> [#uses=1]
store <4 x float> %tmp9, <4 x float>* %a
ret void
}
diff --git a/test/CodeGen/PowerPC/multiple-return-values.ll b/test/CodeGen/PowerPC/multiple-return-values.ll
index b72b148..3f75f7d 100644
--- a/test/CodeGen/PowerPC/multiple-return-values.ll
+++ b/test/CodeGen/PowerPC/multiple-return-values.ll
@@ -3,7 +3,7 @@
define {i64, float} @bar(i64 %a, float %b) {
%y = add i64 %a, 7
- %z = add float %b, 7.0
+ %z = fadd float %b, 7.0
ret i64 %y, float %z
}
diff --git a/test/CodeGen/PowerPC/ppcf128-1-opt.ll b/test/CodeGen/PowerPC/ppcf128-1-opt.ll
index 5c059b4..e3c5ab1 100644
--- a/test/CodeGen/PowerPC/ppcf128-1-opt.ll
+++ b/test/CodeGen/PowerPC/ppcf128-1-opt.ll
@@ -5,19 +5,19 @@ target triple = "powerpc-apple-darwin8"
define ppc_fp128 @plus(ppc_fp128 %x, ppc_fp128 %y) {
entry:
- %tmp3 = add ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1]
+ %tmp3 = fadd ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1]
ret ppc_fp128 %tmp3
}
define ppc_fp128 @minus(ppc_fp128 %x, ppc_fp128 %y) {
entry:
- %tmp3 = sub ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1]
+ %tmp3 = fsub ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1]
ret ppc_fp128 %tmp3
}
define ppc_fp128 @times(ppc_fp128 %x, ppc_fp128 %y) {
entry:
- %tmp3 = mul ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1]
+ %tmp3 = fmul ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1]
ret ppc_fp128 %tmp3
}
diff --git a/test/CodeGen/PowerPC/ppcf128-1.ll b/test/CodeGen/PowerPC/ppcf128-1.ll
index ea8dd37..a487de7 100644
--- a/test/CodeGen/PowerPC/ppcf128-1.ll
+++ b/test/CodeGen/PowerPC/ppcf128-1.ll
@@ -14,7 +14,7 @@ entry:
store ppc_fp128 %y, ppc_fp128* %y_addr
%tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
%tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp3 = add ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
+ %tmp3 = fadd ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
%tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
@@ -36,7 +36,7 @@ entry:
store ppc_fp128 %y, ppc_fp128* %y_addr
%tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
%tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp3 = sub ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
+ %tmp3 = fsub ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
%tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
@@ -58,7 +58,7 @@ entry:
store ppc_fp128 %y, ppc_fp128* %y_addr
%tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
%tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp3 = mul ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
+ %tmp3 = fmul ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
%tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
diff --git a/test/CodeGen/PowerPC/ppcf128-2.ll b/test/CodeGen/PowerPC/ppcf128-2.ll
index b4f61f8..4318226 100644
--- a/test/CodeGen/PowerPC/ppcf128-2.ll
+++ b/test/CodeGen/PowerPC/ppcf128-2.ll
@@ -4,7 +4,7 @@ define i64 @__fixtfdi(ppc_fp128 %a) nounwind {
entry:
br i1 false, label %bb, label %bb8
bb: ; preds = %entry
- %tmp5 = sub ppc_fp128 0xM80000000000000000000000000000000, %a ; <ppc_fp128> [#uses=1]
+ %tmp5 = fsub ppc_fp128 0xM80000000000000000000000000000000, %a ; <ppc_fp128> [#uses=1]
%tmp6 = tail call i64 @__fixunstfdi( ppc_fp128 %tmp5 ) nounwind ; <i64> [#uses=0]
ret i64 0
bb8: ; preds = %entry
diff --git a/test/CodeGen/PowerPC/ppcf128-4.ll b/test/CodeGen/PowerPC/ppcf128-4.ll
index 8921dfc..16d6178 100644
--- a/test/CodeGen/PowerPC/ppcf128-4.ll
+++ b/test/CodeGen/PowerPC/ppcf128-4.ll
@@ -2,9 +2,9 @@
define ppc_fp128 @__floatditf(i64 %u) nounwind {
entry:
- %tmp6 = mul ppc_fp128 0xM00000000000000000000000000000000, 0xM41F00000000000000000000000000000
+ %tmp6 = fmul ppc_fp128 0xM00000000000000000000000000000000, 0xM41F00000000000000000000000000000
%tmp78 = trunc i64 %u to i32
%tmp789 = uitofp i32 %tmp78 to ppc_fp128
- %tmp11 = add ppc_fp128 %tmp789, %tmp6
+ %tmp11 = fadd ppc_fp128 %tmp789, %tmp6
ret ppc_fp128 %tmp11
}
diff --git a/test/CodeGen/PowerPC/return-val-i128.ll b/test/CodeGen/PowerPC/return-val-i128.ll
index 6e68ee3..27a5004 100644
--- a/test/CodeGen/PowerPC/return-val-i128.ll
+++ b/test/CodeGen/PowerPC/return-val-i128.ll
@@ -14,7 +14,7 @@ entry:
br i1 %toBool, label %bb, label %bb8
bb: ; preds = %entry
%tmp4 = load float* %a_addr, align 4 ; <float> [#uses=1]
- %tmp5 = sub float -0.000000e+00, %tmp4 ; <float> [#uses=1]
+ %tmp5 = fsub float -0.000000e+00, %tmp4 ; <float> [#uses=1]
%tmp6 = call i128 @__fixunssfDI( float %tmp5 ) nounwind ; <i128> [#uses=1]
%tmp7 = sub i128 0, %tmp6 ; <i128> [#uses=1]
store i128 %tmp7, i128* %tmp, align 16
diff --git a/test/CodeGen/PowerPC/unsafe-math.ll b/test/CodeGen/PowerPC/unsafe-math.ll
index 3d52d0c..d211b3b 100644
--- a/test/CodeGen/PowerPC/unsafe-math.ll
+++ b/test/CodeGen/PowerPC/unsafe-math.ll
@@ -3,8 +3,8 @@
; RUN: grep fmul | count 1
define double @foo(double %X) {
- %tmp1 = mul double %X, 1.23
- %tmp2 = mul double %tmp1, 4.124
+ %tmp1 = fmul double %X, 1.23
+ %tmp2 = fmul double %tmp1, 4.124
ret double %tmp2
}
diff --git a/test/CodeGen/PowerPC/vec_fneg.ll b/test/CodeGen/PowerPC/vec_fneg.ll
index 2ef2099..9fdbffd 100644
--- a/test/CodeGen/PowerPC/vec_fneg.ll
+++ b/test/CodeGen/PowerPC/vec_fneg.ll
@@ -2,7 +2,7 @@
define void @t(<4 x float>* %A) {
%tmp2 = load <4 x float>* %A
- %tmp3 = sub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp2
+ %tmp3 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp2
store <4 x float> %tmp3, <4 x float>* %A
ret void
}
diff --git a/test/CodeGen/PowerPC/vec_splat.ll b/test/CodeGen/PowerPC/vec_splat.ll
index a631137..7b7e4fe 100644
--- a/test/CodeGen/PowerPC/vec_splat.ll
+++ b/test/CodeGen/PowerPC/vec_splat.ll
@@ -15,7 +15,7 @@ define void @splat(%f4* %P, %f4* %Q, float %X) nounwind {
%tmp4 = insertelement %f4 %tmp2, float %X, i32 2 ; <%f4> [#uses=1]
%tmp6 = insertelement %f4 %tmp4, float %X, i32 3 ; <%f4> [#uses=1]
%q = load %f4* %Q ; <%f4> [#uses=1]
- %R = add %f4 %q, %tmp6 ; <%f4> [#uses=1]
+ %R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1]
store %f4 %R, %f4* %P
ret void
}
diff --git a/test/CodeGen/PowerPC/vec_zero.ll b/test/CodeGen/PowerPC/vec_zero.ll
index 8d06a7d..7350e91 100644
--- a/test/CodeGen/PowerPC/vec_zero.ll
+++ b/test/CodeGen/PowerPC/vec_zero.ll
@@ -2,7 +2,7 @@
define void @foo(<4 x float>* %P) {
%T = load <4 x float>* %P ; <<4 x float>> [#uses=1]
- %S = add <4 x float> zeroinitializer, %T ; <<4 x float>> [#uses=1]
+ %S = fadd <4 x float> zeroinitializer, %T ; <<4 x float>> [#uses=1]
store <4 x float> %S, <4 x float>* %P
ret void
}
diff --git a/test/CodeGen/PowerPC/vector.ll b/test/CodeGen/PowerPC/vector.ll
index 679e69e..a6c17b4 100644
--- a/test/CodeGen/PowerPC/vector.ll
+++ b/test/CodeGen/PowerPC/vector.ll
@@ -14,7 +14,7 @@
define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) {
%p = load %f1* %P ; <%f1> [#uses=1]
%q = load %f1* %Q ; <%f1> [#uses=1]
- %R = add %f1 %p, %q ; <%f1> [#uses=1]
+ %R = fadd %f1 %p, %q ; <%f1> [#uses=1]
store %f1 %R, %f1* %S
ret void
}
@@ -22,7 +22,7 @@ define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) {
define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
%p = load %f2* %P ; <%f2> [#uses=1]
%q = load %f2* %Q ; <%f2> [#uses=1]
- %R = add %f2 %p, %q ; <%f2> [#uses=1]
+ %R = fadd %f2 %p, %q ; <%f2> [#uses=1]
store %f2 %R, %f2* %S
ret void
}
@@ -30,7 +30,7 @@ define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) {
%p = load %f4* %P ; <%f4> [#uses=1]
%q = load %f4* %Q ; <%f4> [#uses=1]
- %R = add %f4 %p, %q ; <%f4> [#uses=1]
+ %R = fadd %f4 %p, %q ; <%f4> [#uses=1]
store %f4 %R, %f4* %S
ret void
}
@@ -38,7 +38,7 @@ define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) {
define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) {
%p = load %f8* %P ; <%f8> [#uses=1]
%q = load %f8* %Q ; <%f8> [#uses=1]
- %R = add %f8 %p, %q ; <%f8> [#uses=1]
+ %R = fadd %f8 %p, %q ; <%f8> [#uses=1]
store %f8 %R, %f8* %S
ret void
}
@@ -46,7 +46,7 @@ define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) {
define void @test_fmul(%f8* %P, %f8* %Q, %f8* %S) {
%p = load %f8* %P ; <%f8> [#uses=1]
%q = load %f8* %Q ; <%f8> [#uses=1]
- %R = mul %f8 %p, %q ; <%f8> [#uses=1]
+ %R = fmul %f8 %p, %q ; <%f8> [#uses=1]
store %f8 %R, %f8* %S
ret void
}
@@ -63,7 +63,7 @@ define void @test_div(%f8* %P, %f8* %Q, %f8* %S) {
define void @test_cst(%f4* %P, %f4* %S) {
%p = load %f4* %P ; <%f4> [#uses=1]
- %R = add %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float
+ %R = fadd %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float
2.000000e+00, float 4.500000e+00 > ; <%f4> [#uses=1]
store %f4 %R, %f4* %S
ret void
@@ -71,14 +71,14 @@ define void @test_cst(%f4* %P, %f4* %S) {
define void @test_zero(%f4* %P, %f4* %S) {
%p = load %f4* %P ; <%f4> [#uses=1]
- %R = add %f4 %p, zeroinitializer ; <%f4> [#uses=1]
+ %R = fadd %f4 %p, zeroinitializer ; <%f4> [#uses=1]
store %f4 %R, %f4* %S
ret void
}
define void @test_undef(%f4* %P, %f4* %S) {
%p = load %f4* %P ; <%f4> [#uses=1]
- %R = add %f4 %p, undef ; <%f4> [#uses=1]
+ %R = fadd %f4 %p, undef ; <%f4> [#uses=1]
store %f4 %R, %f4* %S
ret void
}
@@ -116,7 +116,7 @@ define double @test_extract_elt2(%d8* %P) {
define void @test_cast_1(%f4* %b, %i4* %a) {
%tmp = load %f4* %b ; <%f4> [#uses=1]
- %tmp2 = add %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float
+ %tmp2 = fadd %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float
3.000000e+00, float 4.000000e+00 > ; <%f4> [#uses=1]
%tmp3 = bitcast %f4 %tmp2 to %i4 ; <%i4> [#uses=1]
%tmp4 = add %i4 %tmp3, < i32 1, i32 2, i32 3, i32 4 >
@@ -140,7 +140,7 @@ define void @splat(%f4* %P, %f4* %Q, float %X) {
%tmp4 = insertelement %f4 %tmp2, float %X, i32 2
%tmp6 = insertelement %f4 %tmp4, float %X, i32 3
%q = load %f4* %Q ; <%f4> [#uses=1]
- %R = add %f4 %q, %tmp6 ; <%f4> [#uses=1]
+ %R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1]
store %f4 %R, %f4* %P
ret void
}