author    Dale Johannesen <dalej@apple.com>    2007-09-05 17:50:36 +0000
committer Dale Johannesen <dalej@apple.com>    2007-09-05 17:50:36 +0000
commit    c2ec2baf3d4de95695e695ee404efc251efba6d4 (patch)
tree      24eff24c3171c525448ce5326e6e5c549e4525dc /test/CodeGen
parent    d4af306aec6b37abf6d5509e107f277b681ec83d (diff)
Change all floating constants that are not exactly representable to use hex format.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@41722 91177308-0d34-0410-b5e6-96231b3b80d8
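For context, the hex form used throughout this patch is the raw IEEE-754 bit pattern of the constant, printed as a 64-bit double: 'double' constants print their own bit pattern, while 'float' constants are first rounded to single precision and then widened back to double before printing. The following Python sketch (not part of the commit; the helper names llvm_hex_float and llvm_hex_double are made up for illustration) reproduces that encoding under those assumptions:

    import struct

    def llvm_hex_float(value):
        # Round to the nearest IEEE-754 single, then take the bit pattern
        # of that value widened back to double -- the form LLVM IR prints
        # for 'float' constants that are not exact in binary.
        single = struct.unpack('>f', struct.pack('>f', value))[0]
        bits = struct.unpack('>Q', struct.pack('>d', single))[0]
        return '0x%016X' % bits

    def llvm_hex_double(value):
        # 'double' constants simply use their own 64-bit pattern.
        bits = struct.unpack('>Q', struct.pack('>d', value))[0]
        return '0x%016X' % bits

    print(llvm_hex_float(0.1))         # 0x3FB99999A0000000, as in the vector tests below
    print(llvm_hex_double(3.1415926))  # matches the double hex in the CBackend test below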
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/ARM/illegal-vector-bitcast.ll         | 2
-rw-r--r--  test/CodeGen/CBackend/2006-12-11-Float-Bitcast.ll  | 4
-rw-r--r--  test/CodeGen/Generic/2006-07-03-schedulers.ll      | 2
-rw-r--r--  test/CodeGen/Generic/constindices.ll               | 6
-rw-r--r--  test/CodeGen/Generic/sched.ll                      | 2
-rw-r--r--  test/CodeGen/Generic/select.ll                     | 2
-rw-r--r--  test/CodeGen/Generic/vector.ll                     | 2
-rw-r--r--  test/CodeGen/PowerPC/vector.ll                     | 2
-rw-r--r--  test/CodeGen/X86/peep-vector-extract-insert.ll     | 2
-rw-r--r--  test/CodeGen/X86/store-fp-constant.ll              | 6
-rw-r--r--  test/CodeGen/X86/v4f32-immediate.ll                | 2
-rw-r--r--  test/CodeGen/X86/vector.ll                         | 2
12 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/test/CodeGen/ARM/illegal-vector-bitcast.ll b/test/CodeGen/ARM/illegal-vector-bitcast.ll
index 6785cfd..79f9929 100644
--- a/test/CodeGen/ARM/illegal-vector-bitcast.ll
+++ b/test/CodeGen/ARM/illegal-vector-bitcast.ll
@@ -3,7 +3,7 @@
define void @foo(<8 x float>* %f, <8 x float>* %g, <4 x i64>* %y)
{
%h = load <8 x float>* %f
- %i = mul <8 x float> %h, <float 1.1, float 3.3, float 4.4, float 5.4, float 0.5, float 0.6, float 0.7, float 0.8>
+ %i = mul <8 x float> %h, <float 0x3FF19999A0000000, float 0x400A666660000000, float 0x40119999A0000000, float 0x40159999A0000000, float 0.5, float 0x3FE3333340000000, float 0x3FE6666660000000, float 0x3FE99999A0000000>
%m = bitcast <8 x float> %i to <4 x i64>
%z = load <4 x i64>* %y
%n = mul <4 x i64> %z, %m
diff --git a/test/CodeGen/CBackend/2006-12-11-Float-Bitcast.ll b/test/CodeGen/CBackend/2006-12-11-Float-Bitcast.ll
index df03c4e..e692005 100644
--- a/test/CodeGen/CBackend/2006-12-11-Float-Bitcast.ll
+++ b/test/CodeGen/CBackend/2006-12-11-Float-Bitcast.ll
@@ -38,9 +38,9 @@ float %test6(float %F) {
}
int %main(int %argc, sbyte** %argv) {
- %a = call int %test1(float 3.1415926)
+ %a = call int %test1(float 0x400921FB40000000)
%b = call float %test2(int %a)
- %c = call long %test3(double 3.1415926)
+ %c = call long %test3(double 0x400921FB4D12D84A)
%d = call double %test4(long %c)
%e = call double %test5(double 7.0)
%f = call float %test6(float 7.0)
diff --git a/test/CodeGen/Generic/2006-07-03-schedulers.ll b/test/CodeGen/Generic/2006-07-03-schedulers.ll
index 6edb7a0..a262850 100644
--- a/test/CodeGen/Generic/2006-07-03-schedulers.ll
+++ b/test/CodeGen/Generic/2006-07-03-schedulers.ll
@@ -20,7 +20,7 @@ bb1:
%z1 = add float %x1, %y1 ;; z1 = x1 + y1
%x2 = mul float %x, 0.5 ;; x2
- %y2 = mul float %y, 0.9 ;; y2
+ %y2 = mul float %y, 0x3FECCCCCC0000000 ;; y2
%z2 = add float %x2, %y2 ;; z2 = x2 + y2
%z3 = add float %z1, %z2 ;; z3 = z1 + z2
diff --git a/test/CodeGen/Generic/constindices.ll b/test/CodeGen/Generic/constindices.ll
index b176144..2467c37 100644
--- a/test/CodeGen/Generic/constindices.ll
+++ b/test/CodeGen/Generic/constindices.ll
@@ -21,14 +21,14 @@ begin
%ArrayB = alloca %MixedB, uint 3
%I1 = getelementptr %MixedA* %ScalarA, long 0, uint 0
- store float 1.4142, float *%I1
+ store float 0x3FF6A09020000000, float *%I1
%I2 = getelementptr %MixedB* %ScalarB, long 0, uint 1, uint 0
- store float 2.7183, float *%I2
+ store float 0x4005BF1420000000, float *%I2
%fptrA = getelementptr %MixedA* %ArrayA, long 1, uint 0
%fptrB = getelementptr %MixedB* %ArrayB, long 2, uint 1, uint 0
- store float 3.1415, float* %fptrA
+ store float 0x400921CAC0000000, float* %fptrA
store float 5.0, float* %fptrB
;; Test that a sequence of GEPs with constant indices are folded right
diff --git a/test/CodeGen/Generic/sched.ll b/test/CodeGen/Generic/sched.ll
index ed2f44e..f66e9f0 100644
--- a/test/CodeGen/Generic/sched.ll
+++ b/test/CodeGen/Generic/sched.ll
@@ -13,7 +13,7 @@ bb1:
%z1 = add float %x1, %y1 ;; z1 = x1 + y1
%x2 = mul float %x, 0.5 ;; x2
- %y2 = mul float %y, 0.9 ;; y2
+ %y2 = mul float %y, 0x3FECCCCCC0000000 ;; y2
%z2 = add float %x2, %y2 ;; z2 = x2 + y2
%z3 = add float %z1, %z2 ;; z3 = z1 + z2
diff --git a/test/CodeGen/Generic/select.ll b/test/CodeGen/Generic/select.ll
index edf3641..87ea405 100644
--- a/test/CodeGen/Generic/select.ll
+++ b/test/CodeGen/Generic/select.ll
@@ -15,7 +15,7 @@ begin
%i = add int %N, 12345678 ; constant has to be loaded
%b = add short 4, 3 ; one of the operands shd be immed
%c = add float %X, 0.0 ; will this be optimzzed?
- %d = add float %X, 3.1415 ; constant has to be loaded
+ %d = add float %X, 0x400921CAC0000000 ; constant has to be loaded
%f = add uint 4294967295, 10 ; result shd be 9 (not in immed fld)
%g = add ushort 20, 65535 ; result shd be 19 (65536 in immed fld)
%j = add ushort 65535, 30 ; result shd be 29 (not in immed fld)
diff --git a/test/CodeGen/Generic/vector.ll b/test/CodeGen/Generic/vector.ll
index 59f554b..0b21c75 100644
--- a/test/CodeGen/Generic/vector.ll
+++ b/test/CodeGen/Generic/vector.ll
@@ -64,7 +64,7 @@ void %test_div(%f8 *%P, %f8* %Q, %f8 *%S) {
void %test_cst(%f4 *%P, %f4 *%S) {
%p = load %f4* %P
- %R = add %f4 %p, <float 0.1, float 1.0, float 2.0, float 4.5>
+ %R = add %f4 %p, <float 0x3FB99999A0000000, float 1.0, float 2.0, float 4.5>
store %f4 %R, %f4 *%S
ret void
}
diff --git a/test/CodeGen/PowerPC/vector.ll b/test/CodeGen/PowerPC/vector.ll
index f8dbbb0..789b543 100644
--- a/test/CodeGen/PowerPC/vector.ll
+++ b/test/CodeGen/PowerPC/vector.ll
@@ -65,7 +65,7 @@ void %test_div(%f8 *%P, %f8* %Q, %f8 *%S) {
void %test_cst(%f4 *%P, %f4 *%S) {
%p = load %f4* %P
- %R = add %f4 %p, <float 0.1, float 1.0, float 2.0, float 4.5>
+ %R = add %f4 %p, <float 0x3FB99999A0000000, float 1.0, float 2.0, float 4.5>
store %f4 %R, %f4 *%S
ret void
}
diff --git a/test/CodeGen/X86/peep-vector-extract-insert.ll b/test/CodeGen/X86/peep-vector-extract-insert.ll
index 764a8c4..77332d0 100644
--- a/test/CodeGen/X86/peep-vector-extract-insert.ll
+++ b/test/CodeGen/X86/peep-vector-extract-insert.ll
@@ -6,7 +6,7 @@ define float @foo(<4 x float> %a) {
ret float %c
}
define float @bar(float %a) {
- %b = insertelement <4 x float> <float 3.4, float 4.5, float 0.0, float 9.2>, float %a, i32 3
+ %b = insertelement <4 x float> <float 0x400B333340000000, float 4.5, float 0.0, float 0x4022666660000000>, float %a, i32 3
%c = extractelement <4 x float> %b, i32 2
ret float %c
}
diff --git a/test/CodeGen/X86/store-fp-constant.ll b/test/CodeGen/X86/store-fp-constant.ll
index 3a80080..80f4e67 100644
--- a/test/CodeGen/X86/store-fp-constant.ll
+++ b/test/CodeGen/X86/store-fp-constant.ll
@@ -12,9 +12,9 @@ declare void %extdouble(double)
implementation
void %testfloatstore() {
- call void %extfloat(float 1234.4)
- call void %extdouble(double 1234.4123)
- store float 13.0123, float* %G
+ call void %extfloat(float 0x40934999A0000000)
+ call void %extdouble(double 0x409349A631F8A090)
+ store float 0x402A064C20000000, float* %G
ret void
}
diff --git a/test/CodeGen/X86/v4f32-immediate.ll b/test/CodeGen/X86/v4f32-immediate.ll
index 67b5e79..bd6045c 100644
--- a/test/CodeGen/X86/v4f32-immediate.ll
+++ b/test/CodeGen/X86/v4f32-immediate.ll
@@ -1,5 +1,5 @@
; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse | grep movaps
define <4 x float> @foo() {
- ret <4 x float> <float 3.223542354, float 2.3, float 1.2, float 0.1>
+ ret <4 x float> <float 0x4009C9D0A0000000, float 0x4002666660000000, float 0x3FF3333340000000, float 0x3FB99999A0000000>
}
diff --git a/test/CodeGen/X86/vector.ll b/test/CodeGen/X86/vector.ll
index 348d4d6..4925ea8 100644
--- a/test/CodeGen/X86/vector.ll
+++ b/test/CodeGen/X86/vector.ll
@@ -65,7 +65,7 @@ void %test_div(%f8 *%P, %f8* %Q, %f8 *%S) {
void %test_cst(%f4 *%P, %f4 *%S) {
%p = load %f4* %P
- %R = add %f4 %p, <float 0.1, float 1.0, float 2.0, float 4.5>
+ %R = add %f4 %p, <float 0x3FB99999A0000000, float 1.0, float 2.0, float 4.5>
store %f4 %R, %f4 *%S
ret void
}