author     Chris Lattner <sabre@nondot.org>    2011-11-27 06:54:59 +0000
committer  Chris Lattner <sabre@nondot.org>    2011-11-27 06:54:59 +0000
commit     d2bf432b2b6ba02e20958953a237213d48b00f20 (patch)
tree       e7f62c784abde309223ad5206d56fa2d33dd8b12 /test/CodeGen
parent     8ddff91282ec36360677d0febd34803fd9f02153 (diff)
Upgrade syntax of tests using volatile instructions to use 'load volatile' instead of 'volatile load', which is archaic.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@145171 91177308-0d34-0410-b5e6-96231b3b80d8
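For illustration only (not part of the commit itself): the change swaps the position of the 'volatile' keyword on load and store instructions; operands, alignment, and semantics are unchanged. A minimal sketch in the LLVM IR of that era, using a hypothetical i32 global @p:

    ; before this patch (archaic keyword order):
    %v0 = volatile load i32* @p, align 4
    volatile store i32 %v0, i32* @p, align 4
    ; after this patch (keyword follows the opcode):
    %v1 = load volatile i32* @p, align 4
    store volatile i32 %v1, i32* @p, align 4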
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll | 2
-rw-r--r--  test/CodeGen/ARM/call.ll | 2
-rw-r--r--  test/CodeGen/CBackend/2005-02-14-VolatileOperations.ll | 8
-rw-r--r--  test/CodeGen/CBackend/2005-09-27-VolatileFuncPtr.ll | 10
-rw-r--r--  test/CodeGen/CBackend/2008-02-01-UnalignedLoadStore.ll | 4
-rw-r--r--  test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll | 4
-rw-r--r--  test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll | 8
-rw-r--r--  test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll | 4
-rw-r--r--  test/CodeGen/MSP430/2009-10-10-OrImpDef.ll | 4
-rw-r--r--  test/CodeGen/MSP430/AddrMode-bis-rx.ll | 2
-rw-r--r--  test/CodeGen/MSP430/AddrMode-bis-xr.ll | 4
-rw-r--r--  test/CodeGen/MSP430/AddrMode-mov-rx.ll | 2
-rw-r--r--  test/CodeGen/MSP430/AddrMode-mov-xr.ll | 2
-rw-r--r--  test/CodeGen/Mips/2010-07-20-Switch.ll | 4
-rw-r--r--  test/CodeGen/Mips/mipslopat.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll | 6
-rw-r--r--  test/CodeGen/Thumb/vargs.ll | 4
-rw-r--r--  test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll | 4
-rw-r--r--  test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll | 2
-rw-r--r--  test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll | 10
-rw-r--r--  test/CodeGen/X86/2008-09-29-VolatileBug.ll | 2
-rw-r--r--  test/CodeGen/X86/2009-01-31-BigShift2.ll | 2
-rw-r--r--  test/CodeGen/X86/2009-03-23-MultiUseSched.ll | 50
-rw-r--r--  test/CodeGen/X86/2009-05-11-tailmerge-crash.ll | 2
-rw-r--r--  test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll | 6
-rw-r--r--  test/CodeGen/X86/block-placement.ll | 130
-rw-r--r--  test/CodeGen/X86/cmov.ll | 4
-rw-r--r--  test/CodeGen/X86/coalescer-commute1.ll | 2
-rw-r--r--  test/CodeGen/X86/crash.ll | 6
-rw-r--r--  test/CodeGen/X86/fp-stack-ret-conv.ll | 2
-rw-r--r--  test/CodeGen/X86/loop-strength-reduce5.ll | 4
-rw-r--r--  test/CodeGen/X86/lsr-nonaffine.ll | 2
-rw-r--r--  test/CodeGen/X86/lsr-sort.ll | 2
-rw-r--r--  test/CodeGen/X86/nancvt.ll | 12
-rw-r--r--  test/CodeGen/X86/narrow-shl-load.ll | 2
-rw-r--r--  test/CodeGen/X86/overlap-shift.ll | 2
-rw-r--r--  test/CodeGen/X86/pr1505b.ll | 4
-rw-r--r--  test/CodeGen/X86/pr2182.ll | 16
-rw-r--r--  test/CodeGen/X86/sext-subreg.ll | 6
-rw-r--r--  test/CodeGen/X86/stack-align.ll | 6
-rw-r--r--  test/CodeGen/X86/store-empty-member.ll | 2
-rw-r--r--  test/CodeGen/X86/tail-opts.ll | 20
-rw-r--r--  test/CodeGen/X86/twoaddr-lea.ll | 2
-rw-r--r--  test/CodeGen/X86/vec_shuffle-23.ll | 2
-rw-r--r--  test/CodeGen/X86/vec_shuffle-24.ll | 2
-rw-r--r--  test/CodeGen/X86/volatile.ll | 10
-rw-r--r--  test/CodeGen/XCore/licm-ldwcp.ll | 2
-rw-r--r--  test/CodeGen/XCore/scavenging.ll | 48
48 files changed, 210 insertions(+), 228 deletions(-)
diff --git a/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll b/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll
index 78c6222..94c562b 100644
--- a/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll
+++ b/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll
@@ -11,7 +11,7 @@ bb74.i: ; preds = %bb88.i, %bb74.i, %entry
bb88.i: ; preds = %bb74.i
br i1 false, label %mandel.exit, label %bb74.i
mandel.exit: ; preds = %bb88.i
- %tmp2 = volatile load double* getelementptr ({ double, double }* @accum, i32 0, i32 0), align 8 ; <double> [#uses=1]
+ %tmp2 = load volatile double* getelementptr ({ double, double }* @accum, i32 0, i32 0), align 8 ; <double> [#uses=1]
%tmp23 = fptosi double %tmp2 to i32 ; <i32> [#uses=1]
%tmp5 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @.str, i32 0, i32 0), i32 %tmp23 ) ; <i32> [#uses=0]
ret i32 0
diff --git a/test/CodeGen/ARM/call.ll b/test/CodeGen/ARM/call.ll
index 0f9543f..107e79a 100644
--- a/test/CodeGen/ARM/call.ll
+++ b/test/CodeGen/ARM/call.ll
@@ -26,7 +26,7 @@ define i32* @m_231b(i32, i32, i32*, i32*, i32*) nounwind {
; CHECKV4: bx r{{.*}}
BB0:
%5 = inttoptr i32 %0 to i32* ; <i32*> [#uses=1]
- %t35 = volatile load i32* %5 ; <i32> [#uses=1]
+ %t35 = load volatile i32* %5 ; <i32> [#uses=1]
%6 = inttoptr i32 %t35 to i32** ; <i32**> [#uses=1]
%7 = getelementptr i32** %6, i32 86 ; <i32**> [#uses=1]
%8 = load i32** %7 ; <i32*> [#uses=1]
diff --git a/test/CodeGen/CBackend/2005-02-14-VolatileOperations.ll b/test/CodeGen/CBackend/2005-02-14-VolatileOperations.ll
deleted file mode 100644
index dd505af..0000000
--- a/test/CodeGen/CBackend/2005-02-14-VolatileOperations.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=c | grep volatile
-
-define void @test(i32* %P) {
- %X = volatile load i32* %P ; <i32> [#uses=1]
- volatile store i32 %X, i32* %P
- ret void
-}
-
diff --git a/test/CodeGen/CBackend/2005-09-27-VolatileFuncPtr.ll b/test/CodeGen/CBackend/2005-09-27-VolatileFuncPtr.ll
deleted file mode 100644
index 99de837..0000000
--- a/test/CodeGen/CBackend/2005-09-27-VolatileFuncPtr.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=c | grep {\\* *volatile *\\*}
-
-@G = external global void ()* ; <void ()**> [#uses=2]
-
-define void @test() {
- volatile store void ()* @test, void ()** @G
- volatile load void ()** @G ; <void ()*>:1 [#uses=0]
- ret void
-}
-
diff --git a/test/CodeGen/CBackend/2008-02-01-UnalignedLoadStore.ll b/test/CodeGen/CBackend/2008-02-01-UnalignedLoadStore.ll
index 6e0cf68..e6eeba3 100644
--- a/test/CodeGen/CBackend/2008-02-01-UnalignedLoadStore.ll
+++ b/test/CodeGen/CBackend/2008-02-01-UnalignedLoadStore.ll
@@ -8,8 +8,8 @@ define void @test(i32* %P) {
}
define void @test2(i32* %P) {
- %X = volatile load i32* %P, align 2
- volatile store i32 %X, i32* %P, align 2
+ %X = load volatile i32* %P, align 2
+ store volatile i32 %X, i32* %P, align 2
ret void
}
diff --git a/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll b/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll
index f339373..4c7d2d0 100644
--- a/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll
+++ b/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll
@@ -7,9 +7,9 @@ target triple = "msp430-unknown-linux-gnu"
define void @uip_arp_arpin() nounwind {
entry:
- %tmp = volatile load i16* @uip_len ; <i16> [#uses=1]
+ %tmp = load volatile i16* @uip_len ; <i16> [#uses=1]
%cmp = icmp ult i16 %tmp, 42 ; <i1> [#uses=1]
- volatile store i16 0, i16* @uip_len
+ store volatile i16 0, i16* @uip_len
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
diff --git a/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll b/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll
index 088d3e1..e8c0d14 100644
--- a/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll
+++ b/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll
@@ -6,8 +6,8 @@ target triple = "msp430-generic-generic"
define i16 @foo() nounwind readnone {
entry:
%result = alloca i16, align 1 ; <i16*> [#uses=2]
- volatile store i16 0, i16* %result
- %tmp = volatile load i16* %result ; <i16> [#uses=1]
+ store volatile i16 0, i16* %result
+ %tmp = load volatile i16* %result ; <i16> [#uses=1]
ret i16 %tmp
}
@@ -22,8 +22,8 @@ while.cond: ; preds = %while.cond, %entry
while.end: ; preds = %while.cond
%result.i = alloca i16, align 1 ; <i16*> [#uses=2]
- volatile store i16 0, i16* %result.i
- %tmp.i = volatile load i16* %result.i ; <i16> [#uses=0]
+ store volatile i16 0, i16* %result.i
+ %tmp.i = load volatile i16* %result.i ; <i16> [#uses=0]
ret i16 0
}
diff --git a/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll b/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll
index 4d7d9b9..9fab482 100644
--- a/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll
+++ b/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll
@@ -11,10 +11,10 @@ entry:
%x.addr = alloca i8 ; <i8*> [#uses=2]
%tmp = alloca i8, align 1 ; <i8*> [#uses=2]
store i8 %x, i8* %x.addr
- %tmp1 = volatile load i8* @"\010x0021" ; <i8> [#uses=1]
+ %tmp1 = load volatile i8* @"\010x0021" ; <i8> [#uses=1]
store i8 %tmp1, i8* %tmp
%tmp2 = load i8* %x.addr ; <i8> [#uses=1]
- volatile store i8 %tmp2, i8* @"\010x0021"
+ store volatile i8 %tmp2, i8* @"\010x0021"
%tmp3 = load i8* %tmp ; <i8> [#uses=1]
store i8 %tmp3, i8* %retval
%0 = load i8* %retval ; <i8> [#uses=1]
diff --git a/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll b/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll
index 856eb9d..c1a186a 100644
--- a/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll
+++ b/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll
@@ -4,9 +4,9 @@ define void @foo() nounwind {
entry:
%r = alloca i8 ; <i8*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- volatile load i8* %r, align 1 ; <i8>:0 [#uses=1]
+ load volatile i8* %r, align 1 ; <i8>:0 [#uses=1]
or i8 %0, 1 ; <i8>:1 [#uses=1]
- volatile store i8 %1, i8* %r, align 1
+ store volatile i8 %1, i8* %r, align 1
br label %return
return: ; preds = %entry
diff --git a/test/CodeGen/MSP430/AddrMode-bis-rx.ll b/test/CodeGen/MSP430/AddrMode-bis-rx.ll
index 4f9a724..c7ecb5a 100644
--- a/test/CodeGen/MSP430/AddrMode-bis-rx.ll
+++ b/test/CodeGen/MSP430/AddrMode-bis-rx.ll
@@ -32,7 +32,7 @@ define i8 @am3(i8 %x, i16 %n) nounwind {
; CHECK: bis.b bar(r14), r15
define i16 @am4(i16 %x) nounwind {
- %1 = volatile load i16* inttoptr(i16 32 to i16*)
+ %1 = load volatile i16* inttoptr(i16 32 to i16*)
%2 = or i16 %1,%x
ret i16 %2
}
diff --git a/test/CodeGen/MSP430/AddrMode-bis-xr.ll b/test/CodeGen/MSP430/AddrMode-bis-xr.ll
index 17ebd87..727c29f 100644
--- a/test/CodeGen/MSP430/AddrMode-bis-xr.ll
+++ b/test/CodeGen/MSP430/AddrMode-bis-xr.ll
@@ -35,9 +35,9 @@ define void @am3(i16 %i, i8 %x) nounwind {
; CHECK: bis.b r14, bar(r15)
define void @am4(i16 %x) nounwind {
- %1 = volatile load i16* inttoptr(i16 32 to i16*)
+ %1 = load volatile i16* inttoptr(i16 32 to i16*)
%2 = or i16 %x, %1
- volatile store i16 %2, i16* inttoptr(i16 32 to i16*)
+ store volatile i16 %2, i16* inttoptr(i16 32 to i16*)
ret void
}
; CHECK: am4:
diff --git a/test/CodeGen/MSP430/AddrMode-mov-rx.ll b/test/CodeGen/MSP430/AddrMode-mov-rx.ll
index 6676b88..7cd345b 100644
--- a/test/CodeGen/MSP430/AddrMode-mov-rx.ll
+++ b/test/CodeGen/MSP430/AddrMode-mov-rx.ll
@@ -29,7 +29,7 @@ define i8 @am3(i16 %n) nounwind {
; CHECK: mov.b bar(r15), r15
define i16 @am4() nounwind {
- %1 = volatile load i16* inttoptr(i16 32 to i16*)
+ %1 = load volatile i16* inttoptr(i16 32 to i16*)
ret i16 %1
}
; CHECK: am4:
diff --git a/test/CodeGen/MSP430/AddrMode-mov-xr.ll b/test/CodeGen/MSP430/AddrMode-mov-xr.ll
index 4b327b0..5eeb02f 100644
--- a/test/CodeGen/MSP430/AddrMode-mov-xr.ll
+++ b/test/CodeGen/MSP430/AddrMode-mov-xr.ll
@@ -29,7 +29,7 @@ define void @am3(i16 %i, i8 %a) nounwind {
; CHECK: mov.b r14, bar(r15)
define void @am4(i16 %a) nounwind {
- volatile store i16 %a, i16* inttoptr(i16 32 to i16*)
+ store volatile i16 %a, i16* inttoptr(i16 32 to i16*)
ret void
}
; CHECK: am4:
diff --git a/test/CodeGen/Mips/2010-07-20-Switch.ll b/test/CodeGen/Mips/2010-07-20-Switch.ll
index 5425bdf..83b8a25 100644
--- a/test/CodeGen/Mips/2010-07-20-Switch.ll
+++ b/test/CodeGen/Mips/2010-07-20-Switch.ll
@@ -3,8 +3,8 @@
define i32 @main() nounwind readnone {
entry:
%x = alloca i32, align 4 ; <i32*> [#uses=2]
- volatile store i32 2, i32* %x, align 4
- %0 = volatile load i32* %x, align 4 ; <i32> [#uses=1]
+ store volatile i32 2, i32* %x, align 4
+ %0 = load volatile i32* %x, align 4 ; <i32> [#uses=1]
; CHECK: lui $3, %hi($JTI0_0)
; CHECK: addiu $3, $3, %lo($JTI0_0)
; CHECK: sll $2, $2, 2
diff --git a/test/CodeGen/Mips/mipslopat.ll b/test/CodeGen/Mips/mipslopat.ll
index 0279828..1f433b9 100644
--- a/test/CodeGen/Mips/mipslopat.ll
+++ b/test/CodeGen/Mips/mipslopat.ll
@@ -6,7 +6,7 @@
define void @simple_vol_file() nounwind {
entry:
- %tmp = volatile load i32** @stat_vol_ptr_int, align 4
+ %tmp = load volatile i32** @stat_vol_ptr_int, align 4
%0 = bitcast i32* %tmp to i8*
call void @llvm.prefetch(i8* %0, i32 0, i32 0, i32 1)
%tmp1 = load i32** @stat_ptr_vol_int, align 4
diff --git a/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll b/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll
index 7b6d491..e7a1cf6 100644
--- a/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll
+++ b/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll
@@ -12,7 +12,7 @@ declare void @IODelay(i32)
define i32 @_Z14ProgramByWordsPvyy(i8* %buffer, i64 %Offset, i64 %bufferSize) nounwind {
entry:
- volatile store i8 -1, i8* null, align 1
+ store volatile i8 -1, i8* null, align 1
%tmp28 = icmp eq i8 0, 0 ; <i1> [#uses=1]
br i1 %tmp28, label %bb107, label %bb
@@ -43,7 +43,7 @@ bb68: ; preds = %bb31
%tmp2021.i = trunc i64 %Pos.0.reg2mem.0 to i32 ; <i32> [#uses=1]
%tmp202122.i = inttoptr i32 %tmp2021.i to i8* ; <i8*> [#uses=1]
tail call void @IODelay( i32 500 ) nounwind
- %tmp53.i = volatile load i16* null, align 2 ; <i16> [#uses=2]
+ %tmp53.i = load volatile i16* null, align 2 ; <i16> [#uses=2]
%tmp5455.i = zext i16 %tmp53.i to i32 ; <i32> [#uses=1]
br i1 false, label %bb.i, label %bb65.i
@@ -59,7 +59,7 @@ bb70.i: ; preds = %bb65.i
ret i32 0
_Z24unlock_then_erase_sectory.exit: ; preds = %bb65.i
- volatile store i8 -1, i8* %tmp202122.i, align 1
+ store volatile i8 -1, i8* %tmp202122.i, align 1
%tmp93 = add i64 0, %Pos.0.reg2mem.0 ; <i64> [#uses=2]
%tmp98 = add i64 0, %Offset ; <i64> [#uses=1]
%tmp100 = icmp ugt i64 %tmp98, %tmp93 ; <i1> [#uses=1]
diff --git a/test/CodeGen/Thumb/vargs.ll b/test/CodeGen/Thumb/vargs.ll
index c2ba208..50a1a07 100644
--- a/test/CodeGen/Thumb/vargs.ll
+++ b/test/CodeGen/Thumb/vargs.ll
@@ -13,9 +13,9 @@ entry:
bb: ; preds = %bb, %entry
%a_addr.0 = phi i32 [ %a, %entry ], [ %tmp5, %bb ] ; <i32> [#uses=2]
- %tmp = volatile load i8** %va ; <i8*> [#uses=2]
+ %tmp = load volatile i8** %va ; <i8*> [#uses=2]
%tmp2 = getelementptr i8* %tmp, i32 4 ; <i8*> [#uses=1]
- volatile store i8* %tmp2, i8** %va
+ store volatile i8* %tmp2, i8** %va
%tmp5 = add i32 %a_addr.0, -1 ; <i32> [#uses=1]
%tmp.upgrd.2 = icmp eq i32 %a_addr.0, 1 ; <i1> [#uses=1]
br i1 %tmp.upgrd.2, label %bb7, label %bb
diff --git a/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll b/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
index bb734ac..fcf1bae 100644
--- a/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
+++ b/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
@@ -21,7 +21,7 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-
define void @test(%s1* %this, i32 %format, i32 %w, i32 %h, i32 %levels, i32* %s, i8* %data, i32* nocapture %rowbytes, void (i8*, i8*)* %release, i8* %info) nounwind {
entry:
%tmp1 = getelementptr inbounds %s1* %this, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
- volatile store i32 1, i32* %tmp1, align 4
+ store volatile i32 1, i32* %tmp1, align 4
%tmp12 = getelementptr inbounds %s1* %this, i32 0, i32 1
store i32 %levels, i32* %tmp12, align 4
%tmp13 = getelementptr inbounds %s1* %this, i32 0, i32 3
@@ -46,7 +46,7 @@ entry:
%tmp24 = shl i32 %flags.0, 16
%asmtmp.i.i.i = tail call %0 asm sideeffect "\0A0:\09ldrex $1, [$2]\0A\09orr $1, $1, $3\0A\09strex $0, $1, [$2]\0A\09cmp $0, #0\0A\09bne 0b", "=&r,=&r,r,r,~{memory},~{cc}"(i32* %tmp1, i32 %tmp24) nounwind
%tmp25 = getelementptr inbounds %s1* %this, i32 0, i32 2, i32 0, i32 0
- volatile store i32 1, i32* %tmp25, align 4
+ store volatile i32 1, i32* %tmp25, align 4
%tmp26 = icmp eq i32 %levels, 0
br i1 %tmp26, label %return, label %bb4
diff --git a/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll b/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll
index 90af387..a6234d3 100644
--- a/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll
+++ b/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=x86 | not grep movsd
; RUN: llc < %s -march=x86 | grep movw
; RUN: llc < %s -march=x86 | grep addw
-; These transforms are turned off for volatile loads and stores.
+; These transforms are turned off for load volatiles and stores.
; Check that they weren't turned off for all loads and stores!
@atomic = global double 0.000000e+00 ; <double*> [#uses=1]
diff --git a/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll b/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll
index 8665282..037559e 100644
--- a/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll
+++ b/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll
@@ -8,13 +8,13 @@
define i16 @f(i64 %x, double %y) {
%b = bitcast i64 %x to double ; <double> [#uses=1]
- volatile store double %b, double* @atomic ; one processor operation only
- volatile store double 0.000000e+00, double* @atomic2 ; one processor operation only
+ store volatile double %b, double* @atomic ; one processor operation only
+ store volatile double 0.000000e+00, double* @atomic2 ; one processor operation only
%b2 = bitcast double %y to i64 ; <i64> [#uses=1]
- volatile store i64 %b2, i64* @anything ; may transform to store of double
- %l = volatile load i32* @ioport ; must not narrow
+ store volatile i64 %b2, i64* @anything ; may transform to store of double
+ %l = load volatile i32* @ioport ; must not narrow
%t = trunc i32 %l to i16 ; <i16> [#uses=1]
- %l2 = volatile load i32* @ioport ; must not narrow
+ %l2 = load volatile i32* @ioport ; must not narrow
%tmp = lshr i32 %l2, 16 ; <i32> [#uses=1]
%t2 = trunc i32 %tmp to i16 ; <i16> [#uses=1]
%f = add i16 %t, %t2 ; <i16> [#uses=1]
diff --git a/test/CodeGen/X86/2008-09-29-VolatileBug.ll b/test/CodeGen/X86/2008-09-29-VolatileBug.ll
index 935c4c5..f35245b 100644
--- a/test/CodeGen/X86/2008-09-29-VolatileBug.ll
+++ b/test/CodeGen/X86/2008-09-29-VolatileBug.ll
@@ -6,7 +6,7 @@
define i32 @main() nounwind {
entry:
- %0 = volatile load i32* @g_407, align 4 ; <i32> [#uses=1]
+ %0 = load volatile i32* @g_407, align 4 ; <i32> [#uses=1]
%1 = trunc i32 %0 to i8 ; <i8> [#uses=1]
%2 = tail call i32 @func_45(i8 zeroext %1) nounwind ; <i32> [#uses=0]
ret i32 0
diff --git a/test/CodeGen/X86/2009-01-31-BigShift2.ll b/test/CodeGen/X86/2009-01-31-BigShift2.ll
index 9d24084..3e42553 100644
--- a/test/CodeGen/X86/2009-01-31-BigShift2.ll
+++ b/test/CodeGen/X86/2009-01-31-BigShift2.ll
@@ -6,6 +6,6 @@ define void @test(<8 x double>* %P, i64* %Q) nounwind {
%B = bitcast <8 x double> %A to i512 ; <i512> [#uses=1]
%C = lshr i512 %B, 448 ; <i512> [#uses=1]
%D = trunc i512 %C to i64 ; <i64> [#uses=1]
- volatile store i64 %D, i64* %Q
+ store volatile i64 %D, i64* %Q
ret void
}
diff --git a/test/CodeGen/X86/2009-03-23-MultiUseSched.ll b/test/CodeGen/X86/2009-03-23-MultiUseSched.ll
index 90dabb8..8bbdb0e 100644
--- a/test/CodeGen/X86/2009-03-23-MultiUseSched.ll
+++ b/test/CodeGen/X86/2009-03-23-MultiUseSched.ll
@@ -9,30 +9,30 @@
@X = external global i64 ; <i64*> [#uses=25]
define fastcc i64 @foo() nounwind {
- %tmp = volatile load i64* @X ; <i64> [#uses=7]
- %tmp1 = volatile load i64* @X ; <i64> [#uses=5]
- %tmp2 = volatile load i64* @X ; <i64> [#uses=3]
- %tmp3 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp4 = volatile load i64* @X ; <i64> [#uses=5]
- %tmp5 = volatile load i64* @X ; <i64> [#uses=3]
- %tmp6 = volatile load i64* @X ; <i64> [#uses=2]
- %tmp7 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp8 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp9 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp10 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp11 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp12 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp13 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp14 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp15 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp16 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp17 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp18 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp19 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp20 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp21 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp22 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp23 = volatile load i64* @X ; <i64> [#uses=1]
+ %tmp = load volatile i64* @X ; <i64> [#uses=7]
+ %tmp1 = load volatile i64* @X ; <i64> [#uses=5]
+ %tmp2 = load volatile i64* @X ; <i64> [#uses=3]
+ %tmp3 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp4 = load volatile i64* @X ; <i64> [#uses=5]
+ %tmp5 = load volatile i64* @X ; <i64> [#uses=3]
+ %tmp6 = load volatile i64* @X ; <i64> [#uses=2]
+ %tmp7 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp8 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp9 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp10 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp11 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp12 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp13 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp14 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp15 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp16 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp17 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp18 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp19 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp20 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp21 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp22 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp23 = load volatile i64* @X ; <i64> [#uses=1]
%tmp24 = call i64 @llvm.bswap.i64(i64 %tmp8) ; <i64> [#uses=1]
%tmp25 = add i64 %tmp6, %tmp5 ; <i64> [#uses=1]
%tmp26 = add i64 %tmp25, %tmp4 ; <i64> [#uses=1]
@@ -229,7 +229,7 @@ define fastcc i64 @foo() nounwind {
%tmp217 = add i64 %tmp205, %tmp215 ; <i64> [#uses=1]
%tmp218 = add i64 %tmp217, %tmp211 ; <i64> [#uses=1]
%tmp219 = call i64 @llvm.bswap.i64(i64 %tmp23) ; <i64> [#uses=2]
- volatile store i64 %tmp219, i64* @X, align 8
+ store volatile i64 %tmp219, i64* @X, align 8
%tmp220 = add i64 %tmp203, %tmp190 ; <i64> [#uses=1]
%tmp221 = add i64 %tmp220, %tmp216 ; <i64> [#uses=1]
%tmp222 = add i64 %tmp219, %tmp177 ; <i64> [#uses=1]
diff --git a/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll b/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll
index a5e28c0..c2cd89c 100644
--- a/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll
+++ b/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll
@@ -12,7 +12,7 @@ entry:
br label %bb
bb: ; preds = %bb.i, %bb, %entry
- %2 = volatile load i32* @g_9, align 4 ; <i32> [#uses=2]
+ %2 = load volatile i32* @g_9, align 4 ; <i32> [#uses=2]
%3 = icmp sgt i32 %2, 1 ; <i1> [#uses=1]
%4 = and i1 %3, %1 ; <i1> [#uses=1]
br i1 %4, label %bb.i, label %bb
diff --git a/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll b/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll
index 790fd88..410a42a 100644
--- a/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll
+++ b/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll
@@ -41,18 +41,18 @@ bb3: ; preds = %bb2, %bb
br i1 undef, label %bb5, label %bb4
bb4: ; preds = %bb3
- %17 = volatile load i32* @uint8, align 4 ; <i32> [#uses=0]
+ %17 = load volatile i32* @uint8, align 4 ; <i32> [#uses=0]
br label %bb5
bb5: ; preds = %bb4, %bb3
- %18 = volatile load i32* @uint8, align 4 ; <i32> [#uses=0]
+ %18 = load volatile i32* @uint8, align 4 ; <i32> [#uses=0]
%19 = sext i8 undef to i16 ; <i16> [#uses=1]
%20 = tail call i32 @func_24(i16 zeroext %19, i8 signext 1) nounwind; <i32> [#uses=0]
br i1 undef, label %return, label %bb6.preheader
bb6.preheader: ; preds = %bb5
%21 = sext i8 %p_52 to i32 ; <i32> [#uses=1]
- %22 = volatile load i32* @uint8, align 4 ; <i32> [#uses=0]
+ %22 = load volatile i32* @uint8, align 4 ; <i32> [#uses=0]
%23 = tail call i32 (...)* @safefuncts(i32 %21, i32 1) nounwind; <i32> [#uses=0]
unreachable
diff --git a/test/CodeGen/X86/block-placement.ll b/test/CodeGen/X86/block-placement.ll
index d0afee6..66def49 100644
--- a/test/CodeGen/X86/block-placement.ll
+++ b/test/CodeGen/X86/block-placement.ll
@@ -690,199 +690,199 @@ define void @many_unanalyzable_branches() {
entry:
br label %0
- %val0 = volatile load float* undef
+ %val0 = load volatile float* undef
%cmp0 = fcmp une float %val0, undef
br i1 %cmp0, label %1, label %0
- %val1 = volatile load float* undef
+ %val1 = load volatile float* undef
%cmp1 = fcmp une float %val1, undef
br i1 %cmp1, label %2, label %1
- %val2 = volatile load float* undef
+ %val2 = load volatile float* undef
%cmp2 = fcmp une float %val2, undef
br i1 %cmp2, label %3, label %2
- %val3 = volatile load float* undef
+ %val3 = load volatile float* undef
%cmp3 = fcmp une float %val3, undef
br i1 %cmp3, label %4, label %3
- %val4 = volatile load float* undef
+ %val4 = load volatile float* undef
%cmp4 = fcmp une float %val4, undef
br i1 %cmp4, label %5, label %4
- %val5 = volatile load float* undef
+ %val5 = load volatile float* undef
%cmp5 = fcmp une float %val5, undef
br i1 %cmp5, label %6, label %5
- %val6 = volatile load float* undef
+ %val6 = load volatile float* undef
%cmp6 = fcmp une float %val6, undef
br i1 %cmp6, label %7, label %6
- %val7 = volatile load float* undef
+ %val7 = load volatile float* undef
%cmp7 = fcmp une float %val7, undef
br i1 %cmp7, label %8, label %7
- %val8 = volatile load float* undef
+ %val8 = load volatile float* undef
%cmp8 = fcmp une float %val8, undef
br i1 %cmp8, label %9, label %8
- %val9 = volatile load float* undef
+ %val9 = load volatile float* undef
%cmp9 = fcmp une float %val9, undef
br i1 %cmp9, label %10, label %9
- %val10 = volatile load float* undef
+ %val10 = load volatile float* undef
%cmp10 = fcmp une float %val10, undef
br i1 %cmp10, label %11, label %10
- %val11 = volatile load float* undef
+ %val11 = load volatile float* undef
%cmp11 = fcmp une float %val11, undef
br i1 %cmp11, label %12, label %11
- %val12 = volatile load float* undef
+ %val12 = load volatile float* undef
%cmp12 = fcmp une float %val12, undef
br i1 %cmp12, label %13, label %12
- %val13 = volatile load float* undef
+ %val13 = load volatile float* undef
%cmp13 = fcmp une float %val13, undef
br i1 %cmp13, label %14, label %13
- %val14 = volatile load float* undef
+ %val14 = load volatile float* undef
%cmp14 = fcmp une float %val14, undef
br i1 %cmp14, label %15, label %14
- %val15 = volatile load float* undef
+ %val15 = load volatile float* undef
%cmp15 = fcmp une float %val15, undef
br i1 %cmp15, label %16, label %15
- %val16 = volatile load float* undef
+ %val16 = load volatile float* undef
%cmp16 = fcmp une float %val16, undef
br i1 %cmp16, label %17, label %16
- %val17 = volatile load float* undef
+ %val17 = load volatile float* undef
%cmp17 = fcmp une float %val17, undef
br i1 %cmp17, label %18, label %17
- %val18 = volatile load float* undef
+ %val18 = load volatile float* undef
%cmp18 = fcmp une float %val18, undef
br i1 %cmp18, label %19, label %18
- %val19 = volatile load float* undef
+ %val19 = load volatile float* undef
%cmp19 = fcmp une float %val19, undef
br i1 %cmp19, label %20, label %19
- %val20 = volatile load float* undef
+ %val20 = load volatile float* undef
%cmp20 = fcmp une float %val20, undef
br i1 %cmp20, label %21, label %20
- %val21 = volatile load float* undef
+ %val21 = load volatile float* undef
%cmp21 = fcmp une float %val21, undef
br i1 %cmp21, label %22, label %21
- %val22 = volatile load float* undef
+ %val22 = load volatile float* undef
%cmp22 = fcmp une float %val22, undef
br i1 %cmp22, label %23, label %22
- %val23 = volatile load float* undef
+ %val23 = load volatile float* undef
%cmp23 = fcmp une float %val23, undef
br i1 %cmp23, label %24, label %23
- %val24 = volatile load float* undef
+ %val24 = load volatile float* undef
%cmp24 = fcmp une float %val24, undef
br i1 %cmp24, label %25, label %24
- %val25 = volatile load float* undef
+ %val25 = load volatile float* undef
%cmp25 = fcmp une float %val25, undef
br i1 %cmp25, label %26, label %25
- %val26 = volatile load float* undef
+ %val26 = load volatile float* undef
%cmp26 = fcmp une float %val26, undef
br i1 %cmp26, label %27, label %26
- %val27 = volatile load float* undef
+ %val27 = load volatile float* undef
%cmp27 = fcmp une float %val27, undef
br i1 %cmp27, label %28, label %27
- %val28 = volatile load float* undef
+ %val28 = load volatile float* undef
%cmp28 = fcmp une float %val28, undef
br i1 %cmp28, label %29, label %28
- %val29 = volatile load float* undef
+ %val29 = load volatile float* undef
%cmp29 = fcmp une float %val29, undef
br i1 %cmp29, label %30, label %29
- %val30 = volatile load float* undef
+ %val30 = load volatile float* undef
%cmp30 = fcmp une float %val30, undef
br i1 %cmp30, label %31, label %30
- %val31 = volatile load float* undef
+ %val31 = load volatile float* undef
%cmp31 = fcmp une float %val31, undef
br i1 %cmp31, label %32, label %31
- %val32 = volatile load float* undef
+ %val32 = load volatile float* undef
%cmp32 = fcmp une float %val32, undef
br i1 %cmp32, label %33, label %32
- %val33 = volatile load float* undef
+ %val33 = load volatile float* undef
%cmp33 = fcmp une float %val33, undef
br i1 %cmp33, label %34, label %33
- %val34 = volatile load float* undef
+ %val34 = load volatile float* undef
%cmp34 = fcmp une float %val34, undef
br i1 %cmp34, label %35, label %34
- %val35 = volatile load float* undef
+ %val35 = load volatile float* undef
%cmp35 = fcmp une float %val35, undef
br i1 %cmp35, label %36, label %35
- %val36 = volatile load float* undef
+ %val36 = load volatile float* undef
%cmp36 = fcmp une float %val36, undef
br i1 %cmp36, label %37, label %36
- %val37 = volatile load float* undef
+ %val37 = load volatile float* undef
%cmp37 = fcmp une float %val37, undef
br i1 %cmp37, label %38, label %37
- %val38 = volatile load float* undef
+ %val38 = load volatile float* undef
%cmp38 = fcmp une float %val38, undef
br i1 %cmp38, label %39, label %38
- %val39 = volatile load float* undef
+ %val39 = load volatile float* undef
%cmp39 = fcmp une float %val39, undef
br i1 %cmp39, label %40, label %39
- %val40 = volatile load float* undef
+ %val40 = load volatile float* undef
%cmp40 = fcmp une float %val40, undef
br i1 %cmp40, label %41, label %40
- %val41 = volatile load float* undef
+ %val41 = load volatile float* undef
%cmp41 = fcmp une float %val41, undef
br i1 %cmp41, label %42, label %41
- %val42 = volatile load float* undef
+ %val42 = load volatile float* undef
%cmp42 = fcmp une float %val42, undef
br i1 %cmp42, label %43, label %42
- %val43 = volatile load float* undef
+ %val43 = load volatile float* undef
%cmp43 = fcmp une float %val43, undef
br i1 %cmp43, label %44, label %43
- %val44 = volatile load float* undef
+ %val44 = load volatile float* undef
%cmp44 = fcmp une float %val44, undef
br i1 %cmp44, label %45, label %44
- %val45 = volatile load float* undef
+ %val45 = load volatile float* undef
%cmp45 = fcmp une float %val45, undef
br i1 %cmp45, label %46, label %45
- %val46 = volatile load float* undef
+ %val46 = load volatile float* undef
%cmp46 = fcmp une float %val46, undef
br i1 %cmp46, label %47, label %46
- %val47 = volatile load float* undef
+ %val47 = load volatile float* undef
%cmp47 = fcmp une float %val47, undef
br i1 %cmp47, label %48, label %47
- %val48 = volatile load float* undef
+ %val48 = load volatile float* undef
%cmp48 = fcmp une float %val48, undef
br i1 %cmp48, label %49, label %48
- %val49 = volatile load float* undef
+ %val49 = load volatile float* undef
%cmp49 = fcmp une float %val49, undef
br i1 %cmp49, label %50, label %49
- %val50 = volatile load float* undef
+ %val50 = load volatile float* undef
%cmp50 = fcmp une float %val50, undef
br i1 %cmp50, label %51, label %50
- %val51 = volatile load float* undef
+ %val51 = load volatile float* undef
%cmp51 = fcmp une float %val51, undef
br i1 %cmp51, label %52, label %51
- %val52 = volatile load float* undef
+ %val52 = load volatile float* undef
%cmp52 = fcmp une float %val52, undef
br i1 %cmp52, label %53, label %52
- %val53 = volatile load float* undef
+ %val53 = load volatile float* undef
%cmp53 = fcmp une float %val53, undef
br i1 %cmp53, label %54, label %53
- %val54 = volatile load float* undef
+ %val54 = load volatile float* undef
%cmp54 = fcmp une float %val54, undef
br i1 %cmp54, label %55, label %54
- %val55 = volatile load float* undef
+ %val55 = load volatile float* undef
%cmp55 = fcmp une float %val55, undef
br i1 %cmp55, label %56, label %55
- %val56 = volatile load float* undef
+ %val56 = load volatile float* undef
%cmp56 = fcmp une float %val56, undef
br i1 %cmp56, label %57, label %56
- %val57 = volatile load float* undef
+ %val57 = load volatile float* undef
%cmp57 = fcmp une float %val57, undef
br i1 %cmp57, label %58, label %57
- %val58 = volatile load float* undef
+ %val58 = load volatile float* undef
%cmp58 = fcmp une float %val58, undef
br i1 %cmp58, label %59, label %58
- %val59 = volatile load float* undef
+ %val59 = load volatile float* undef
%cmp59 = fcmp une float %val59, undef
br i1 %cmp59, label %60, label %59
- %val60 = volatile load float* undef
+ %val60 = load volatile float* undef
%cmp60 = fcmp une float %val60, undef
br i1 %cmp60, label %61, label %60
- %val61 = volatile load float* undef
+ %val61 = load volatile float* undef
%cmp61 = fcmp une float %val61, undef
br i1 %cmp61, label %62, label %61
- %val62 = volatile load float* undef
+ %val62 = load volatile float* undef
%cmp62 = fcmp une float %val62, undef
br i1 %cmp62, label %63, label %62
- %val63 = volatile load float* undef
+ %val63 = load volatile float* undef
%cmp63 = fcmp une float %val63, undef
br i1 %cmp63, label %64, label %63
- %val64 = volatile load float* undef
+ %val64 = load volatile float* undef
%cmp64 = fcmp une float %val64, undef
br i1 %cmp64, label %65, label %64
diff --git a/test/CodeGen/X86/cmov.ll b/test/CodeGen/X86/cmov.ll
index 7a8d6e6..2e7ffbf 100644
--- a/test/CodeGen/X86/cmov.ll
+++ b/test/CodeGen/X86/cmov.ll
@@ -84,7 +84,7 @@ entry:
br i1 %3, label %func_4.exit.i, label %bb.i.i.i
bb.i.i.i: ; preds = %entry
- %4 = volatile load i8* @g_100, align 1 ; <i8> [#uses=0]
+ %4 = load volatile i8* @g_100, align 1 ; <i8> [#uses=0]
br label %func_4.exit.i
; CHECK: test4:
@@ -101,7 +101,7 @@ func_4.exit.i: ; preds = %bb.i.i.i, %entry
br i1 %brmerge.i, label %func_1.exit, label %bb.i.i
bb.i.i: ; preds = %func_4.exit.i
- %5 = volatile load i8* @g_100, align 1 ; <i8> [#uses=0]
+ %5 = load volatile i8* @g_100, align 1 ; <i8> [#uses=0]
br label %func_1.exit
func_1.exit: ; preds = %bb.i.i, %func_4.exit.i
diff --git a/test/CodeGen/X86/coalescer-commute1.ll b/test/CodeGen/X86/coalescer-commute1.ll
index 8aa0bfd..d9e0778 100644
--- a/test/CodeGen/X86/coalescer-commute1.ll
+++ b/test/CodeGen/X86/coalescer-commute1.ll
@@ -21,6 +21,6 @@ bb: ; preds = %bb, %entry
br i1 %exitcond, label %bb13, label %bb
bb13: ; preds = %bb
- volatile store float %tmp6, float* @G, align 4
+ store volatile float %tmp6, float* @G, align 4
ret void
}
diff --git a/test/CodeGen/X86/crash.ll b/test/CodeGen/X86/crash.ll
index 1531457..cf6e27d 100644
--- a/test/CodeGen/X86/crash.ll
+++ b/test/CodeGen/X86/crash.ll
@@ -6,16 +6,16 @@
; Chain and flag folding issues.
define i32 @test1() nounwind ssp {
entry:
- %tmp5.i = volatile load i32* undef ; <i32> [#uses=1]
+ %tmp5.i = load volatile i32* undef ; <i32> [#uses=1]
%conv.i = zext i32 %tmp5.i to i64 ; <i64> [#uses=1]
- %tmp12.i = volatile load i32* undef ; <i32> [#uses=1]
+ %tmp12.i = load volatile i32* undef ; <i32> [#uses=1]
%conv13.i = zext i32 %tmp12.i to i64 ; <i64> [#uses=1]
%shl.i = shl i64 %conv13.i, 32 ; <i64> [#uses=1]
%or.i = or i64 %shl.i, %conv.i ; <i64> [#uses=1]
%add16.i = add i64 %or.i, 256 ; <i64> [#uses=1]
%shr.i = lshr i64 %add16.i, 8 ; <i64> [#uses=1]
%conv19.i = trunc i64 %shr.i to i32 ; <i32> [#uses=1]
- volatile store i32 %conv19.i, i32* undef
+ store volatile i32 %conv19.i, i32* undef
ret i32 undef
}
diff --git a/test/CodeGen/X86/fp-stack-ret-conv.ll b/test/CodeGen/X86/fp-stack-ret-conv.ll
index f220b24..3e26141 100644
--- a/test/CodeGen/X86/fp-stack-ret-conv.ll
+++ b/test/CodeGen/X86/fp-stack-ret-conv.ll
@@ -10,7 +10,7 @@ entry:
%tmp13 = tail call double @foo()
%tmp1314 = fptrunc double %tmp13 to float ; <float> [#uses=1]
%tmp3940 = fpext float %tmp1314 to double ; <double> [#uses=1]
- volatile store double %tmp3940, double* %b
+ store volatile double %tmp3940, double* %b
ret void
}
diff --git a/test/CodeGen/X86/loop-strength-reduce5.ll b/test/CodeGen/X86/loop-strength-reduce5.ll
index b07eeb6..d50a668 100644
--- a/test/CodeGen/X86/loop-strength-reduce5.ll
+++ b/test/CodeGen/X86/loop-strength-reduce5.ll
@@ -11,9 +11,9 @@ entry:
bb: ; preds = %bb, %entry
%i.014.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
%tmp1 = trunc i32 %i.014.0 to i16 ; <i16> [#uses=2]
- volatile store i16 %tmp1, i16* @X, align 2
+ store volatile i16 %tmp1, i16* @X, align 2
%tmp34 = shl i16 %tmp1, 2 ; <i16> [#uses=1]
- volatile store i16 %tmp34, i16* @Y, align 2
+ store volatile i16 %tmp34, i16* @Y, align 2
%indvar.next = add i32 %i.014.0, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
br i1 %exitcond, label %return, label %bb
diff --git a/test/CodeGen/X86/lsr-nonaffine.ll b/test/CodeGen/X86/lsr-nonaffine.ll
index d0d2bbd..d825b5a 100644
--- a/test/CodeGen/X86/lsr-nonaffine.ll
+++ b/test/CodeGen/X86/lsr-nonaffine.ll
@@ -19,7 +19,7 @@ entry:
loop:
%i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
- volatile store i64 %i, i64* %p
+ store volatile i64 %i, i64* %p
%i.next = add i64 %i, %s
%c = icmp slt i64 %i.next, %n
br i1 %c, label %loop, label %exit
diff --git a/test/CodeGen/X86/lsr-sort.ll b/test/CodeGen/X86/lsr-sort.ll
index 1f3b59a..b85ddeb 100644
--- a/test/CodeGen/X86/lsr-sort.ll
+++ b/test/CodeGen/X86/lsr-sort.ll
@@ -12,7 +12,7 @@ entry:
bb: ; preds = %bb, %entry
%i.03 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
%1 = trunc i32 %i.03 to i16 ; <i16> [#uses=1]
- volatile store i16 %1, i16* @X, align 2
+ store volatile i16 %1, i16* @X, align 2
%indvar.next = add i32 %i.03, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
br i1 %exitcond, label %return, label %bb
diff --git a/test/CodeGen/X86/nancvt.ll b/test/CodeGen/X86/nancvt.ll
index 82b7331..8036710 100644
--- a/test/CodeGen/X86/nancvt.ll
+++ b/test/CodeGen/X86/nancvt.ll
@@ -52,8 +52,8 @@ bb: ; preds = %bb23
%tmp17 = ashr i64 %tmp16, %.cast ; <i64> [#uses=1]
%tmp1718 = trunc i64 %tmp17 to i32 ; <i32> [#uses=1]
%tmp19 = getelementptr [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
- volatile store i32 %tmp1718, i32* @var
- volatile store i32 %tmp13, i32* @var
+ store volatile i32 %tmp1718, i32* @var
+ store volatile i32 %tmp13, i32* @var
%tmp21 = load i32* %i, align 4 ; <i32> [#uses=1]
%tmp22 = add i32 %tmp21, 1 ; <i32> [#uses=1]
store i32 %tmp22, i32* %i, align 4
@@ -86,7 +86,7 @@ bb28: ; preds = %bb46
%tmp3940 = bitcast float* %tmp39 to i32* ; <i32*> [#uses=1]
%tmp41 = load i32* %tmp3940, align 4 ; <i32> [#uses=1]
%tmp42 = getelementptr [6 x i8]* @.str1, i32 0, i32 0 ; <i8*> [#uses=1]
- volatile store i32 %tmp41, i32* @var
+ store volatile i32 %tmp41, i32* @var
%tmp44 = load i32* %i, align 4 ; <i32> [#uses=1]
%tmp45 = add i32 %tmp44, 1 ; <i32> [#uses=1]
store i32 %tmp45, i32* %i, align 4
@@ -127,8 +127,8 @@ bb52: ; preds = %bb78
%tmp72 = ashr i64 %tmp70, %.cast71 ; <i64> [#uses=1]
%tmp7273 = trunc i64 %tmp72 to i32 ; <i32> [#uses=1]
%tmp74 = getelementptr [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
- volatile store i32 %tmp7273, i32* @var
- volatile store i32 %tmp66, i32* @var
+ store volatile i32 %tmp7273, i32* @var
+ store volatile i32 %tmp66, i32* @var
%tmp76 = load i32* %i, align 4 ; <i32> [#uses=1]
%tmp77 = add i32 %tmp76, 1 ; <i32> [#uses=1]
store i32 %tmp77, i32* %i, align 4
@@ -161,7 +161,7 @@ bb84: ; preds = %bb101
%tmp9495 = bitcast float* %tmp94 to i32* ; <i32*> [#uses=1]
%tmp96 = load i32* %tmp9495, align 4 ; <i32> [#uses=1]
%tmp97 = getelementptr [6 x i8]* @.str1, i32 0, i32 0 ; <i8*> [#uses=1]
- volatile store i32 %tmp96, i32* @var
+ store volatile i32 %tmp96, i32* @var
%tmp99 = load i32* %i, align 4 ; <i32> [#uses=1]
%tmp100 = add i32 %tmp99, 1 ; <i32> [#uses=1]
store i32 %tmp100, i32* %i, align 4
diff --git a/test/CodeGen/X86/narrow-shl-load.ll b/test/CodeGen/X86/narrow-shl-load.ll
index ef27cbc..7822453 100644
--- a/test/CodeGen/X86/narrow-shl-load.ll
+++ b/test/CodeGen/X86/narrow-shl-load.ll
@@ -67,7 +67,7 @@ declare void @exit(i32) noreturn
; DAG Combiner can't fold this into a load of the 1'th byte.
; PR8757
define i32 @test3(i32 *%P) nounwind ssp {
- volatile store i32 128, i32* %P
+ store volatile i32 128, i32* %P
%tmp4.pre = load i32* %P
%phitmp = trunc i32 %tmp4.pre to i16
%phitmp13 = shl i16 %phitmp, 8
diff --git a/test/CodeGen/X86/overlap-shift.ll b/test/CodeGen/X86/overlap-shift.ll
index c1fc041..d185af1 100644
--- a/test/CodeGen/X86/overlap-shift.ll
+++ b/test/CodeGen/X86/overlap-shift.ll
@@ -13,7 +13,7 @@
define i32 @test1(i32 %X) {
%Z = shl i32 %X, 2 ; <i32> [#uses=1]
- volatile store i32 %Z, i32* @G
+ store volatile i32 %Z, i32* @G
ret i32 %X
}
diff --git a/test/CodeGen/X86/pr1505b.ll b/test/CodeGen/X86/pr1505b.ll
index 945ec4c..9b0ef83 100644
--- a/test/CodeGen/X86/pr1505b.ll
+++ b/test/CodeGen/X86/pr1505b.ll
@@ -33,7 +33,7 @@ declare i32 @__cxa_atexit(void (i8*)*, i8*, i8*)
define i32 @main() {
entry:
; CHECK: flds
- %tmp6 = volatile load float* @a ; <float> [#uses=1]
+ %tmp6 = load volatile float* @a ; <float> [#uses=1]
; CHECK: fstps (%esp)
; CHECK: tanf
%tmp9 = tail call float @tanf( float %tmp6 ) ; <float> [#uses=1]
@@ -41,7 +41,7 @@ entry:
; CHECK: fstp
; CHECK: fldl
- %tmp12 = volatile load double* @b ; <double> [#uses=1]
+ %tmp12 = load volatile double* @b ; <double> [#uses=1]
; CHECK: fstpl (%esp)
; CHECK: tan
%tmp13 = tail call double @tan( double %tmp12 ) ; <double> [#uses=1]
diff --git a/test/CodeGen/X86/pr2182.ll b/test/CodeGen/X86/pr2182.ll
index 2a8bb35..02a3605 100644
--- a/test/CodeGen/X86/pr2182.ll
+++ b/test/CodeGen/X86/pr2182.ll
@@ -15,17 +15,17 @@ define void @loop_2() nounwind {
; CHECK-NEXT: addl $3, (%{{.*}})
; CHECK-NEXT: ret
- %tmp = volatile load i32* @x, align 4 ; <i32> [#uses=1]
+ %tmp = load volatile i32* @x, align 4 ; <i32> [#uses=1]
%tmp1 = add i32 %tmp, 3 ; <i32> [#uses=1]
- volatile store i32 %tmp1, i32* @x, align 4
- %tmp.1 = volatile load i32* @x, align 4 ; <i32> [#uses=1]
+ store volatile i32 %tmp1, i32* @x, align 4
+ %tmp.1 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
%tmp1.1 = add i32 %tmp.1, 3 ; <i32> [#uses=1]
- volatile store i32 %tmp1.1, i32* @x, align 4
- %tmp.2 = volatile load i32* @x, align 4 ; <i32> [#uses=1]
+ store volatile i32 %tmp1.1, i32* @x, align 4
+ %tmp.2 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
%tmp1.2 = add i32 %tmp.2, 3 ; <i32> [#uses=1]
- volatile store i32 %tmp1.2, i32* @x, align 4
- %tmp.3 = volatile load i32* @x, align 4 ; <i32> [#uses=1]
+ store volatile i32 %tmp1.2, i32* @x, align 4
+ %tmp.3 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
%tmp1.3 = add i32 %tmp.3, 3 ; <i32> [#uses=1]
- volatile store i32 %tmp1.3, i32* @x, align 4
+ store volatile i32 %tmp1.3, i32* @x, align 4
ret void
}
diff --git a/test/CodeGen/X86/sext-subreg.ll b/test/CodeGen/X86/sext-subreg.ll
index b2b9f81..a128af9 100644
--- a/test/CodeGen/X86/sext-subreg.ll
+++ b/test/CodeGen/X86/sext-subreg.ll
@@ -8,10 +8,10 @@ define i64 @t(i64 %A, i64 %B, i32* %P, i64 *%P2) nounwind {
; CHECK: movl %eax
%C = add i64 %A, %B
%D = trunc i64 %C to i32
- volatile store i32 %D, i32* %P
+ store volatile i32 %D, i32* %P
%E = shl i64 %C, 32
%F = ashr i64 %E, 32
- volatile store i64 %F, i64 *%P2
- volatile store i32 %D, i32* %P
+ store volatile i64 %F, i64 *%P2
+ store volatile i32 %D, i32* %P
ret i64 undef
}
diff --git a/test/CodeGen/X86/stack-align.ll b/test/CodeGen/X86/stack-align.ll
index 793c026..f6c13ec 100644
--- a/test/CodeGen/X86/stack-align.ll
+++ b/test/CodeGen/X86/stack-align.ll
@@ -11,13 +11,13 @@ define void @test({ double, double }* byval %z, double* %P) nounwind {
entry:
%tmp3 = load double* @G, align 16 ; <double> [#uses=1]
%tmp4 = tail call double @fabs( double %tmp3 ) ; <double> [#uses=1]
- volatile store double %tmp4, double* %P
+ store volatile double %tmp4, double* %P
%tmp = getelementptr { double, double }* %z, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp1 = volatile load double* %tmp, align 8 ; <double> [#uses=1]
+ %tmp1 = load volatile double* %tmp, align 8 ; <double> [#uses=1]
%tmp2 = tail call double @fabs( double %tmp1 ) ; <double> [#uses=1]
; CHECK: andpd{{.*}}4(%esp), %xmm
%tmp6 = fadd double %tmp4, %tmp2 ; <double> [#uses=1]
- volatile store double %tmp6, double* %P, align 8
+ store volatile double %tmp6, double* %P, align 8
ret void
}
diff --git a/test/CodeGen/X86/store-empty-member.ll b/test/CodeGen/X86/store-empty-member.ll
index 37f86c6..aea85b9 100644
--- a/test/CodeGen/X86/store-empty-member.ll
+++ b/test/CodeGen/X86/store-empty-member.ll
@@ -9,6 +9,6 @@
define void @foo() nounwind {
%1 = alloca %testType
- volatile store %testType {i32 1, [0 x i32] zeroinitializer, i32 2}, %testType* %1
+ store volatile %testType {i32 1, [0 x i32] zeroinitializer, i32 2}, %testType* %1
ret void
}
diff --git a/test/CodeGen/X86/tail-opts.ll b/test/CodeGen/X86/tail-opts.ll
index d6c16ca..f1b9f20 100644
--- a/test/CodeGen/X86/tail-opts.ll
+++ b/test/CodeGen/X86/tail-opts.ll
@@ -314,7 +314,7 @@ bby:
]
bb7:
- volatile store i32 0, i32* @XYZ
+ store volatile i32 0, i32* @XYZ
unreachable
bbx:
@@ -323,7 +323,7 @@ bbx:
]
bb12:
- volatile store i32 0, i32* @XYZ
+ store volatile i32 0, i32* @XYZ
unreachable
return:
@@ -352,8 +352,8 @@ bby:
]
bb7:
- volatile store i32 0, i32* @XYZ
- volatile store i32 1, i32* @XYZ
+ store volatile i32 0, i32* @XYZ
+ store volatile i32 1, i32* @XYZ
unreachable
bbx:
@@ -362,8 +362,8 @@ bbx:
]
bb12:
- volatile store i32 0, i32* @XYZ
- volatile store i32 1, i32* @XYZ
+ store volatile i32 0, i32* @XYZ
+ store volatile i32 1, i32* @XYZ
unreachable
return:
@@ -390,8 +390,8 @@ bby:
]
bb7:
- volatile store i32 0, i32* @XYZ
- volatile store i32 1, i32* @XYZ
+ store volatile i32 0, i32* @XYZ
+ store volatile i32 1, i32* @XYZ
unreachable
bbx:
@@ -400,8 +400,8 @@ bbx:
]
bb12:
- volatile store i32 0, i32* @XYZ
- volatile store i32 1, i32* @XYZ
+ store volatile i32 0, i32* @XYZ
+ store volatile i32 1, i32* @XYZ
unreachable
return:
diff --git a/test/CodeGen/X86/twoaddr-lea.ll b/test/CodeGen/X86/twoaddr-lea.ll
index a1d797f..b7fe039 100644
--- a/test/CodeGen/X86/twoaddr-lea.ll
+++ b/test/CodeGen/X86/twoaddr-lea.ll
@@ -14,7 +14,7 @@ define i32 @test1(i32 %X) nounwind {
; CHECK-NOT: mov
; CHECK: leal 1(%rdi)
%Z = add i32 %X, 1
- volatile store i32 %Z, i32* @G
+ store volatile i32 %Z, i32* @G
ret i32 %X
}
diff --git a/test/CodeGen/X86/vec_shuffle-23.ll b/test/CodeGen/X86/vec_shuffle-23.ll
index 05a3a1e..2468735 100644
--- a/test/CodeGen/X86/vec_shuffle-23.ll
+++ b/test/CodeGen/X86/vec_shuffle-23.ll
@@ -5,7 +5,7 @@ define i32 @t() nounwind {
entry:
%a = alloca <4 x i32> ; <<4 x i32>*> [#uses=2]
%b = alloca <4 x i32> ; <<4 x i32>*> [#uses=5]
- volatile store <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a
+ store volatile <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a
%tmp = load <4 x i32>* %a ; <<4 x i32>> [#uses=1]
store <4 x i32> %tmp, <4 x i32>* %b
%tmp1 = load <4 x i32>* %b ; <<4 x i32>> [#uses=1]
diff --git a/test/CodeGen/X86/vec_shuffle-24.ll b/test/CodeGen/X86/vec_shuffle-24.ll
index 1b104de..d038daf 100644
--- a/test/CodeGen/X86/vec_shuffle-24.ll
+++ b/test/CodeGen/X86/vec_shuffle-24.ll
@@ -5,7 +5,7 @@ entry:
; CHECK: punpckldq
%a = alloca <4 x i32> ; <<4 x i32>*> [#uses=2]
%b = alloca <4 x i32> ; <<4 x i32>*> [#uses=5]
- volatile store <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a
+ store volatile <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a
%tmp = load <4 x i32>* %a ; <<4 x i32>> [#uses=1]
store <4 x i32> %tmp, <4 x i32>* %b
%tmp1 = load <4 x i32>* %b ; <<4 x i32>> [#uses=1]
diff --git a/test/CodeGen/X86/volatile.ll b/test/CodeGen/X86/volatile.ll
index 2e5742a..1a82014 100644
--- a/test/CodeGen/X86/volatile.ll
+++ b/test/CodeGen/X86/volatile.ll
@@ -4,14 +4,14 @@
@x = external global double
define void @foo() nounwind {
- %a = volatile load double* @x
- volatile store double 0.0, double* @x
- volatile store double 0.0, double* @x
- %b = volatile load double* @x
+ %a = load volatile double* @x
+ store volatile double 0.0, double* @x
+ store volatile double 0.0, double* @x
+ %b = load volatile double* @x
ret void
}
define void @bar() nounwind {
- %c = volatile load double* @x
+ %c = load volatile double* @x
ret void
}
diff --git a/test/CodeGen/XCore/licm-ldwcp.ll b/test/CodeGen/XCore/licm-ldwcp.ll
index 4884f70..794c6bb 100644
--- a/test/CodeGen/XCore/licm-ldwcp.ll
+++ b/test/CodeGen/XCore/licm-ldwcp.ll
@@ -13,6 +13,6 @@ entry:
br label %bb
bb: ; preds = %bb, %entry
- volatile store i32 525509670, i32* %p, align 4
+ store volatile i32 525509670, i32* %p, align 4
br label %bb
}
diff --git a/test/CodeGen/XCore/scavenging.ll b/test/CodeGen/XCore/scavenging.ll
index 3181e96..5b612d0 100644
--- a/test/CodeGen/XCore/scavenging.ll
+++ b/test/CodeGen/XCore/scavenging.ll
@@ -18,32 +18,32 @@ entry:
%x = alloca [100 x i32], align 4 ; <[100 x i32]*> [#uses=2]
%0 = load i32* @size, align 4 ; <i32> [#uses=1]
%1 = alloca i32, i32 %0, align 4 ; <i32*> [#uses=1]
- %2 = volatile load i32* @g0, align 4 ; <i32> [#uses=1]
- %3 = volatile load i32* @g1, align 4 ; <i32> [#uses=1]
- %4 = volatile load i32* @g2, align 4 ; <i32> [#uses=1]
- %5 = volatile load i32* @g3, align 4 ; <i32> [#uses=1]
- %6 = volatile load i32* @g4, align 4 ; <i32> [#uses=1]
- %7 = volatile load i32* @g5, align 4 ; <i32> [#uses=1]
- %8 = volatile load i32* @g6, align 4 ; <i32> [#uses=1]
- %9 = volatile load i32* @g7, align 4 ; <i32> [#uses=1]
- %10 = volatile load i32* @g8, align 4 ; <i32> [#uses=1]
- %11 = volatile load i32* @g9, align 4 ; <i32> [#uses=1]
- %12 = volatile load i32* @g10, align 4 ; <i32> [#uses=1]
- %13 = volatile load i32* @g11, align 4 ; <i32> [#uses=2]
+ %2 = load volatile i32* @g0, align 4 ; <i32> [#uses=1]
+ %3 = load volatile i32* @g1, align 4 ; <i32> [#uses=1]
+ %4 = load volatile i32* @g2, align 4 ; <i32> [#uses=1]
+ %5 = load volatile i32* @g3, align 4 ; <i32> [#uses=1]
+ %6 = load volatile i32* @g4, align 4 ; <i32> [#uses=1]
+ %7 = load volatile i32* @g5, align 4 ; <i32> [#uses=1]
+ %8 = load volatile i32* @g6, align 4 ; <i32> [#uses=1]
+ %9 = load volatile i32* @g7, align 4 ; <i32> [#uses=1]
+ %10 = load volatile i32* @g8, align 4 ; <i32> [#uses=1]
+ %11 = load volatile i32* @g9, align 4 ; <i32> [#uses=1]
+ %12 = load volatile i32* @g10, align 4 ; <i32> [#uses=1]
+ %13 = load volatile i32* @g11, align 4 ; <i32> [#uses=2]
%14 = getelementptr [100 x i32]* %x, i32 0, i32 50 ; <i32*> [#uses=1]
store i32 %13, i32* %14, align 4
- volatile store i32 %13, i32* @g11, align 4
- volatile store i32 %12, i32* @g10, align 4
- volatile store i32 %11, i32* @g9, align 4
- volatile store i32 %10, i32* @g8, align 4
- volatile store i32 %9, i32* @g7, align 4
- volatile store i32 %8, i32* @g6, align 4
- volatile store i32 %7, i32* @g5, align 4
- volatile store i32 %6, i32* @g4, align 4
- volatile store i32 %5, i32* @g3, align 4
- volatile store i32 %4, i32* @g2, align 4
- volatile store i32 %3, i32* @g1, align 4
- volatile store i32 %2, i32* @g0, align 4
+ store volatile i32 %13, i32* @g11, align 4
+ store volatile i32 %12, i32* @g10, align 4
+ store volatile i32 %11, i32* @g9, align 4
+ store volatile i32 %10, i32* @g8, align 4
+ store volatile i32 %9, i32* @g7, align 4
+ store volatile i32 %8, i32* @g6, align 4
+ store volatile i32 %7, i32* @g5, align 4
+ store volatile i32 %6, i32* @g4, align 4
+ store volatile i32 %5, i32* @g3, align 4
+ store volatile i32 %4, i32* @g2, align 4
+ store volatile i32 %3, i32* @g1, align 4
+ store volatile i32 %2, i32* @g0, align 4
%x1 = getelementptr [100 x i32]* %x, i32 0, i32 0 ; <i32*> [#uses=1]
call void @g(i32* %x1, i32* %1) nounwind
ret void