From f03bb260c90ad013aa4e55af36382875011c95b8 Mon Sep 17 00:00:00 2001
From: Eli Friedman
Date: Fri, 12 Aug 2011 22:50:01 +0000
Subject: Move "atomic" and "volatile" designations on instructions after the
opcode of the instruction.
Note that this change affects the existing non-atomic load and store
instructions; the parser now accepts both forms, and the change is noted
in the release notes.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@137527 91177308-0d34-0410-b5e6-96231b3b80d8
---
docs/LangRef.html | 12 +--
docs/ReleaseNotes.html | 4 +
lib/AsmParser/LLParser.cpp | 99 +++++++++++++++-------
lib/AsmParser/LLParser.h | 10 +--
lib/VMCore/AsmWriter.cpp | 22 ++---
test/Assembler/atomic.ll | 26 ++++++
test/Transforms/DeadArgElim/deadexternal.ll | 2 +-
test/Transforms/DeadStoreElimination/simple.ll | 8 +-
test/Transforms/EarlyCSE/basic.ll | 10 +--
.../GlobalOpt/2008-01-29-VolatileGlobal.ll | 2 +-
.../InstCombine/2008-04-28-VolatileStore.ll | 2 +-
.../2008-04-29-VolatileLoadDontMerge.ll | 2 +-
.../InstCombine/2008-04-29-VolatileLoadMerge.ll | 2 +-
.../InstCombine/2008-07-08-VolatileLoadMerge.ll | 2 +-
test/Transforms/InstCombine/extractvalue.ll | 2 +-
test/Transforms/InstCombine/intrinsics.ll | 12 +--
test/Transforms/InstCombine/volatile_store.ll | 4 +-
.../JumpThreading/no-irreducible-loops.ll | 2 +-
test/Transforms/LICM/2007-05-22-VolatileSink.ll | 2 +-
.../LICM/2011-04-06-HoistMissedASTUpdate.ll | 2 +-
test/Transforms/LICM/scalar_promote.ll | 2 +-
test/Transforms/ObjCARC/contract-storestrong.ll | 4 +-
test/Transforms/ScalarRepl/volatile.ll | 4 +-
.../SimplifyCFG/trapping-load-unreachable.ll | 8 +-
test/Transforms/SimplifyLibCalls/memcmp.ll | 22 ++---
25 files changed, 166 insertions(+), 101 deletions(-)
create mode 100644 test/Assembler/atomic.ll
diff --git a/docs/LangRef.html b/docs/LangRef.html
index 725691c..95cbad0 100644
--- a/docs/LangRef.html
+++ b/docs/LangRef.html
@@ -4572,8 +4572,8 @@ that the invoke/unwind semantics are likely to change in future versions.
Syntax:
- <result> = [volatile] load <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>]
- <result> = atomic [volatile] load <ty>* <pointer> [singlethread] <ordering>, align <alignment>
+ <result> = load [volatile] <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>]
+ <result> = load atomic [volatile] <ty>* <pointer> [singlethread] <ordering>, align <alignment>
!<index> = !{ i32 1 }
@@ -4644,8 +4644,8 @@ that the invoke/unwind semantics are likely to change in future versions.
Syntax:
- [volatile] store <ty> <value>, <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>] ; yields {void}
- atomic [volatile] store <ty> <value>, <ty>* <pointer> [singlethread] <ordering>, align <alignment> ; yields {void}
+ store [volatile] <ty> <value>, <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>] ; yields {void}
+ store atomic [volatile] <ty> <value>, <ty>* <pointer> [singlethread] <ordering>, align <alignment> ; yields {void}
Overview:
@@ -4774,7 +4774,7 @@ thread. (This is useful for interacting with signal handlers.)
Syntax:
- [volatile] cmpxchg <ty>* <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering> ; yields {ty}
+ cmpxchg [volatile] <ty>* <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering> ; yields {ty}
Overview:
@@ -4857,7 +4857,7 @@ done:
Syntax:
- [volatile] atomicrmw <operation> <ty>* <pointer>, <ty> <value> [singlethread] <ordering> ; yields {ty}
+ atomicrmw [volatile] <operation> <ty>* <pointer>, <ty> <value> [singlethread] <ordering> ; yields {ty}
Overview:
diff --git a/docs/ReleaseNotes.html b/docs/ReleaseNotes.html
index 726729a..2585554 100644
--- a/docs/ReleaseNotes.html
+++ b/docs/ReleaseNotes.html
@@ -583,6 +583,10 @@ it run faster:
- The
LowerSetJmp
pass wasn't used effectively by any
target and has been removed.
+  <li>The syntax of volatile loads and stores in IR has been changed to
+      "<tt>load volatile</tt>"/"<tt>store volatile</tt>".  The old
+      syntax ("<tt>volatile load</tt>"/"<tt>volatile store</tt>")
+      is still accepted, but is now considered deprecated.</li>
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index a5412a6..c865afd 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -2950,27 +2950,17 @@ int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
case lltok::kw_tail: return ParseCall(Inst, PFS, true);
// Memory.
case lltok::kw_alloca: return ParseAlloc(Inst, PFS);
- case lltok::kw_load: return ParseLoad(Inst, PFS, false, false);
- case lltok::kw_store: return ParseStore(Inst, PFS, false, false);
- case lltok::kw_cmpxchg: return ParseCmpXchg(Inst, PFS, false);
- case lltok::kw_atomicrmw: return ParseAtomicRMW(Inst, PFS, false);
+ case lltok::kw_load: return ParseLoad(Inst, PFS, false);
+ case lltok::kw_store: return ParseStore(Inst, PFS, false);
+ case lltok::kw_cmpxchg: return ParseCmpXchg(Inst, PFS);
+ case lltok::kw_atomicrmw: return ParseAtomicRMW(Inst, PFS);
case lltok::kw_fence: return ParseFence(Inst, PFS);
- case lltok::kw_atomic: {
- bool isVolatile = EatIfPresent(lltok::kw_volatile);
- if (EatIfPresent(lltok::kw_load))
- return ParseLoad(Inst, PFS, true, isVolatile);
- else if (EatIfPresent(lltok::kw_store))
- return ParseStore(Inst, PFS, true, isVolatile);
- }
case lltok::kw_volatile:
+ // For compatibility; canonical location is after load
if (EatIfPresent(lltok::kw_load))
- return ParseLoad(Inst, PFS, false, true);
+ return ParseLoad(Inst, PFS, true);
else if (EatIfPresent(lltok::kw_store))
- return ParseStore(Inst, PFS, false, true);
- else if (EatIfPresent(lltok::kw_cmpxchg))
- return ParseCmpXchg(Inst, PFS, true);
- else if (EatIfPresent(lltok::kw_atomicrmw))
- return ParseAtomicRMW(Inst, PFS, true);
+ return ParseStore(Inst, PFS, true);
else
return TokError("expected 'load' or 'store'");
case lltok::kw_getelementptr: return ParseGetElementPtr(Inst, PFS);
@@ -3694,16 +3684,34 @@ int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS) {
}
/// ParseLoad
-/// ::= 'volatile'? 'load' TypeAndValue (',' 'align' i32)?
-// ::= 'atomic' 'volatile'? 'load' TypeAndValue
-// 'singlethread'? AtomicOrdering (',' 'align' i32)?
+/// ::= 'load' 'volatile'? TypeAndValue (',' 'align' i32)?
+/// ::= 'load' 'atomic' 'volatile'? TypeAndValue
+/// 'singlethread'? AtomicOrdering (',' 'align' i32)?
+/// Compatibility:
+/// ::= 'volatile' 'load' TypeAndValue (',' 'align' i32)?
int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
- bool isAtomic, bool isVolatile) {
+ bool isVolatile) {
Value *Val; LocTy Loc;
unsigned Alignment = 0;
bool AteExtraComma = false;
+ bool isAtomic = false;
AtomicOrdering Ordering = NotAtomic;
SynchronizationScope Scope = CrossThread;
+
+ if (Lex.getKind() == lltok::kw_atomic) {
+ if (isVolatile)
+ return TokError("mixing atomic with old volatile placement");
+ isAtomic = true;
+ Lex.Lex();
+ }
+
+ if (Lex.getKind() == lltok::kw_volatile) {
+ if (isVolatile)
+      return TokError("duplicate volatile before and after load");
+ isVolatile = true;
+ Lex.Lex();
+ }
+
if (ParseTypeAndValue(Val, Loc, PFS) ||
ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
ParseOptionalCommaAlign(Alignment, AteExtraComma))
@@ -3722,16 +3730,35 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
}
/// ParseStore
-/// ::= 'volatile'? 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)?
-/// ::= 'atomic' 'volatile'? 'store' TypeAndValue ',' TypeAndValue
+
+/// ::= 'store' 'volatile'? TypeAndValue ',' TypeAndValue (',' 'align' i32)?
+/// ::= 'store' 'atomic' 'volatile'? TypeAndValue ',' TypeAndValue
/// 'singlethread'? AtomicOrdering (',' 'align' i32)?
+/// Compatibility:
+/// ::= 'volatile' 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)?
int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
- bool isAtomic, bool isVolatile) {
+ bool isVolatile) {
Value *Val, *Ptr; LocTy Loc, PtrLoc;
unsigned Alignment = 0;
bool AteExtraComma = false;
+ bool isAtomic = false;
AtomicOrdering Ordering = NotAtomic;
SynchronizationScope Scope = CrossThread;
+
+ if (Lex.getKind() == lltok::kw_atomic) {
+ if (isVolatile)
+ return TokError("mixing atomic with old volatile placement");
+ isAtomic = true;
+ Lex.Lex();
+ }
+
+ if (Lex.getKind() == lltok::kw_volatile) {
+ if (isVolatile)
+ return TokError("duplicate volatile before and after store");
+ isVolatile = true;
+ Lex.Lex();
+ }
+
if (ParseTypeAndValue(Val, Loc, PFS) ||
ParseToken(lltok::comma, "expected ',' after store operand") ||
ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
@@ -3755,14 +3782,18 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
}
/// ParseCmpXchg
-/// ::= 'volatile'? 'cmpxchg' TypeAndValue ',' TypeAndValue ',' TypeAndValue
-/// 'singlethread'? AtomicOrdering
-int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS,
- bool isVolatile) {
+/// ::= 'cmpxchg' 'volatile'? TypeAndValue ',' TypeAndValue ',' TypeAndValue
+/// 'singlethread'? AtomicOrdering
+int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
Value *Ptr, *Cmp, *New; LocTy PtrLoc, CmpLoc, NewLoc;
bool AteExtraComma = false;
AtomicOrdering Ordering = NotAtomic;
SynchronizationScope Scope = CrossThread;
+ bool isVolatile = false;
+
+ if (EatIfPresent(lltok::kw_volatile))
+ isVolatile = true;
+
if (ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
ParseToken(lltok::comma, "expected ',' after cmpxchg address") ||
ParseTypeAndValue(Cmp, CmpLoc, PFS) ||
@@ -3794,15 +3825,19 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS,
}
/// ParseAtomicRMW
-/// ::= 'volatile'? 'atomicrmw' BinOp TypeAndValue ',' TypeAndValue
-/// 'singlethread'? AtomicOrdering
-int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS,
- bool isVolatile) {
+/// ::= 'atomicrmw' 'volatile'? BinOp TypeAndValue ',' TypeAndValue
+/// 'singlethread'? AtomicOrdering
+int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
Value *Ptr, *Val; LocTy PtrLoc, ValLoc;
bool AteExtraComma = false;
AtomicOrdering Ordering = NotAtomic;
SynchronizationScope Scope = CrossThread;
+ bool isVolatile = false;
AtomicRMWInst::BinOp Operation;
+
+ if (EatIfPresent(lltok::kw_volatile))
+ isVolatile = true;
+
switch (Lex.getKind()) {
default: return TokError("expected binary operation in atomicrmw");
case lltok::kw_xchg: Operation = AtomicRMWInst::Xchg; break;
diff --git a/lib/AsmParser/LLParser.h b/lib/AsmParser/LLParser.h
index ef4d3db..cbc3c23 100644
--- a/lib/AsmParser/LLParser.h
+++ b/lib/AsmParser/LLParser.h
@@ -363,12 +363,10 @@ namespace llvm {
bool ParseLandingPad(Instruction *&I, PerFunctionState &PFS);
bool ParseCall(Instruction *&I, PerFunctionState &PFS, bool isTail);
int ParseAlloc(Instruction *&I, PerFunctionState &PFS);
- int ParseLoad(Instruction *&I, PerFunctionState &PFS,
- bool isAtomic, bool isVolatile);
- int ParseStore(Instruction *&I, PerFunctionState &PFS,
- bool isAtomic, bool isVolatile);
- int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
- int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
+ int ParseLoad(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
+ int ParseStore(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
+ int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS);
+ int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS);
int ParseFence(Instruction *&I, PerFunctionState &PFS);
int ParseGetElementPtr(Instruction *&I, PerFunctionState &PFS);
int ParseExtractValue(Instruction *&I, PerFunctionState &PFS);
diff --git a/lib/VMCore/AsmWriter.cpp b/lib/VMCore/AsmWriter.cpp
index d166604..1fc94ba 100644
--- a/lib/VMCore/AsmWriter.cpp
+++ b/lib/VMCore/AsmWriter.cpp
@@ -1658,16 +1658,6 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
else
Out << '%' << SlotNum << " = ";
}
-
-  // If this is an atomic load or store, print out the atomic marker.
-  if ((isa<LoadInst>(I)  && cast<LoadInst>(I).isAtomic()) ||
-      (isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
-    Out << "atomic ";
-
-  // If this is a volatile load or store, print out the volatile marker.
-  if ((isa<LoadInst>(I)  && cast<LoadInst>(I).isVolatile()) ||
-      (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()))
-    Out << "volatile ";
   if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall())
Out << "tail ";
@@ -1675,6 +1665,18 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
// Print out the opcode...
Out << I.getOpcodeName();
+  // If this is an atomic load or store, print out the atomic marker.
+  if ((isa<LoadInst>(I)  && cast<LoadInst>(I).isAtomic()) ||
+      (isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
+    Out << " atomic";
+
+  // If this is a volatile operation, print out the volatile marker.
+  if ((isa<LoadInst>(I)  && cast<LoadInst>(I).isVolatile()) ||
+      (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()) ||
+      (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isVolatile()) ||
+      (isa<AtomicRMWInst>(I) && cast<AtomicRMWInst>(I).isVolatile()))
+    Out << " volatile";
+
// Print out optimization information.
WriteOptimizationInfo(Out, &I);
diff --git a/test/Assembler/atomic.ll b/test/Assembler/atomic.ll
new file mode 100644
index 0000000..fa6f1f4
--- /dev/null
+++ b/test/Assembler/atomic.ll
@@ -0,0 +1,26 @@
+; RUN: opt -S < %s | FileCheck %s
+; Basic smoke test for atomic operations.
+
+define void @f(i32* %x) {
+ ; CHECK: load atomic i32* %x unordered, align 4
+ load atomic i32* %x unordered, align 4
+ ; CHECK: load atomic volatile i32* %x singlethread acquire, align 4
+ load atomic volatile i32* %x singlethread acquire, align 4
+ ; CHECK: store atomic i32 3, i32* %x release, align 4
+ store atomic i32 3, i32* %x release, align 4
+ ; CHECK: store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
+ store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
+ ; CHECK: cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic
+ cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic
+ ; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel
+ cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel
+ ; CHECK: atomicrmw add i32* %x, i32 10 seq_cst
+ atomicrmw add i32* %x, i32 10 seq_cst
+ ; CHECK: atomicrmw volatile xchg i32* %x, i32 10 monotonic
+ atomicrmw volatile xchg i32* %x, i32 10 monotonic
+ ; CHECK: fence singlethread release
+ fence singlethread release
+ ; CHECK: fence seq_cst
+ fence seq_cst
+ ret void
+}
diff --git a/test/Transforms/DeadArgElim/deadexternal.ll b/test/Transforms/DeadArgElim/deadexternal.ll
index 8409261..b2d63ec 100644
--- a/test/Transforms/DeadArgElim/deadexternal.ll
+++ b/test/Transforms/DeadArgElim/deadexternal.ll
@@ -31,7 +31,7 @@ define void @h() {
entry:
%i = alloca i32, align 4
volatile store i32 10, i32* %i, align 4
-; CHECK: %tmp = volatile load i32* %i, align 4
+; CHECK: %tmp = load volatile i32* %i, align 4
; CHECK-next: call void @f(i32 undef)
%tmp = volatile load i32* %i, align 4
call void @f(i32 %tmp)
diff --git a/test/Transforms/DeadStoreElimination/simple.ll b/test/Transforms/DeadStoreElimination/simple.ll
index 5f143fc..ec2f157 100644
--- a/test/Transforms/DeadStoreElimination/simple.ll
+++ b/test/Transforms/DeadStoreElimination/simple.ll
@@ -42,20 +42,20 @@ define i32 @test3(i32* %g_addr) nounwind {
define void @test4(i32* %Q) {
%a = load i32* %Q
- volatile store i32 %a, i32* %Q
+ store volatile i32 %a, i32* %Q
ret void
; CHECK: @test4
; CHECK-NEXT: load i32
-; CHECK-NEXT: volatile store
+; CHECK-NEXT: store volatile
; CHECK-NEXT: ret void
}
define void @test5(i32* %Q) {
- %a = volatile load i32* %Q
+ %a = load volatile i32* %Q
store i32 %a, i32* %Q
ret void
; CHECK: @test5
-; CHECK-NEXT: volatile load
+; CHECK-NEXT: load volatile
; CHECK-NEXT: ret void
}
diff --git a/test/Transforms/EarlyCSE/basic.ll b/test/Transforms/EarlyCSE/basic.ll
index e3c75f9..57b1697 100644
--- a/test/Transforms/EarlyCSE/basic.ll
+++ b/test/Transforms/EarlyCSE/basic.ll
@@ -13,21 +13,21 @@ define void @test1(i8 %V, i32 *%P) {
volatile store i32 %C, i32* %P
volatile store i32 %D, i32* %P
; CHECK-NEXT: %C = zext i8 %V to i32
- ; CHECK-NEXT: volatile store i32 %C
- ; CHECK-NEXT: volatile store i32 %C
+ ; CHECK-NEXT: store volatile i32 %C
+ ; CHECK-NEXT: store volatile i32 %C
%E = add i32 %C, %C
%F = add i32 %C, %C
volatile store i32 %E, i32* %P
volatile store i32 %F, i32* %P
; CHECK-NEXT: %E = add i32 %C, %C
- ; CHECK-NEXT: volatile store i32 %E
- ; CHECK-NEXT: volatile store i32 %E
+ ; CHECK-NEXT: store volatile i32 %E
+ ; CHECK-NEXT: store volatile i32 %E
%G = add nuw i32 %C, %C ;; not a CSE with E
volatile store i32 %G, i32* %P
; CHECK-NEXT: %G = add nuw i32 %C, %C
- ; CHECK-NEXT: volatile store i32 %G
+ ; CHECK-NEXT: store volatile i32 %G
ret void
}
diff --git a/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll b/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
index 0c81700..a6803ab 100644
--- a/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
+++ b/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -globalopt -S | grep {volatile load}
+; RUN: opt < %s -globalopt -S | grep {load volatile}
@t0.1441 = internal global double 0x3FD5555555555555, align 8 ; [#uses=1]
define double @foo() nounwind {
diff --git a/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll b/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll
index 626564d..6847f5e 100644
--- a/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll
+++ b/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S | grep {volatile store}
+; RUN: opt < %s -instcombine -S | grep {store volatile}
define void @test() {
%votf = alloca <4 x float> ; <<4 x float>*> [#uses=1]
diff --git a/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll b/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll
index f2cc725..a24f307 100644
--- a/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll
+++ b/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S | grep {volatile load} | count 2
+; RUN: opt < %s -instcombine -S | grep {load volatile} | count 2
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
@g_1 = internal global i32 0 ; [#uses=3]
diff --git a/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll b/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll
index 176162d..5fb11ff 100644
--- a/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll
+++ b/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S | grep {volatile load} | count 2
+; RUN: opt < %s -instcombine -S | grep {load volatile} | count 2
; PR2262
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
diff --git a/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll b/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll
index ccfb118..8104408 100644
--- a/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll
+++ b/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S | grep {volatile load} | count 2
+; RUN: opt < %s -instcombine -S | grep {load volatile} | count 2
; PR2496
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
diff --git a/test/Transforms/InstCombine/extractvalue.ll b/test/Transforms/InstCombine/extractvalue.ll
index 64edc18..cf36b8f 100644
--- a/test/Transforms/InstCombine/extractvalue.ll
+++ b/test/Transforms/InstCombine/extractvalue.ll
@@ -96,7 +96,7 @@ define i32 @nogep-multiuse({i32, i32}* %pair) {
}
; CHECK: define i32 @nogep-volatile
-; CHECK-NEXT: volatile load {{.*}} %pair
+; CHECK-NEXT: load volatile {{.*}} %pair
; CHECK-NEXT: extractvalue
; CHECK-NEXT: ret
define i32 @nogep-volatile({i32, i32}* %pair) {
diff --git a/test/Transforms/InstCombine/intrinsics.ll b/test/Transforms/InstCombine/intrinsics.ll
index 0d84ae4..f033e51 100644
--- a/test/Transforms/InstCombine/intrinsics.ll
+++ b/test/Transforms/InstCombine/intrinsics.ll
@@ -152,9 +152,9 @@ entry:
ret void
; CHECK: @powi
; CHECK: %A = fdiv double 1.0{{.*}}, %V
-; CHECK: volatile store double %A,
-; CHECK: volatile store double 1.0
-; CHECK: volatile store double %V
+; CHECK: store volatile double %A,
+; CHECK: store volatile double 1.0
+; CHECK: store volatile double %V
}
define i32 @cttz(i32 %a) {
@@ -194,11 +194,11 @@ entry:
; CHECK: @cmp.simplify
; CHECK-NEXT: entry:
; CHECK-NEXT: %lz.cmp = icmp eq i32 %a, 0
-; CHECK-NEXT: volatile store i1 %lz.cmp, i1* %c
+; CHECK-NEXT: store volatile i1 %lz.cmp, i1* %c
; CHECK-NEXT: %tz.cmp = icmp ne i32 %a, 0
-; CHECK-NEXT: volatile store i1 %tz.cmp, i1* %c
+; CHECK-NEXT: store volatile i1 %tz.cmp, i1* %c
; CHECK-NEXT: %pop.cmp = icmp eq i32 %b, 0
-; CHECK-NEXT: volatile store i1 %pop.cmp, i1* %c
+; CHECK-NEXT: store volatile i1 %pop.cmp, i1* %c
}
diff --git a/test/Transforms/InstCombine/volatile_store.ll b/test/Transforms/InstCombine/volatile_store.ll
index 5316bd7..0518e5a 100644
--- a/test/Transforms/InstCombine/volatile_store.ll
+++ b/test/Transforms/InstCombine/volatile_store.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -instcombine -S | grep {volatile store}
-; RUN: opt < %s -instcombine -S | grep {volatile load}
+; RUN: opt < %s -instcombine -S | grep {store volatile}
+; RUN: opt < %s -instcombine -S | grep {load volatile}
@x = weak global i32 0 ; [#uses=2]
diff --git a/test/Transforms/JumpThreading/no-irreducible-loops.ll b/test/Transforms/JumpThreading/no-irreducible-loops.ll
index 97276b0..7c7fe39 100644
--- a/test/Transforms/JumpThreading/no-irreducible-loops.ll
+++ b/test/Transforms/JumpThreading/no-irreducible-loops.ll
@@ -1,5 +1,5 @@
; RUN: opt < %s -jump-threading -loop-rotate -instcombine -indvars -loop-unroll -simplifycfg -S -verify-dom-info -verify-loop-info > %t
-; RUN: grep {volatile store} %t | count 3
+; RUN: grep {store volatile} %t | count 3
; RUN: not grep {br label} %t
; Jump threading should not prevent this loop from being unrolled.
diff --git a/test/Transforms/LICM/2007-05-22-VolatileSink.ll b/test/Transforms/LICM/2007-05-22-VolatileSink.ll
index c12e13b..17383c2 100644
--- a/test/Transforms/LICM/2007-05-22-VolatileSink.ll
+++ b/test/Transforms/LICM/2007-05-22-VolatileSink.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -licm -S | grep {volatile store}
+; RUN: opt < %s -licm -S | grep {store volatile}
; PR1435
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
target triple = "i686-apple-darwin8"
diff --git a/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll b/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll
index 5774f58..fd114f4 100644
--- a/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll
+++ b/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll
@@ -15,7 +15,7 @@ for.body4.lr.ph:
br label %for.body4
; CHECK: for.body4:
-; CHECK: volatile load i16* @g_39
+; CHECK: load volatile i16* @g_39
for.body4:
%l_612.11 = phi i32* [ undef, %for.body4.lr.ph ], [ %call19, %for.body4 ]
diff --git a/test/Transforms/LICM/scalar_promote.ll b/test/Transforms/LICM/scalar_promote.ll
index d8acdc1..9aefc4f 100644
--- a/test/Transforms/LICM/scalar_promote.ll
+++ b/test/Transforms/LICM/scalar_promote.ll
@@ -65,7 +65,7 @@ Loop:
br i1 true, label %Out, label %Loop
; CHECK: Loop:
-; CHECK-NEXT: volatile load
+; CHECK-NEXT: load volatile
Out: ; preds = %Loop
ret void
diff --git a/test/Transforms/ObjCARC/contract-storestrong.ll b/test/Transforms/ObjCARC/contract-storestrong.ll
index 50ed260..25c93f4 100644
--- a/test/Transforms/ObjCARC/contract-storestrong.ll
+++ b/test/Transforms/ObjCARC/contract-storestrong.ll
@@ -25,7 +25,7 @@ entry:
; CHECK: define void @test1(i8* %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %0 = tail call i8* @objc_retain(i8* %p) nounwind
-; CHECK-NEXT: %tmp = volatile load i8** @x, align 8
+; CHECK-NEXT: %tmp = load volatile i8** @x, align 8
; CHECK-NEXT: store i8* %0, i8** @x, align 8
; CHECK-NEXT: tail call void @objc_release(i8* %tmp) nounwind
; CHECK-NEXT: ret void
@@ -45,7 +45,7 @@ entry:
; CHECK-NEXT: entry:
; CHECK-NEXT: %0 = tail call i8* @objc_retain(i8* %p) nounwind
; CHECK-NEXT: %tmp = load i8** @x, align 8
-; CHECK-NEXT: volatile store i8* %0, i8** @x, align 8
+; CHECK-NEXT: store volatile i8* %0, i8** @x, align 8
; CHECK-NEXT: tail call void @objc_release(i8* %tmp) nounwind
; CHECK-NEXT: ret void
; CHECK-NEXT: }
diff --git a/test/Transforms/ScalarRepl/volatile.ll b/test/Transforms/ScalarRepl/volatile.ll
index 3ff322e..ab276b0 100644
--- a/test/Transforms/ScalarRepl/volatile.ll
+++ b/test/Transforms/ScalarRepl/volatile.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -scalarrepl -S | grep {volatile load}
-; RUN: opt < %s -scalarrepl -S | grep {volatile store}
+; RUN: opt < %s -scalarrepl -S | grep {load volatile}
+; RUN: opt < %s -scalarrepl -S | grep {store volatile}
define i32 @voltest(i32 %T) {
%A = alloca {i32, i32}
diff --git a/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll b/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
index 7bca5f5..ebf4f17 100644
--- a/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
+++ b/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
@@ -11,14 +11,14 @@ entry:
br i1 %0, label %bb, label %return
bb: ; preds = %entry
- %1 = volatile load i32* null
+ %1 = load volatile i32* null
unreachable
br label %return
return: ; preds = %entry
ret void
; CHECK: @test1
-; CHECK: volatile load
+; CHECK: load volatile
}
; rdar://7958343
@@ -35,10 +35,10 @@ entry:
; PR7369
define void @test3() nounwind {
entry:
- volatile store i32 4, i32* null
+ store volatile i32 4, i32* null
ret void
; CHECK: @test3
-; CHECK: volatile store i32 4, i32* null
+; CHECK: store volatile i32 4, i32* null
; CHECK: ret
}
diff --git a/test/Transforms/SimplifyLibCalls/memcmp.ll b/test/Transforms/SimplifyLibCalls/memcmp.ll
index ee99501..6ca4dc9 100644
--- a/test/Transforms/SimplifyLibCalls/memcmp.ll
+++ b/test/Transforms/SimplifyLibCalls/memcmp.ll
@@ -10,26 +10,26 @@ declare i32 @memcmp(i8*, i8*, i32)
define void @test(i8* %P, i8* %Q, i32 %N, i32* %IP, i1* %BP) {
%A = call i32 @memcmp( i8* %P, i8* %P, i32 %N ) ; [#uses=1]
; CHECK-NOT: call {{.*}} memcmp
-; CHECK: volatile store
- volatile store i32 %A, i32* %IP
+; CHECK: store volatile
+ store volatile i32 %A, i32* %IP
%B = call i32 @memcmp( i8* %P, i8* %Q, i32 0 ) ; [#uses=1]
; CHECK-NOT: call {{.*}} memcmp
-; CHECK: volatile store
- volatile store i32 %B, i32* %IP
+; CHECK: store volatile
+ store volatile i32 %B, i32* %IP
%C = call i32 @memcmp( i8* %P, i8* %Q, i32 1 ) ; [#uses=1]
; CHECK: load
; CHECK: zext
; CHECK: load
; CHECK: zext
; CHECK: sub
-; CHECK: volatile store
- volatile store i32 %C, i32* %IP
- %F = call i32 @memcmp(i8* getelementptr ([4 x i8]* @hel, i32 0, i32 0),
- i8* getelementptr ([8 x i8]* @hello_u, i32 0, i32 0),
- i32 3)
+; CHECK: store volatile
+ store volatile i32 %C, i32* %IP
+ %F = call i32 @memcmp(i8* getelementptr ([4 x i8]* @hel, i32 0, i32 0),
+ i8* getelementptr ([8 x i8]* @hello_u, i32 0, i32 0),
+ i32 3)
; CHECK-NOT: call {{.*}} memcmp
-; CHECK: volatile store
- volatile store i32 %F, i32* %IP
+; CHECK: store volatile
+ store volatile i32 %F, i32* %IP
ret void
}
--
cgit v1.1