aboutsummaryrefslogtreecommitdiffstats
path: root/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
diff options
context:
space:
mode:
authorEvan Cheng <evan.cheng@apple.com>2010-01-08 23:41:50 +0000
committerEvan Cheng <evan.cheng@apple.com>2010-01-08 23:41:50 +0000
commit306b4cafc1fd8c075c422825c49bdd14a5f851d7 (patch)
tree1cb54fea8d9cfa1c3f980f10efb17457dd569800 /test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
parentd003c5b3d750706cdc0bfa34ebf85b95a7a42abe (diff)
downloadexternal_llvm-306b4cafc1fd8c075c422825c49bdd14a5f851d7.zip
external_llvm-306b4cafc1fd8c075c422825c49bdd14a5f851d7.tar.gz
external_llvm-306b4cafc1fd8c075c422825c49bdd14a5f851d7.tar.bz2
Fix a critical bug in 64-bit atomic operation lowering for 32-bit. The results of the cmpxchg8b instructions are being thrown away when it branches back to the top of the checking loop. This means the loop always compares against the old value and this can result in a deadlock.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@93028 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/X86/2010-01-08-Atomic64Bug.ll')
-rw-r--r--test/CodeGen/X86/2010-01-08-Atomic64Bug.ll29
1 files changed, 29 insertions, 0 deletions
diff --git a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
new file mode 100644
index 0000000..172e1c7
--- /dev/null
+++ b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s -mtriple=i386-apple-darwin | FileCheck %s
+; rdar://r7512579
+
+; PHI defs in the atomic loop should be used by the add / adc
+; instructions. They should not be dead.
+
+define void @t(i64* nocapture %p) nounwind ssp {
+entry:
+; CHECK: t:
+; CHECK: movl $1
+; CHECK: movl (%ebp), %eax
+; CHECK: movl 4(%ebp), %edx
+; CHECK: LBB1_1:
+; CHECK-NOT: movl $1
+; CHECK-NOT: movl $0
+; CHECK: addl
+; CHECK: adcl
+; CHECK: lock
+; CHECK: cmpxchg8b
+; CHECK: jne
+ tail call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0]
+ tail call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ ret void
+}
+
+declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
+
+declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind