Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/X86/2010-01-08-Atomic64Bug.ll   13
-rw-r--r--  test/CodeGen/X86/atomic16.ll                 250
-rw-r--r--  test/CodeGen/X86/atomic32.ll                 250
-rw-r--r--  test/CodeGen/X86/atomic64.ll                 216
-rw-r--r--  test/CodeGen/X86/atomic6432.ll               209
-rw-r--r--  test/CodeGen/X86/atomic8.ll                  251
-rw-r--r--  test/CodeGen/X86/atomic_op.ll                 11
-rw-r--r--  test/CodeGen/X86/pr13458.ll                   14
8 files changed, 1201 insertions(+), 13 deletions(-)
diff --git a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
index 8b55bd7..e969b13 100644
--- a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
+++ b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
@@ -7,17 +7,16 @@
define void @t(i64* nocapture %p) nounwind ssp {
entry:
; CHECK: t:
-; CHECK: movl $1
-; CHECK: movl (%ebp), %eax
-; CHECK: movl 4(%ebp), %edx
+; CHECK: movl ([[REG:%[a-z]+]]), %eax
+; CHECK: movl 4([[REG]]), %edx
; CHECK: LBB0_1:
-; CHECK-NOT: movl $1
-; CHECK-NOT: movl $0
+; CHECK: movl $1
; CHECK: addl
+; CHECK: movl $0
; CHECK: adcl
; CHECK: lock
-; CHECK: cmpxchg8b
-; CHECK: jne
+; CHECK-NEXT: cmpxchg8b ([[REG]])
+; CHECK-NEXT: jne
%0 = atomicrmw add i64* %p, i64 1 seq_cst
ret void
}
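
The updated checks describe the standard i386 lowering of a 64-bit fetch-add: load the current value into EDX:EAX, build the incremented pair (hence the movl $1/addl and movl $0/adcl inside the loop), and retry with lock cmpxchg8b until the exchange succeeds. For orientation, a minimal C11 sketch of that retry loop (illustrative only; the function and variable names are not from the patch):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Sketch of the retry loop behind `lock cmpxchg8b`: the expected value
     * sits in EDX:EAX, the incremented value in ECX:EBX, and the loop
     * repeats until no other thread changed the location in between. */
    static uint64_t fetch_add_u64(_Atomic uint64_t *p, uint64_t v) {
        uint64_t expected = atomic_load_explicit(p, memory_order_relaxed);
        while (!atomic_compare_exchange_weak_explicit(
                   p, &expected, expected + v,
                   memory_order_seq_cst, memory_order_relaxed)) {
            /* `expected` was refreshed by the failed exchange; retry. */
        }
        return expected;  /* old value */
    }
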
diff --git a/test/CodeGen/X86/atomic16.ll b/test/CodeGen/X86/atomic16.ll
new file mode 100644
index 0000000..e276d47
--- /dev/null
+++ b/test/CodeGen/X86/atomic16.ll
@@ -0,0 +1,250 @@
+; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 | FileCheck %s --check-prefix X64
+; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 | FileCheck %s --check-prefix X32
+
+@sc16 = external global i16
+
+define void @atomic_fetch_add16() nounwind {
+; X64: atomic_fetch_add16
+; X32: atomic_fetch_add16
+entry:
+; 16-bit
+ %t1 = atomicrmw add i16* @sc16, i16 1 acquire
+; X64: lock
+; X64: incw
+; X32: lock
+; X32: incw
+ %t2 = atomicrmw add i16* @sc16, i16 3 acquire
+; X64: lock
+; X64: addw $3
+; X32: lock
+; X32: addw $3
+ %t3 = atomicrmw add i16* @sc16, i16 5 acquire
+; X64: lock
+; X64: xaddw
+; X32: lock
+; X32: xaddw
+ %t4 = atomicrmw add i16* @sc16, i16 %t3 acquire
+; X64: lock
+; X64: addw
+; X32: lock
+; X32: addw
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_sub16() nounwind {
+; X64: atomic_fetch_sub16
+; X32: atomic_fetch_sub16
+ %t1 = atomicrmw sub i16* @sc16, i16 1 acquire
+; X64: lock
+; X64: decw
+; X32: lock
+; X32: decw
+ %t2 = atomicrmw sub i16* @sc16, i16 3 acquire
+; X64: lock
+; X64: subw $3
+; X32: lock
+; X32: subw $3
+ %t3 = atomicrmw sub i16* @sc16, i16 5 acquire
+; X64: lock
+; X64: xaddw
+; X32: lock
+; X32: xaddw
+ %t4 = atomicrmw sub i16* @sc16, i16 %t3 acquire
+; X64: lock
+; X64: subw
+; X32: lock
+; X32: subw
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_and16() nounwind {
+; X64: atomic_fetch_and16
+; X32: atomic_fetch_and16
+ %t1 = atomicrmw and i16* @sc16, i16 3 acquire
+; X64: lock
+; X64: andw $3
+; X32: lock
+; X32: andw $3
+ %t2 = atomicrmw and i16* @sc16, i16 5 acquire
+; X64: andw
+; X64: lock
+; X64: cmpxchgw
+; X32: andw
+; X32: lock
+; X32: cmpxchgw
+ %t3 = atomicrmw and i16* @sc16, i16 %t2 acquire
+; X64: lock
+; X64: andw
+; X32: lock
+; X32: andw
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_or16() nounwind {
+; X64: atomic_fetch_or16
+; X32: atomic_fetch_or16
+ %t1 = atomicrmw or i16* @sc16, i16 3 acquire
+; X64: lock
+; X64: orw $3
+; X32: lock
+; X32: orw $3
+ %t2 = atomicrmw or i16* @sc16, i16 5 acquire
+; X64: orw
+; X64: lock
+; X64: cmpxchgw
+; X32: orw
+; X32: lock
+; X32: cmpxchgw
+ %t3 = atomicrmw or i16* @sc16, i16 %t2 acquire
+; X64: lock
+; X64: orw
+; X32: lock
+; X32: orw
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_xor16() nounwind {
+; X64: atomic_fetch_xor16
+; X32: atomic_fetch_xor16
+ %t1 = atomicrmw xor i16* @sc16, i16 3 acquire
+; X64: lock
+; X64: xorw $3
+; X32: lock
+; X32: xorw $3
+ %t2 = atomicrmw xor i16* @sc16, i16 5 acquire
+; X64: xorw
+; X64: lock
+; X64: cmpxchgw
+; X32: xorw
+; X32: lock
+; X32: cmpxchgw
+ %t3 = atomicrmw xor i16* @sc16, i16 %t2 acquire
+; X64: lock
+; X64: xorw
+; X32: lock
+; X32: xorw
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_nand16(i16 %x) nounwind {
+; X64: atomic_fetch_nand16
+; X32: atomic_fetch_nand16
+ %t1 = atomicrmw nand i16* @sc16, i16 %x acquire
+; X64: andw
+; X64: notw
+; X64: lock
+; X64: cmpxchgw
+; X32: andw
+; X32: notw
+; X32: lock
+; X32: cmpxchgw
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_max16(i16 %x) nounwind {
+ %t1 = atomicrmw max i16* @sc16, i16 %x acquire
+; X64: cmpw
+; X64: cmov
+; X64: lock
+; X64: cmpxchgw
+
+; X32: cmpw
+; X32: cmov
+; X32: lock
+; X32: cmpxchgw
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_min16(i16 %x) nounwind {
+ %t1 = atomicrmw min i16* @sc16, i16 %x acquire
+; X64: cmpw
+; X64: cmov
+; X64: lock
+; X64: cmpxchgw
+
+; X32: cmpw
+; X32: cmov
+; X32: lock
+; X32: cmpxchgw
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_umax16(i16 %x) nounwind {
+ %t1 = atomicrmw umax i16* @sc16, i16 %x acquire
+; X64: cmpw
+; X64: cmov
+; X64: lock
+; X64: cmpxchgw
+
+; X32: cmpw
+; X32: cmov
+; X32: lock
+; X32: cmpxchgw
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_umin16(i16 %x) nounwind {
+ %t1 = atomicrmw umin i16* @sc16, i16 %x acquire
+; X64: cmpw
+; X64: cmov
+; X64: lock
+; X64: cmpxchgw
+; X32: cmpw
+; X32: cmov
+; X32: lock
+; X32: cmpxchgw
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_cmpxchg16() nounwind {
+ %t1 = cmpxchg i16* @sc16, i16 0, i16 1 acquire
+; X64: lock
+; X64: cmpxchgw
+; X32: lock
+; X32: cmpxchgw
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_store16(i16 %x) nounwind {
+ store atomic i16 %x, i16* @sc16 release, align 4
+; X64-NOT: lock
+; X64: movw
+; X32-NOT: lock
+; X32: movw
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_swap16(i16 %x) nounwind {
+ %t1 = atomicrmw xchg i16* @sc16, i16 %x acquire
+; X64-NOT: lock
+; X64: xchgw
+; X32-NOT: lock
+; X32: xchgw
+ ret void
+; X64: ret
+; X32: ret
+}
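
Throughout these tests the chosen instruction tracks whether the fetched value is consumed: an atomicrmw add whose result is dead can become a memory-destination lock incw/lock addw, while a live result forces lock xaddw, which returns the old value in a register. A hedged C11 illustration of the two shapes (names invented here):

    #include <stdatomic.h>

    _Atomic short counter;

    void add_result_dead(void) {
        /* Old value ignored: a direct read-modify-write such as
         * `lock addw $3, counter` suffices. */
        atomic_fetch_add_explicit(&counter, 3, memory_order_acquire);
    }

    short add_result_live(void) {
        /* Old value needed: typically `lock xaddw`, which swaps the addend
         * register with the previous memory contents. */
        return atomic_fetch_add_explicit(&counter, 5, memory_order_acquire);
    }
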
diff --git a/test/CodeGen/X86/atomic32.ll b/test/CodeGen/X86/atomic32.ll
new file mode 100644
index 0000000..dc927d8
--- /dev/null
+++ b/test/CodeGen/X86/atomic32.ll
@@ -0,0 +1,250 @@
+; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 | FileCheck %s --check-prefix X64
+; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 | FileCheck %s --check-prefix X32
+
+@sc32 = external global i32
+
+define void @atomic_fetch_add32() nounwind {
+; X64: atomic_fetch_add32
+; X32: atomic_fetch_add32
+entry:
+; 32-bit
+ %t1 = atomicrmw add i32* @sc32, i32 1 acquire
+; X64: lock
+; X64: incl
+; X32: lock
+; X32: incl
+ %t2 = atomicrmw add i32* @sc32, i32 3 acquire
+; X64: lock
+; X64: addl $3
+; X32: lock
+; X32: addl $3
+ %t3 = atomicrmw add i32* @sc32, i32 5 acquire
+; X64: lock
+; X64: xaddl
+; X32: lock
+; X32: xaddl
+ %t4 = atomicrmw add i32* @sc32, i32 %t3 acquire
+; X64: lock
+; X64: addl
+; X32: lock
+; X32: addl
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_sub32() nounwind {
+; X64: atomic_fetch_sub32
+; X32: atomic_fetch_sub32
+ %t1 = atomicrmw sub i32* @sc32, i32 1 acquire
+; X64: lock
+; X64: decl
+; X32: lock
+; X32: decl
+ %t2 = atomicrmw sub i32* @sc32, i32 3 acquire
+; X64: lock
+; X64: subl $3
+; X32: lock
+; X32: subl $3
+ %t3 = atomicrmw sub i32* @sc32, i32 5 acquire
+; X64: lock
+; X64: xaddl
+; X32: lock
+; X32: xaddl
+ %t4 = atomicrmw sub i32* @sc32, i32 %t3 acquire
+; X64: lock
+; X64: subl
+; X32: lock
+; X32: subl
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_and32() nounwind {
+; X64: atomic_fetch_and32
+; X32: atomic_fetch_and32
+ %t1 = atomicrmw and i32* @sc32, i32 3 acquire
+; X64: lock
+; X64: andl $3
+; X32: lock
+; X32: andl $3
+ %t2 = atomicrmw and i32* @sc32, i32 5 acquire
+; X64: andl
+; X64: lock
+; X64: cmpxchgl
+; X32: andl
+; X32: lock
+; X32: cmpxchgl
+ %t3 = atomicrmw and i32* @sc32, i32 %t2 acquire
+; X64: lock
+; X64: andl
+; X32: lock
+; X32: andl
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_or32() nounwind {
+; X64: atomic_fetch_or32
+; X32: atomic_fetch_or32
+ %t1 = atomicrmw or i32* @sc32, i32 3 acquire
+; X64: lock
+; X64: orl $3
+; X32: lock
+; X32: orl $3
+ %t2 = atomicrmw or i32* @sc32, i32 5 acquire
+; X64: orl
+; X64: lock
+; X64: cmpxchgl
+; X32: orl
+; X32: lock
+; X32: cmpxchgl
+ %t3 = atomicrmw or i32* @sc32, i32 %t2 acquire
+; X64: lock
+; X64: orl
+; X32: lock
+; X32: orl
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_xor32() nounwind {
+; X64: atomic_fetch_xor32
+; X32: atomic_fetch_xor32
+ %t1 = atomicrmw xor i32* @sc32, i32 3 acquire
+; X64: lock
+; X64: xorl $3
+; X32: lock
+; X32: xorl $3
+ %t2 = atomicrmw xor i32* @sc32, i32 5 acquire
+; X64: xorl
+; X64: lock
+; X64: cmpxchgl
+; X32: xorl
+; X32: lock
+; X32: cmpxchgl
+ %t3 = atomicrmw xor i32* @sc32, i32 %t2 acquire
+; X64: lock
+; X64: xorl
+; X32: lock
+; X32: xorl
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_nand32(i32 %x) nounwind {
+; X64: atomic_fetch_nand32
+; X32: atomic_fetch_nand32
+ %t1 = atomicrmw nand i32* @sc32, i32 %x acquire
+; X64: andl
+; X64: notl
+; X64: lock
+; X64: cmpxchgl
+; X32: andl
+; X32: notl
+; X32: lock
+; X32: cmpxchgl
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_max32(i32 %x) nounwind {
+ %t1 = atomicrmw max i32* @sc32, i32 %x acquire
+; X64: cmpl
+; X64: cmov
+; X64: lock
+; X64: cmpxchgl
+
+; X32: cmpl
+; X32: cmov
+; X32: lock
+; X32: cmpxchgl
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_min32(i32 %x) nounwind {
+ %t1 = atomicrmw min i32* @sc32, i32 %x acquire
+; X64: cmpl
+; X64: cmov
+; X64: lock
+; X64: cmpxchgl
+
+; X32: cmpl
+; X32: cmov
+; X32: lock
+; X32: cmpxchgl
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_umax32(i32 %x) nounwind {
+ %t1 = atomicrmw umax i32* @sc32, i32 %x acquire
+; X64: cmpl
+; X64: cmov
+; X64: lock
+; X64: cmpxchgl
+
+; X32: cmpl
+; X32: cmov
+; X32: lock
+; X32: cmpxchgl
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_umin32(i32 %x) nounwind {
+ %t1 = atomicrmw umin i32* @sc32, i32 %x acquire
+; X64: cmpl
+; X64: cmov
+; X64: lock
+; X64: cmpxchgl
+; X32: cmpl
+; X32: cmov
+; X32: lock
+; X32: cmpxchgl
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_cmpxchg32() nounwind {
+ %t1 = cmpxchg i32* @sc32, i32 0, i32 1 acquire
+; X64: lock
+; X64: cmpxchgl
+; X32: lock
+; X32: cmpxchgl
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_store32(i32 %x) nounwind {
+ store atomic i32 %x, i32* @sc32 release, align 4
+; X64-NOT: lock
+; X64: movl
+; X32-NOT: lock
+; X32: movl
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_swap32(i32 %x) nounwind {
+ %t1 = atomicrmw xchg i32* @sc32, i32 %x acquire
+; X64-NOT: lock
+; X64: xchgl
+; X32-NOT: lock
+; X32: xchgl
+ ret void
+; X64: ret
+; X32: ret
+}
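
x86 has lock and/or/xor forms that modify memory but discard the old value, and no xadd-style variant exists for these operations; when a test keeps the result (%t2 feeding %t3 above), codegen must fall back to a compare-exchange loop, which is what the andl-then-cmpxchgl checks encode. A sketch under those assumptions:

    #include <stdatomic.h>

    _Atomic int flags;

    int fetch_and_live(int mask) {
        /* Compute old & mask in a register, then publish with cmpxchg;
         * on failure `expected` is reloaded and the loop tries again. */
        int expected = atomic_load_explicit(&flags, memory_order_relaxed);
        while (!atomic_compare_exchange_weak_explicit(
                   &flags, &expected, expected & mask,
                   memory_order_acquire, memory_order_relaxed)) {
        }
        return expected;
    }
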
diff --git a/test/CodeGen/X86/atomic64.ll b/test/CodeGen/X86/atomic64.ll
new file mode 100644
index 0000000..45785cc
--- /dev/null
+++ b/test/CodeGen/X86/atomic64.ll
@@ -0,0 +1,216 @@
+; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 | FileCheck %s --check-prefix X64
+
+@sc64 = external global i64
+
+define void @atomic_fetch_add64() nounwind {
+; X64: atomic_fetch_add64
+entry:
+ %t1 = atomicrmw add i64* @sc64, i64 1 acquire
+; X64: lock
+; X64: incq
+ %t2 = atomicrmw add i64* @sc64, i64 3 acquire
+; X64: lock
+; X64: addq $3
+ %t3 = atomicrmw add i64* @sc64, i64 5 acquire
+; X64: lock
+; X64: xaddq
+ %t4 = atomicrmw add i64* @sc64, i64 %t3 acquire
+; X64: lock
+; X64: addq
+ ret void
+; X64: ret
+}
+
+define void @atomic_fetch_sub64() nounwind {
+; X64: atomic_fetch_sub64
+ %t1 = atomicrmw sub i64* @sc64, i64 1 acquire
+; X64: lock
+; X64: decq
+ %t2 = atomicrmw sub i64* @sc64, i64 3 acquire
+; X64: lock
+; X64: subq $3
+ %t3 = atomicrmw sub i64* @sc64, i64 5 acquire
+; X64: lock
+; X64: xaddq
+ %t4 = atomicrmw sub i64* @sc64, i64 %t3 acquire
+; X64: lock
+; X64: subq
+ ret void
+; X64: ret
+}
+
+define void @atomic_fetch_and64() nounwind {
+; X64: atomic_fetch_and64
+ %t1 = atomicrmw and i64* @sc64, i64 3 acquire
+; X64: lock
+; X64: andq $3
+ %t2 = atomicrmw and i64* @sc64, i64 5 acquire
+; X64: andq
+; X64: lock
+; X64: cmpxchgq
+ %t3 = atomicrmw and i64* @sc64, i64 %t2 acquire
+; X64: lock
+; X64: andq
+ ret void
+; X64: ret
+}
+
+define void @atomic_fetch_or64() nounwind {
+; X64: atomic_fetch_or64
+ %t1 = atomicrmw or i64* @sc64, i64 3 acquire
+; X64: lock
+; X64: orq $3
+ %t2 = atomicrmw or i64* @sc64, i64 5 acquire
+; X64: orq
+; X64: lock
+; X64: cmpxchgq
+ %t3 = atomicrmw or i64* @sc64, i64 %t2 acquire
+; X64: lock
+; X64: orq
+ ret void
+; X64: ret
+}
+
+define void @atomic_fetch_xor64() nounwind {
+; X64: atomic_fetch_xor64
+ %t1 = atomicrmw xor i64* @sc64, i64 3 acquire
+; X64: lock
+; X64: xorq $3
+ %t2 = atomicrmw xor i64* @sc64, i64 5 acquire
+; X64: xorq
+; X64: lock
+; X64: cmpxchgq
+ %t3 = atomicrmw xor i64* @sc64, i64 %t2 acquire
+; X64: lock
+; X64: xorq
+ ret void
+; X64: ret
+}
+
+define void @atomic_fetch_nand64(i64 %x) nounwind {
+; X64: atomic_fetch_nand64
+; X32: atomic_fetch_nand64
+ %t1 = atomicrmw nand i64* @sc64, i64 %x acquire
+; X64: andq
+; X64: notq
+; X64: lock
+; X64: cmpxchgq
+; X32: andl
+; X32: andl
+; X32: notl
+; X32: notl
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_max64(i64 %x) nounwind {
+ %t1 = atomicrmw max i64* @sc64, i64 %x acquire
+; X64: cmpq
+; X64: cmov
+; X64: lock
+; X64: cmpxchgq
+
+; X32: cmpl
+; X32: cmpl
+; X32: cmov
+; X32: cmov
+; X32: cmov
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_min64(i64 %x) nounwind {
+ %t1 = atomicrmw min i64* @sc64, i64 %x acquire
+; X64: cmpq
+; X64: cmov
+; X64: lock
+; X64: cmpxchgq
+
+; X32: cmpl
+; X32: cmpl
+; X32: cmov
+; X32: cmov
+; X32: cmov
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_umax64(i64 %x) nounwind {
+ %t1 = atomicrmw umax i64* @sc64, i64 %x acquire
+; X64: cmpq
+; X64: cmov
+; X64: lock
+; X64: cmpxchgq
+
+; X32: cmpl
+; X32: cmpl
+; X32: cmov
+; X32: cmov
+; X32: cmov
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_umin64(i64 %x) nounwind {
+ %t1 = atomicrmw umin i64* @sc64, i64 %x acquire
+; X64: cmpq
+; X64: cmov
+; X64: lock
+; X64: cmpxchgq
+
+; X32: cmpl
+; X32: cmpl
+; X32: cmov
+; X32: cmov
+; X32: cmov
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_cmpxchg64() nounwind {
+ %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire
+; X64: lock
+; X64: cmpxchgq
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_store64(i64 %x) nounwind {
+ store atomic i64 %x, i64* @sc64 release, align 8
+; X64-NOT: lock
+; X64: movq
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_swap64(i64 %x) nounwind {
+ %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
+; X64-NOT: lock
+; X64: xchgq
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X64: ret
+; X32: ret
+}
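
min/max/umin/umax have no x86 read-modify-write instruction at all, so every flavor above is a compare, a conditional move selecting the winner, and a lock cmpxchg retry loop; on 32-bit targets the comparison and selection are split across two register pairs, hence the doubled cmpl/cmov checks. A C11 sketch of the signed-max case (assumed shape, not the emitted code verbatim):

    #include <stdatomic.h>
    #include <stdint.h>

    _Atomic int64_t watermark;

    int64_t fetch_max_i64(int64_t x) {
        int64_t old = atomic_load_explicit(&watermark, memory_order_relaxed);
        int64_t desired;
        do {
            /* cmp + cmov: keep the larger of the stored value and x. */
            desired = old > x ? old : x;
        } while (!atomic_compare_exchange_weak_explicit(
                     &watermark, &old, desired,
                     memory_order_acquire, memory_order_relaxed));
        return old;
    }
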
diff --git a/test/CodeGen/X86/atomic6432.ll b/test/CodeGen/X86/atomic6432.ll
new file mode 100644
index 0000000..556c36e
--- /dev/null
+++ b/test/CodeGen/X86/atomic6432.ll
@@ -0,0 +1,209 @@
+; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 | FileCheck %s --check-prefix X32
+; XFAIL: *
+
+@sc64 = external global i64
+
+define void @atomic_fetch_add64() nounwind {
+; X32: atomic_fetch_add64
+entry:
+ %t1 = atomicrmw add i64* @sc64, i64 1 acquire
+; X32: addl
+; X32: adcl
+; X32: lock
+; X32: cmpxchg8b
+ %t2 = atomicrmw add i64* @sc64, i64 3 acquire
+; X32: addl
+; X32: adcl
+; X32: lock
+; X32: cmpxchg8b
+ %t3 = atomicrmw add i64* @sc64, i64 5 acquire
+; X32: addl
+; X32: adcl
+; X32: lock
+; X32: cmpxchg8b
+ %t4 = atomicrmw add i64* @sc64, i64 %t3 acquire
+; X32: addl
+; X32: adcl
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X32: ret
+}
+
+define void @atomic_fetch_sub64() nounwind {
+; X32: atomic_fetch_sub64
+ %t1 = atomicrmw sub i64* @sc64, i64 1 acquire
+; X32: subl
+; X32: sbbl
+; X32: lock
+; X32: cmpxchg8b
+ %t2 = atomicrmw sub i64* @sc64, i64 3 acquire
+; X32: subl
+; X32: sbbl
+; X32: lock
+; X32: cmpxchg8b
+ %t3 = atomicrmw sub i64* @sc64, i64 5 acquire
+; X32: subl
+; X32: sbbl
+; X32: lock
+; X32: cmpxchg8b
+ %t4 = atomicrmw sub i64* @sc64, i64 %t3 acquire
+; X32: subl
+; X32: sbbl
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X32: ret
+}
+
+define void @atomic_fetch_and64() nounwind {
+; X32: atomic_fetch_and64
+ %t1 = atomicrmw and i64* @sc64, i64 3 acquire
+; X32: andl
+; X32: andl
+; X32: lock
+; X32: cmpxchg8b
+ %t2 = atomicrmw and i64* @sc64, i64 5 acquire
+; X32: andl
+; X32: andl
+; X32: lock
+; X32: cmpxchg8b
+ %t3 = atomicrmw and i64* @sc64, i64 %t2 acquire
+; X32: andl
+; X32: andl
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X32: ret
+}
+
+define void @atomic_fetch_or64() nounwind {
+; X32: atomic_fetch_or64
+ %t1 = atomicrmw or i64* @sc64, i64 3 acquire
+; X32: orl
+; X32: orl
+; X32: lock
+; X32: cmpxchg8b
+ %t2 = atomicrmw or i64* @sc64, i64 5 acquire
+; X32: orl
+; X32: orl
+; X32: lock
+; X32: cmpxchg8b
+ %t3 = atomicrmw or i64* @sc64, i64 %t2 acquire
+; X32: orl
+; X32: orl
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X32: ret
+}
+
+define void @atomic_fetch_xor64() nounwind {
+; X32: atomic_fetch_xor64
+ %t1 = atomicrmw xor i64* @sc64, i64 3 acquire
+; X32: xorl
+; X32: xorl
+; X32: lock
+; X32: cmpxchg8b
+ %t2 = atomicrmw xor i64* @sc64, i64 5 acquire
+; X32: xorl
+; X32: xorl
+; X32: lock
+; X32: cmpxchg8b
+ %t3 = atomicrmw xor i64* @sc64, i64 %t2 acquire
+; X32: xorl
+; X32: xorl
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X32: ret
+}
+
+define void @atomic_fetch_nand64(i64 %x) nounwind {
+; X32: atomic_fetch_nand64
+ %t1 = atomicrmw nand i64* @sc64, i64 %x acquire
+; X32: andl
+; X32: andl
+; X32: notl
+; X32: notl
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X32: ret
+}
+
+define void @atomic_fetch_max64(i64 %x) nounwind {
+ %t1 = atomicrmw max i64* @sc64, i64 %x acquire
+; X32: cmpl
+; X32: cmpl
+; X32: cmov
+; X32: cmov
+; X32: cmov
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X32: ret
+}
+
+define void @atomic_fetch_min64(i64 %x) nounwind {
+ %t1 = atomicrmw min i64* @sc64, i64 %x acquire
+; X32: cmpl
+; X32: cmpl
+; X32: cmov
+; X32: cmov
+; X32: cmov
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X32: ret
+}
+
+define void @atomic_fetch_umax64(i64 %x) nounwind {
+ %t1 = atomicrmw umax i64* @sc64, i64 %x acquire
+; X32: cmpl
+; X32: cmpl
+; X32: cmov
+; X32: cmov
+; X32: cmov
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X32: ret
+}
+
+define void @atomic_fetch_umin64(i64 %x) nounwind {
+ %t1 = atomicrmw umin i64* @sc64, i64 %x acquire
+; X32: cmpl
+; X32: cmpl
+; X32: cmov
+; X32: cmov
+; X32: cmov
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X32: ret
+}
+
+define void @atomic_fetch_cmpxchg64() nounwind {
+ %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X32: ret
+}
+
+define void @atomic_fetch_store64(i64 %x) nounwind {
+ store atomic i64 %x, i64* @sc64 release, align 8
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X32: ret
+}
+
+define void @atomic_fetch_swap64(i64 %x) nounwind {
+ %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
+; X32: lock
+; X32: cmpxchg8b
+ ret void
+; X32: ret
+}
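
On i386 there is no single 8-byte store or exchange that is guaranteed atomic, which is why even the plain store and xchg tests above check for a lock cmpxchg8b loop rather than one mov or xchg. A hedged C11 rendering of the store's shape:

    #include <stdatomic.h>
    #include <stdint.h>

    _Atomic uint64_t g64;

    void store64_release(uint64_t x) {
        /* On a 32-bit x86 target this becomes a cmpxchg8b loop: read the
         * current value, then swap in x, retrying until the swap lands. */
        uint64_t expected = atomic_load_explicit(&g64, memory_order_relaxed);
        while (!atomic_compare_exchange_weak_explicit(
                   &g64, &expected, x,
                   memory_order_release, memory_order_relaxed)) {
        }
    }
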
diff --git a/test/CodeGen/X86/atomic8.ll b/test/CodeGen/X86/atomic8.ll
new file mode 100644
index 0000000..035a28d
--- /dev/null
+++ b/test/CodeGen/X86/atomic8.ll
@@ -0,0 +1,251 @@
+; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 | FileCheck %s --check-prefix X64
+; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 | FileCheck %s --check-prefix X32
+; XFAIL: *
+
+@sc8 = external global i8
+
+define void @atomic_fetch_add8() nounwind {
+; X64: atomic_fetch_add8
+; X32: atomic_fetch_add8
+entry:
+; 8-bit
+ %t1 = atomicrmw add i8* @sc8, i8 1 acquire
+; X64: lock
+; X64: incb
+; X32: lock
+; X32: incb
+ %t2 = atomicrmw add i8* @sc8, i8 3 acquire
+; X64: lock
+; X64: addb $3
+; X32: lock
+; X32: addb $3
+ %t3 = atomicrmw add i8* @sc8, i8 5 acquire
+; X64: lock
+; X64: xaddb
+; X32: lock
+; X32: xaddb
+ %t4 = atomicrmw add i8* @sc8, i8 %t3 acquire
+; X64: lock
+; X64: addb
+; X32: lock
+; X32: addb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_sub8() nounwind {
+; X64: atomic_fetch_sub8
+; X32: atomic_fetch_sub8
+ %t1 = atomicrmw sub i8* @sc8, i8 1 acquire
+; X64: lock
+; X64: decb
+; X32: lock
+; X32: decb
+ %t2 = atomicrmw sub i8* @sc8, i8 3 acquire
+; X64: lock
+; X64: subb $3
+; X32: lock
+; X32: subb $3
+ %t3 = atomicrmw sub i8* @sc8, i8 5 acquire
+; X64: lock
+; X64: xaddb
+; X32: lock
+; X32: xaddb
+ %t4 = atomicrmw sub i8* @sc8, i8 %t3 acquire
+; X64: lock
+; X64: subb
+; X32: lock
+; X32: subb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_and8() nounwind {
+; X64: atomic_fetch_and8
+; X32: atomic_fetch_and8
+ %t1 = atomicrmw and i8* @sc8, i8 3 acquire
+; X64: lock
+; X64: andb $3
+; X32: lock
+; X32: andb $3
+ %t2 = atomicrmw and i8* @sc8, i8 5 acquire
+; X64: andb
+; X64: lock
+; X64: cmpxchgb
+; X32: andb
+; X32: lock
+; X32: cmpxchgb
+ %t3 = atomicrmw and i8* @sc8, i8 %t2 acquire
+; X64: lock
+; X64: andb
+; X32: lock
+; X32: andb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_or8() nounwind {
+; X64: atomic_fetch_or8
+; X32: atomic_fetch_or8
+ %t1 = atomicrmw or i8* @sc8, i8 3 acquire
+; X64: lock
+; X64: orb $3
+; X32: lock
+; X32: orb $3
+ %t2 = atomicrmw or i8* @sc8, i8 5 acquire
+; X64: orb
+; X64: lock
+; X64: cmpxchgb
+; X32: orb
+; X32: lock
+; X32: cmpxchgb
+ %t3 = atomicrmw or i8* @sc8, i8 %t2 acquire
+; X64: lock
+; X64: orb
+; X32: lock
+; X32: orb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_xor8() nounwind {
+; X64: atomic_fetch_xor8
+; X32: atomic_fetch_xor8
+ %t1 = atomicrmw xor i8* @sc8, i8 3 acquire
+; X64: lock
+; X64: xorb $3
+; X32: lock
+; X32: xorb $3
+ %t2 = atomicrmw xor i8* @sc8, i8 5 acquire
+; X64: xorb
+; X64: lock
+; X64: cmpxchgb
+; X32: xorb
+; X32: lock
+; X32: cmpxchgb
+ %t3 = atomicrmw xor i8* @sc8, i8 %t2 acquire
+; X64: lock
+; X64: xorb
+; X32: lock
+; X32: xorb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_nand8(i8 %x) nounwind {
+; X64: atomic_fetch_nand8
+; X32: atomic_fetch_nand8
+ %t1 = atomicrmw nand i8* @sc8, i8 %x acquire
+; X64: andb
+; X64: notb
+; X64: lock
+; X64: cmpxchgb
+; X32: andb
+; X32: notb
+; X32: lock
+; X32: cmpxchgb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_max8(i8 %x) nounwind {
+ %t1 = atomicrmw max i8* @sc8, i8 %x acquire
+; X64: cmpb
+; X64: cmov
+; X64: lock
+; X64: cmpxchgb
+
+; X32: cmpb
+; X32: cmov
+; X32: lock
+; X32: cmpxchgb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_min8(i8 %x) nounwind {
+ %t1 = atomicrmw min i8* @sc8, i8 %x acquire
+; X64: cmpb
+; X64: cmov
+; X64: lock
+; X64: cmpxchgb
+
+; X32: cmpb
+; X32: cmov
+; X32: lock
+; X32: cmpxchgb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_umax8(i8 %x) nounwind {
+ %t1 = atomicrmw umax i8* @sc8, i8 %x acquire
+; X64: cmpb
+; X64: cmov
+; X64: lock
+; X64: cmpxchgb
+
+; X32: cmpb
+; X32: cmov
+; X32: lock
+; X32: cmpxchgb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_umin8(i8 %x) nounwind {
+ %t1 = atomicrmw umin i8* @sc8, i8 %x acquire
+; X64: cmpb
+; X64: cmov
+; X64: lock
+; X64: cmpxchgb
+; X32: cmpb
+; X32: cmov
+; X32: lock
+; X32: cmpxchgb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_cmpxchg8() nounwind {
+ %t1 = cmpxchg i8* @sc8, i8 0, i8 1 acquire
+; X64: lock
+; X64: cmpxchgb
+; X32: lock
+; X32: cmpxchgb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_store8(i8 %x) nounwind {
+ store atomic i8 %x, i8* @sc8 release, align 4
+; X64-NOT: lock
+; X64: movb
+; X32-NOT: lock
+; X32: movb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_swap8(i8 %x) nounwind {
+ %t1 = atomicrmw xchg i8* @sc8, i8 %x acquire
+; X64-NOT: lock
+; X64: xchgb
+; X32-NOT: lock
+; X32: xchgb
+ ret void
+; X64: ret
+; X32: ret
+}
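
The store and swap tests at the end of each width deliberately check for the absence of a lock prefix: under x86's strong memory model a release store is just a plain mov, and xchg with a memory operand locks the bus implicitly, so the prefix would be redundant. Illustrative C11, assuming nothing beyond the ISO atomics:

    #include <stdatomic.h>

    _Atomic char c8;

    void store_release(char x) {
        /* Plain `movb` on x86; no fence or lock prefix is required for
         * release ordering on this architecture. */
        atomic_store_explicit(&c8, x, memory_order_release);
    }

    char swap_acquire(char x) {
        /* `xchgb` is implicitly locked, so no explicit prefix appears. */
        return atomic_exchange_explicit(&c8, x, memory_order_acquire);
    }
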
diff --git a/test/CodeGen/X86/atomic_op.ll b/test/CodeGen/X86/atomic_op.ll
index 152bece..c5fa07d 100644
--- a/test/CodeGen/X86/atomic_op.ll
+++ b/test/CodeGen/X86/atomic_op.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=generic -march=x86 | FileCheck %s
+; RUN: llc < %s -mcpu=generic -march=x86 -mattr=+cmov | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
@@ -107,13 +107,12 @@ entry:
; CHECK: cmpxchgl
%17 = cmpxchg i32* %val2, i32 1976, i32 1 monotonic
store i32 %17, i32* %old
+ ; CHECK: movl [[R17atomic:.*]], %eax
; CHECK: movl $1401, %[[R17mask:[a-z]*]]
- ; CHECK: movl [[R17atomic:.*]], %eax
- ; CHECK: movl %eax, %[[R17newval:[a-z]*]]
- ; CHECK: andl %[[R17mask]], %[[R17newval]]
- ; CHECK: notl %[[R17newval]]
+ ; CHECK: andl %eax, %[[R17mask]]
+ ; CHECK: notl %[[R17mask]]
; CHECK: lock
- ; CHECK: cmpxchgl %[[R17newval]], [[R17atomic]]
+ ; CHECK: cmpxchgl %[[R17mask]], [[R17atomic]]
; CHECK: jne
; CHECK: movl %eax,
%18 = atomicrmw nand i32* %val2, i32 1401 monotonic
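
The revised checks reflect that the nand temporary is now computed in the mask register itself (andl %eax into the mask, then notl), saving a copy before the cmpxchg. For orientation, a C11 sketch of the nand loop — LLVM's atomicrmw nand computes ~(old & val) — with names invented here:

    #include <stdatomic.h>

    int fetch_nand(_Atomic int *p, int val) {
        int old = atomic_load_explicit(p, memory_order_relaxed);
        /* desired = ~(old & val), published via a cmpxchg retry loop. */
        while (!atomic_compare_exchange_weak_explicit(
                   p, &old, ~(old & val),
                   memory_order_relaxed, memory_order_relaxed)) {
        }
        return old;
    }
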
diff --git a/test/CodeGen/X86/pr13458.ll b/test/CodeGen/X86/pr13458.ll
new file mode 100644
index 0000000..55548b3
--- /dev/null
+++ b/test/CodeGen/X86/pr13458.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-darwin11.4.2"
+
+%v8_uniform_Stats.0.2.4.10 = type { i64, i64, i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i64, i32, i32, i32, i32, i32, i32, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i32, i64, [7 x i32], [7 x i64] }
+
+@globalStats = external global %v8_uniform_Stats.0.2.4.10
+
+define void @MergeStats() nounwind {
+allocas:
+ %r.i.i720 = atomicrmw max i64* getelementptr inbounds (%v8_uniform_Stats.0.2.4.10* @globalStats, i64 0, i32 30), i64 0 seq_cst
+ ret void
+}