Diffstat (limited to 'test/CodeGen/X86/atomic_mi.ll')
-rw-r--r-- | test/CodeGen/X86/atomic_mi.ll | 60
1 file changed, 30 insertions(+), 30 deletions(-)
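
Every hunk below makes the same mechanical edit: LLVM IR's load instruction now spells the result type explicitly before the pointer operand (part of the opaque-pointer work), so each atomic load in this test gains a leading type. A minimal before/after sketch of the syntax change, using one of the acquire loads from the test:

    ; old one-type form: result type implied by the pointer operand
    %v = load atomic i32* %p acquire, align 4
    ; new two-type form: explicit result type, then the pointer operand
    %v = load atomic i32, i32* %p acquire, align 4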
diff --git a/test/CodeGen/X86/atomic_mi.ll b/test/CodeGen/X86/atomic_mi.ll
index 19e019e..7a6204f 100644
--- a/test/CodeGen/X86/atomic_mi.ll
+++ b/test/CodeGen/X86/atomic_mi.ll
@@ -103,7 +103,7 @@ define void @add_8(i8* %p) {
 ; X32-NOT: lock
 ; X32: addb
 ; X32-NOT: movb
-  %1 = load atomic i8* %p seq_cst, align 1
+  %1 = load atomic i8, i8* %p seq_cst, align 1
   %2 = add i8 %1, 2
   store atomic i8 %2, i8* %p release, align 1
   ret void
@@ -116,7 +116,7 @@ define void @add_16(i16* %p) {
 ; X64-NOT: addw
 ; X32-LABEL: add_16
 ; X32-NOT: addw
-  %1 = load atomic i16* %p acquire, align 2
+  %1 = load atomic i16, i16* %p acquire, align 2
   %2 = add i16 %1, 2
   store atomic i16 %2, i16* %p release, align 2
   ret void
@@ -131,7 +131,7 @@ define void @add_32(i32* %p) {
 ; X32-NOT: lock
 ; X32: addl
 ; X32-NOT: movl
-  %1 = load atomic i32* %p acquire, align 4
+  %1 = load atomic i32, i32* %p acquire, align 4
   %2 = add i32 %1, 2
   store atomic i32 %2, i32* %p monotonic, align 4
   ret void
@@ -144,7 +144,7 @@ define void @add_64(i64* %p) {
 ; X64-NOT: movq
 ; We do not check X86-32 as it cannot do 'addq'.
 ; X32-LABEL: add_64
-  %1 = load atomic i64* %p acquire, align 8
+  %1 = load atomic i64, i64* %p acquire, align 8
   %2 = add i64 %1, 2
   store atomic i64 %2, i64* %p release, align 8
   ret void
@@ -155,7 +155,7 @@ define void @add_32_seq_cst(i32* %p) {
 ; X64: xchgl
 ; X32-LABEL: add_32_seq_cst
 ; X32: xchgl
-  %1 = load atomic i32* %p monotonic, align 4
+  %1 = load atomic i32, i32* %p monotonic, align 4
   %2 = add i32 %1, 2
   store atomic i32 %2, i32* %p seq_cst, align 4
   ret void
@@ -172,7 +172,7 @@ define void @and_8(i8* %p) {
 ; X32-NOT: lock
 ; X32: andb
 ; X32-NOT: movb
-  %1 = load atomic i8* %p monotonic, align 1
+  %1 = load atomic i8, i8* %p monotonic, align 1
   %2 = and i8 %1, 2
   store atomic i8 %2, i8* %p release, align 1
   ret void
@@ -185,7 +185,7 @@ define void @and_16(i16* %p) {
 ; X64-NOT: andw
 ; X32-LABEL: and_16
 ; X32-NOT: andw
-  %1 = load atomic i16* %p acquire, align 2
+  %1 = load atomic i16, i16* %p acquire, align 2
   %2 = and i16 %1, 2
   store atomic i16 %2, i16* %p release, align 2
   ret void
@@ -200,7 +200,7 @@ define void @and_32(i32* %p) {
 ; X32-NOT: lock
 ; X32: andl
 ; X32-NOT: movl
-  %1 = load atomic i32* %p acquire, align 4
+  %1 = load atomic i32, i32* %p acquire, align 4
   %2 = and i32 %1, 2
   store atomic i32 %2, i32* %p release, align 4
   ret void
@@ -213,7 +213,7 @@ define void @and_64(i64* %p) {
 ; X64-NOT: movq
 ; We do not check X86-32 as it cannot do 'andq'.
 ; X32-LABEL: and_64
-  %1 = load atomic i64* %p acquire, align 8
+  %1 = load atomic i64, i64* %p acquire, align 8
   %2 = and i64 %1, 2
   store atomic i64 %2, i64* %p release, align 8
   ret void
@@ -224,7 +224,7 @@ define void @and_32_seq_cst(i32* %p) {
 ; X64: xchgl
 ; X32-LABEL: and_32_seq_cst
 ; X32: xchgl
-  %1 = load atomic i32* %p monotonic, align 4
+  %1 = load atomic i32, i32* %p monotonic, align 4
   %2 = and i32 %1, 2
   store atomic i32 %2, i32* %p seq_cst, align 4
   ret void
@@ -241,7 +241,7 @@ define void @or_8(i8* %p) {
 ; X32-NOT: lock
 ; X32: orb
 ; X32-NOT: movb
-  %1 = load atomic i8* %p acquire, align 1
+  %1 = load atomic i8, i8* %p acquire, align 1
   %2 = or i8 %1, 2
   store atomic i8 %2, i8* %p release, align 1
   ret void
@@ -252,7 +252,7 @@ define void @or_16(i16* %p) {
 ; X64-NOT: orw
 ; X32-LABEL: or_16
 ; X32-NOT: orw
-  %1 = load atomic i16* %p acquire, align 2
+  %1 = load atomic i16, i16* %p acquire, align 2
   %2 = or i16 %1, 2
   store atomic i16 %2, i16* %p release, align 2
   ret void
@@ -267,7 +267,7 @@ define void @or_32(i32* %p) {
 ; X32-NOT: lock
 ; X32: orl
 ; X32-NOT: movl
-  %1 = load atomic i32* %p acquire, align 4
+  %1 = load atomic i32, i32* %p acquire, align 4
   %2 = or i32 %1, 2
   store atomic i32 %2, i32* %p release, align 4
   ret void
@@ -280,7 +280,7 @@ define void @or_64(i64* %p) {
 ; X64-NOT: movq
 ; We do not check X86-32 as it cannot do 'orq'.
 ; X32-LABEL: or_64
-  %1 = load atomic i64* %p acquire, align 8
+  %1 = load atomic i64, i64* %p acquire, align 8
   %2 = or i64 %1, 2
   store atomic i64 %2, i64* %p release, align 8
   ret void
@@ -291,7 +291,7 @@ define void @or_32_seq_cst(i32* %p) {
 ; X64: xchgl
 ; X32-LABEL: or_32_seq_cst
 ; X32: xchgl
-  %1 = load atomic i32* %p monotonic, align 4
+  %1 = load atomic i32, i32* %p monotonic, align 4
   %2 = or i32 %1, 2
   store atomic i32 %2, i32* %p seq_cst, align 4
   ret void
@@ -308,7 +308,7 @@ define void @xor_8(i8* %p) {
 ; X32-NOT: lock
 ; X32: xorb
 ; X32-NOT: movb
-  %1 = load atomic i8* %p acquire, align 1
+  %1 = load atomic i8, i8* %p acquire, align 1
   %2 = xor i8 %1, 2
   store atomic i8 %2, i8* %p release, align 1
   ret void
@@ -319,7 +319,7 @@ define void @xor_16(i16* %p) {
 ; X64-NOT: xorw
 ; X32-LABEL: xor_16
 ; X32-NOT: xorw
-  %1 = load atomic i16* %p acquire, align 2
+  %1 = load atomic i16, i16* %p acquire, align 2
   %2 = xor i16 %1, 2
   store atomic i16 %2, i16* %p release, align 2
   ret void
@@ -334,7 +334,7 @@ define void @xor_32(i32* %p) {
 ; X32-NOT: lock
 ; X32: xorl
 ; X32-NOT: movl
-  %1 = load atomic i32* %p acquire, align 4
+  %1 = load atomic i32, i32* %p acquire, align 4
   %2 = xor i32 %1, 2
   store atomic i32 %2, i32* %p release, align 4
   ret void
@@ -347,7 +347,7 @@ define void @xor_64(i64* %p) {
 ; X64-NOT: movq
 ; We do not check X86-32 as it cannot do 'xorq'.
 ; X32-LABEL: xor_64
-  %1 = load atomic i64* %p acquire, align 8
+  %1 = load atomic i64, i64* %p acquire, align 8
   %2 = xor i64 %1, 2
   store atomic i64 %2, i64* %p release, align 8
   ret void
@@ -358,7 +358,7 @@ define void @xor_32_seq_cst(i32* %p) {
 ; X64: xchgl
 ; X32-LABEL: xor_32_seq_cst
 ; X32: xchgl
-  %1 = load atomic i32* %p monotonic, align 4
+  %1 = load atomic i32, i32* %p monotonic, align 4
   %2 = xor i32 %1, 2
   store atomic i32 %2, i32* %p seq_cst, align 4
   ret void
@@ -378,7 +378,7 @@ define void @inc_8(i8* %p) {
 ; SLOW_INC-LABEL: inc_8
 ; SLOW_INC-NOT: incb
 ; SLOW_INC-NOT: movb
-  %1 = load atomic i8* %p seq_cst, align 1
+  %1 = load atomic i8, i8* %p seq_cst, align 1
   %2 = add i8 %1, 1
   store atomic i8 %2, i8* %p release, align 1
   ret void
@@ -393,7 +393,7 @@ define void @inc_16(i16* %p) {
 ; X32-NOT: incw
 ; SLOW_INC-LABEL: inc_16
 ; SLOW_INC-NOT: incw
-  %1 = load atomic i16* %p acquire, align 2
+  %1 = load atomic i16, i16* %p acquire, align 2
   %2 = add i16 %1, 1
   store atomic i16 %2, i16* %p release, align 2
   ret void
@@ -411,7 +411,7 @@ define void @inc_32(i32* %p) {
 ; SLOW_INC-LABEL: inc_32
 ; SLOW_INC-NOT: incl
 ; SLOW_INC-NOT: movl
-  %1 = load atomic i32* %p acquire, align 4
+  %1 = load atomic i32, i32* %p acquire, align 4
   %2 = add i32 %1, 1
   store atomic i32 %2, i32* %p monotonic, align 4
   ret void
@@ -427,7 +427,7 @@ define void @inc_64(i64* %p) {
 ; SLOW_INC-LABEL: inc_64
 ; SLOW_INC-NOT: incq
 ; SLOW_INC-NOT: movq
-  %1 = load atomic i64* %p acquire, align 8
+  %1 = load atomic i64, i64* %p acquire, align 8
   %2 = add i64 %1, 1
   store atomic i64 %2, i64* %p release, align 8
   ret void
@@ -438,7 +438,7 @@ define void @inc_32_seq_cst(i32* %p) {
 ; X64: xchgl
 ; X32-LABEL: inc_32_seq_cst
 ; X32: xchgl
-  %1 = load atomic i32* %p monotonic, align 4
+  %1 = load atomic i32, i32* %p monotonic, align 4
   %2 = add i32 %1, 1
   store atomic i32 %2, i32* %p seq_cst, align 4
   ret void
@@ -458,7 +458,7 @@ define void @dec_8(i8* %p) {
 ; SLOW_INC-LABEL: dec_8
 ; SLOW_INC-NOT: decb
 ; SLOW_INC-NOT: movb
-  %1 = load atomic i8* %p seq_cst, align 1
+  %1 = load atomic i8, i8* %p seq_cst, align 1
   %2 = sub i8 %1, 1
   store atomic i8 %2, i8* %p release, align 1
   ret void
@@ -473,7 +473,7 @@ define void @dec_16(i16* %p) {
 ; X32-NOT: decw
 ; SLOW_INC-LABEL: dec_16
 ; SLOW_INC-NOT: decw
-  %1 = load atomic i16* %p acquire, align 2
+  %1 = load atomic i16, i16* %p acquire, align 2
   %2 = sub i16 %1, 1
   store atomic i16 %2, i16* %p release, align 2
   ret void
@@ -491,7 +491,7 @@ define void @dec_32(i32* %p) {
 ; SLOW_INC-LABEL: dec_32
 ; SLOW_INC-NOT: decl
 ; SLOW_INC-NOT: movl
-  %1 = load atomic i32* %p acquire, align 4
+  %1 = load atomic i32, i32* %p acquire, align 4
   %2 = sub i32 %1, 1
   store atomic i32 %2, i32* %p monotonic, align 4
   ret void
@@ -507,7 +507,7 @@ define void @dec_64(i64* %p) {
 ; SLOW_INC-LABEL: dec_64
 ; SLOW_INC-NOT: decq
 ; SLOW_INC-NOT: movq
-  %1 = load atomic i64* %p acquire, align 8
+  %1 = load atomic i64, i64* %p acquire, align 8
   %2 = sub i64 %1, 1
   store atomic i64 %2, i64* %p release, align 8
   ret void
@@ -518,7 +518,7 @@ define void @dec_32_seq_cst(i32* %p) {
 ; X64: xchgl
 ; X32-LABEL: dec_32_seq_cst
 ; X32: xchgl
-  %1 = load atomic i32* %p monotonic, align 4
+  %1 = load atomic i32, i32* %p monotonic, align 4
   %2 = sub i32 %1, 1
   store atomic i32 %2, i32* %p seq_cst, align 4
   ret void
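
The updated loads leave the intent of the test unchanged: for the non-seq_cst orderings, each load/op/store triple should still fold into a single memory-operand instruction (addb, andl, orb, ...) with no lock prefix and no separate mov, while the *_seq_cst variants keep checking for xchgl. To re-verify the checks from a built LLVM tree, an invocation along these lines should work (the build-directory path is an assumption):

    ./build/bin/llvm-lit -v test/CodeGen/X86/atomic_mi.ll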