Diffstat (limited to 'test/CodeGen/X86')
166 files changed, 5411 insertions, 3384 deletions
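Most of the churn in the hunks below comes from two IR migrations that are visible directly in the changed tests: calls to the old llvm.atomic.* intrinsics are rewritten as atomicrmw instructions, and landing-pad blocks reached by invoke gain an explicit landingpad instruction with a cleanup clause plus a declaration of @__gxx_personality_v0. The following is a minimal before/after sketch distilled from those hunks; @var, @f, and the function names are illustrative and not taken from any single test.

; Sketch only: @var, @f, and the function names are made up for illustration.
@var = global i64 0

declare void @f()
declare i32 @__gxx_personality_v0(...)

define void @atomic_update() nounwind {
entry:
  ; was: %0 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* @var, i64 1)
  %0 = atomicrmw sub i64* @var, i64 1 monotonic
  ret void
}

define void @landing_pad_shape() {
entry:
  invoke void @f() to label %cont unwind label %lpad
cont:
  ret void
lpad:
  ; landing pads now begin with a landingpad instruction, with the
  ; personality given inline as in the updated tests
  %exn = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
          cleanup
  ret void
}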
diff --git a/test/CodeGen/X86/2006-05-11-InstrSched.ll b/test/CodeGen/X86/2006-05-11-InstrSched.ll
index 56d6aa9..a871ea1 100644
--- a/test/CodeGen/X86/2006-05-11-InstrSched.ll
+++ b/test/CodeGen/X86/2006-05-11-InstrSched.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -stats -realign-stack=0 |&\
+; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -mattr=+sse2 -stats -realign-stack=0 |&\
 ; RUN: grep {asm-printer} | grep 34

 target datalayout = "e-p:32:32"
diff --git a/test/CodeGen/X86/2006-07-19-ATTAsm.ll b/test/CodeGen/X86/2006-07-19-ATTAsm.ll
deleted file mode 100644
index c8fd10f..0000000
--- a/test/CodeGen/X86/2006-07-19-ATTAsm.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=att
-; PR834
-; END.
-
-target datalayout = "e-p:32:32"
-target triple = "i386-unknown-freebsd6.1"
- %llvm.dbg.anchor.type = type { i32, i32 }
- %llvm.dbg.basictype.type = type { i32, { }*, i8*, { }*, i32, i64, i64, i64, i32, i32 }
- %llvm.dbg.compile_unit.type = type { i32, { }*, i32, i8*, i8*, i8* }
- %llvm.dbg.global_variable.type = type { i32, { }*, { }*, i8*, i8 *, i8*, { }*, i32, { }*, i1, i1, { }* }
-@x = global i32 0 ; <i32*> [#uses=1]
-@llvm.dbg.global_variable = internal constant %llvm.dbg.global_variable.type {
- i32 327732,
- { }* bitcast (%llvm.dbg.anchor.type* @llvm.dbg.global_variables to { }*),
- { }* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to { }*),
- i8* getelementptr ([2 x i8]* @str, i64 0, i64 0),
- i8* getelementptr ([2 x i8]* @str, i64 0, i64 0),
- i8* null,
- { }* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to { }*),
- i32 1,
- { }* bitcast (%llvm.dbg.basictype.type* @llvm.dbg.basictype to { }*),
- i1 false,
- i1 true,
- { }* bitcast (i32* @x to { }*) }, section "llvm.metadata" ; <%llvm.dbg.global_variable.type*> [#uses=0]
-@llvm.dbg.global_variables = linkonce constant %llvm.dbg.anchor.type { i32 327680, i32 52 }, section "llvm.metadata" ; <%llvm.dbg.anchor.type*> [#uses=1]
-@llvm.dbg.compile_unit = internal constant %llvm.dbg.compile_unit.type {
- i32 327697,
- { }* bitcast (%llvm.dbg.anchor.type* @llvm.dbg.compile_units to { }*),
- i32 4,
- i8* getelementptr ([10 x i8]* @str1, i64 0, i64 0),
- i8* getelementptr ([32 x i8]* @str2, i64 0, i64 0),
- i8* getelementptr ([45 x i8]* @str3, i64 0, i64 0) }, section "llvm.metadata" ; <%llvm.dbg.compile_unit.type*> [#uses=1]
-@llvm.dbg.compile_units = linkonce constant %llvm.dbg.anchor.type { i32 327680, i32 17 }, section "llvm.metadata" ; <%llvm.dbg.anchor.type*> [#uses=1]
-@str1 = internal constant [10 x i8] c"testb.cpp\00", section "llvm.metadata" ; <[10 x i8]*> [#uses=1]
-@str2 = internal constant [32 x i8] c"/Sources/Projects/DwarfTesting/\00", section "llvm.metadata" ; <[32 x i8]*> [#uses=1]
-@str3 = internal constant [45 x i8] c"4.0.1 LLVM (Apple Computer, Inc. build 5400)\00", section "llvm.metadata" ; <[45 x i8]*> [#uses=1]
-@str = internal constant [2 x i8] c"x\00", section "llvm.metadata" ; <[2 x i8]*> [#uses=1]
-@llvm.dbg.basictype = internal constant %llvm.dbg.basictype.type {
- i32 327716,
- { }* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to { }*),
- i8* getelementptr ([4 x i8]* @str4, i64 0, i64 0),
- { }* null,
- i32 0,
- i64 32,
- i64 32,
- i64 0,
- i32 0,
- i32 5 }, section "llvm.metadata" ; <%llvm.dbg.basictype.type*> [#uses=1]
-@str4 = internal constant [4 x i8] c"int\00", section "llvm.metadata" ; <[4 x i8]*> [#uses=1]
diff --git a/test/CodeGen/X86/2007-05-07-InvokeSRet.ll b/test/CodeGen/X86/2007-05-07-InvokeSRet.ll
index 22e2750..deb3999 100644
--- a/test/CodeGen/X86/2007-05-07-InvokeSRet.ll
+++ b/test/CodeGen/X86/2007-05-07-InvokeSRet.ll
@@ -11,5 +11,9 @@ entry:
 	to label %return unwind label %return

 return:		; preds = %entry, %entry
+  %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+          cleanup
   ret void
 }
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll b/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
index b040095..266fd7b 100644
--- a/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
+++ b/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
@@ -1,4 +1,12 @@
-; RUN: llc < %s -march=x86 | not grep pushf
+; RUN: llc < %s -march=x86 -mattr=+cmov | FileCheck %s
+;
+; Test scheduling a multi-use compare. We should neither spill flags
+; nor clone the compare.
+; CHECK: cmp
+; CHECK-NOT: pushf
+; CHECK: cmov
+; CHECK-NOT: cmp
+; CHECK: cmov

 %struct.indexentry = type { i32, i8*, i8*, i8*, i8*, i8* }
diff --git a/test/CodeGen/X86/2008-04-17-CoalescerBug.ll b/test/CodeGen/X86/2008-04-17-CoalescerBug.ll
index 77720aa..859041e 100644
--- a/test/CodeGen/X86/2008-04-17-CoalescerBug.ll
+++ b/test/CodeGen/X86/2008-04-17-CoalescerBug.ll
@@ -151,8 +151,12 @@ bb7819:		; preds = %bb3314
 bb7834:		; preds = %bb7806, %invcont5831
 	br label %bb3261
 lpad:		; preds = %bb7806, %bb5968, %invcont5814, %bb440.i8663, %bb155.i8541, %bb5657, %bb3306
+  %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+          cleanup
 	ret void
 lpad8185:		; preds = %invcont5831
+  %exn8185 = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+          cleanup
 	ret void
 }
@@ -169,3 +173,5 @@ declare %struct.wxStringBase* @_ZN12wxStringBase6appendEmw(%struct.wxStringBase*
 declare %struct.wxStringBase* @_ZN12wxStringBaseaSEPKw(%struct.wxStringBase*, i32*)

 declare void @_ZN8wxString6FormatEPKwz(%struct.wxString* noalias sret , i32*, ...)
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll b/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll
index 94c95d4..0d11546 100644
--- a/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll
+++ b/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll
@@ -23,8 +23,12 @@ tmp12.i.i.i.i.i.noexc65:		; preds = %bb37
 	unreachable

 lpad243:		; preds = %bb37
-	%eh_ptr244 = call i8* @llvm.eh.exception( )		; <i8*> [#uses=1]
+  %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+          cleanup
+  %eh_ptr244 = extractvalue { i8*, i32 } %exn, 0
 	store i32 (...)** getelementptr ([5 x i32 (...)*]* @_ZTVN10Evaluation10GridOutputILi3EEE, i32 0, i32 2), i32 (...)*** null, align 8
 	%tmp133 = call i8* @__cxa_begin_catch( i8* %eh_ptr244 ) nounwind		; <i8*> [#uses=0]
 	unreachable
 }
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/2008-08-19-SubAndFetch.ll b/test/CodeGen/X86/2008-08-19-SubAndFetch.ll
index 8475e8d..360ec73 100644
--- a/test/CodeGen/X86/2008-08-19-SubAndFetch.ll
+++ b/test/CodeGen/X86/2008-08-19-SubAndFetch.ll
@@ -7,8 +7,6 @@ entry:
 ; CHECK: main:
 ; CHECK: lock
 ; CHECK: decq
-	tail call i64 @llvm.atomic.load.sub.i64.p0i64( i64* @var, i64 1 )		; <i64>:0 [#uses=0]
+  atomicrmw sub i64* @var, i64 1 monotonic
 	unreachable
 }
-
-declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind
diff --git a/test/CodeGen/X86/2008-09-05-sinttofp-2xi32.ll b/test/CodeGen/X86/2008-09-05-sinttofp-2xi32.ll
index 2dc1dea..757f1ff 100644
--- a/test/CodeGen/X86/2008-09-05-sinttofp-2xi32.ll
+++ b/test/CodeGen/X86/2008-09-05-sinttofp-2xi32.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mattr=+mmx | grep unpcklpd
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mattr=+mmx | grep unpckhpd
+; RUN: llc < %s -march=x86 -mattr=+sse2 -mattr=+mmx | not grep unpcklpd
+; RUN: llc < %s -march=x86 -mattr=+sse2 -mattr=+mmx | not grep unpckhpd
 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep cvttpd2pi | count 1
 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep cvtpi2pd | count 1
 ; originally from PR2687, but things don't work that way any more.
diff --git a/test/CodeGen/X86/2008-09-18-inline-asm-2.ll b/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
index dfd165c..511c7b5 100644
--- a/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
+++ b/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
@@ -18,7 +18,7 @@
 ; CHECK-NOT: [[A3]]
 ; CHECK: 5th=[[A5:%...]]
 ; CHECK-NOT: [[A1]]
-; CHECK-NOT; [[A5]]
+; CHECK-NOT: [[A5]]
 ; CHECK: =4th

 ; The 6th operand is an 8-bit register, and it mustn't alias the 1st and 5th.
diff --git a/test/CodeGen/X86/2008-10-02-Atomics32-2.ll b/test/CodeGen/X86/2008-10-02-Atomics32-2.ll
deleted file mode 100644
index b48c4ad..0000000
--- a/test/CodeGen/X86/2008-10-02-Atomics32-2.ll
+++ /dev/null
@@ -1,969 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-;; This version includes 64-bit version of binary operators (in 32-bit mode).
-;; Swap, cmp-and-swap not supported yet in this mode.
-; ModuleID = 'Atomics.c' -target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" -target triple = "i386-apple-darwin8" -@sc = common global i8 0 ; <i8*> [#uses=52] -@uc = common global i8 0 ; <i8*> [#uses=112] -@ss = common global i16 0 ; <i16*> [#uses=15] -@us = common global i16 0 ; <i16*> [#uses=15] -@si = common global i32 0 ; <i32*> [#uses=15] -@ui = common global i32 0 ; <i32*> [#uses=23] -@sl = common global i32 0 ; <i32*> [#uses=15] -@ul = common global i32 0 ; <i32*> [#uses=15] -@sll = common global i64 0, align 8 ; <i64*> [#uses=13] -@ull = common global i64 0, align 8 ; <i64*> [#uses=13] - -define void @test_op_ignore() nounwind { -entry: - %0 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0] - %1 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0] - %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %3 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %2, i16 1) ; <i16> [#uses=0] - %4 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %5 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %4, i16 1) ; <i16> [#uses=0] - %6 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %7 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %6, i32 1) ; <i32> [#uses=0] - %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %9 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %8, i32 1) ; <i32> [#uses=0] - %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %11 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %10, i32 1) ; <i32> [#uses=0] - %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %13 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %12, i32 1) ; <i32> [#uses=0] - %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %15 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %14, i64 1) ; <i64> [#uses=0] - %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %17 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %16, i64 1) ; <i64> [#uses=0] - %18 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0] - %19 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0] - %20 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %21 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %20, i16 1) ; <i16> [#uses=0] - %22 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %23 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %22, i16 1) ; <i16> [#uses=0] - %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %25 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %24, i32 1) ; <i32> [#uses=0] - %26 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %27 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %26, i32 1) ; <i32> [#uses=0] - %28 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %29 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %28, i32 1) ; <i32> [#uses=0] - %30 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %31 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %30, i32 1) ; <i32> [#uses=0] - %32 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %33 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %32, i64 1) ; <i64> [#uses=0] - %34 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %35 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %34, i64 1) ; <i64> 
[#uses=0] - %36 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0] - %37 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0] - %38 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %39 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %38, i16 1) ; <i16> [#uses=0] - %40 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %41 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %40, i16 1) ; <i16> [#uses=0] - %42 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %43 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %42, i32 1) ; <i32> [#uses=0] - %44 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %45 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %44, i32 1) ; <i32> [#uses=0] - %46 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %47 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %46, i32 1) ; <i32> [#uses=0] - %48 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %49 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %48, i32 1) ; <i32> [#uses=0] - %50 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %51 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %50, i64 1) ; <i64> [#uses=0] - %52 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %53 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %52, i64 1) ; <i64> [#uses=0] - %54 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0] - %55 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0] - %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %57 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %56, i16 1) ; <i16> [#uses=0] - %58 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %59 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %58, i16 1) ; <i16> [#uses=0] - %60 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %61 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %60, i32 1) ; <i32> [#uses=0] - %62 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %63 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %62, i32 1) ; <i32> [#uses=0] - %64 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %65 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %64, i32 1) ; <i32> [#uses=0] - %66 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %67 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %66, i32 1) ; <i32> [#uses=0] - %68 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %69 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %68, i64 1) ; <i64> [#uses=0] - %70 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %71 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %70, i64 1) ; <i64> [#uses=0] - %72 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0] - %73 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0] - %74 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %75 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %74, i16 1) ; <i16> [#uses=0] - %76 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %77 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %76, i16 1) ; <i16> [#uses=0] - %78 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %79 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %78, i32 1) ; <i32> [#uses=0] - %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %81 = call i32 
@llvm.atomic.load.and.i32.p0i32(i32* %80, i32 1) ; <i32> [#uses=0] - %82 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %83 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %82, i32 1) ; <i32> [#uses=0] - %84 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %85 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %84, i32 1) ; <i32> [#uses=0] - %86 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %87 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %86, i64 1) ; <i64> [#uses=0] - %88 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %89 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %88, i64 1) ; <i64> [#uses=0] - %90 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0] - %91 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0] - %92 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %93 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %92, i16 1) ; <i16> [#uses=0] - %94 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %95 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %94, i16 1) ; <i16> [#uses=0] - %96 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %97 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %96, i32 1) ; <i32> [#uses=0] - %98 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %99 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %98, i32 1) ; <i32> [#uses=0] - %100 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %101 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %100, i32 1) ; <i32> [#uses=0] - %102 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %103 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %102, i32 1) ; <i32> [#uses=0] - %104 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %105 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %104, i64 1) ; <i64> [#uses=0] - %106 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %107 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %106, i64 1) ; <i64> [#uses=0] - br label %return - -return: ; preds = %entry - ret void -} - -declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind - -declare i64 @llvm.atomic.load.add.i64.p0i64(i64*, i64) nounwind - -declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind - -declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind - -declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind - -declare i64 @llvm.atomic.load.or.i64.p0i64(i64*, i64) nounwind - -declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind - -declare i64 @llvm.atomic.load.xor.i64.p0i64(i64*, i64) nounwind - -declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind - -declare i64 @llvm.atomic.load.and.i64.p0i64(i64*, i64) nounwind - -declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, 
i8) nounwind - -declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind - -declare i64 @llvm.atomic.load.nand.i64.p0i64(i64*, i64) nounwind - -define void @test_fetch_and_op() nounwind { -entry: - %0 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1] - store i8 %0, i8* @sc, align 1 - %1 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1] - store i8 %1, i8* @uc, align 1 - %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %3 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %2, i16 11) ; <i16> [#uses=1] - store i16 %3, i16* @ss, align 2 - %4 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %5 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %4, i16 11) ; <i16> [#uses=1] - store i16 %5, i16* @us, align 2 - %6 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %7 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %6, i32 11) ; <i32> [#uses=1] - store i32 %7, i32* @si, align 4 - %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %9 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %8, i32 11) ; <i32> [#uses=1] - store i32 %9, i32* @ui, align 4 - %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %11 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %10, i32 11) ; <i32> [#uses=1] - store i32 %11, i32* @sl, align 4 - %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %13 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %12, i32 11) ; <i32> [#uses=1] - store i32 %13, i32* @ul, align 4 - %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %15 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %14, i64 11) ; <i64> [#uses=1] - store i64 %15, i64* @sll, align 8 - %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %17 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %16, i64 11) ; <i64> [#uses=1] - store i64 %17, i64* @ull, align 8 - %18 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1] - store i8 %18, i8* @sc, align 1 - %19 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1] - store i8 %19, i8* @uc, align 1 - %20 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %21 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %20, i16 11) ; <i16> [#uses=1] - store i16 %21, i16* @ss, align 2 - %22 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %23 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %22, i16 11) ; <i16> [#uses=1] - store i16 %23, i16* @us, align 2 - %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %25 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %24, i32 11) ; <i32> [#uses=1] - store i32 %25, i32* @si, align 4 - %26 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %27 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %26, i32 11) ; <i32> [#uses=1] - store i32 %27, i32* @ui, align 4 - %28 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %29 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %28, i32 11) ; <i32> [#uses=1] - store i32 %29, i32* @sl, align 4 - %30 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %31 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %30, i32 11) ; <i32> [#uses=1] - store i32 %31, i32* @ul, align 4 - %32 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %33 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %32, i64 11) ; <i64> 
[#uses=1] - store i64 %33, i64* @sll, align 8 - %34 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %35 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %34, i64 11) ; <i64> [#uses=1] - store i64 %35, i64* @ull, align 8 - %36 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1] - store i8 %36, i8* @sc, align 1 - %37 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1] - store i8 %37, i8* @uc, align 1 - %38 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %39 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %38, i16 11) ; <i16> [#uses=1] - store i16 %39, i16* @ss, align 2 - %40 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %41 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %40, i16 11) ; <i16> [#uses=1] - store i16 %41, i16* @us, align 2 - %42 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %43 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %42, i32 11) ; <i32> [#uses=1] - store i32 %43, i32* @si, align 4 - %44 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %45 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %44, i32 11) ; <i32> [#uses=1] - store i32 %45, i32* @ui, align 4 - %46 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %47 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %46, i32 11) ; <i32> [#uses=1] - store i32 %47, i32* @sl, align 4 - %48 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %49 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %48, i32 11) ; <i32> [#uses=1] - store i32 %49, i32* @ul, align 4 - %50 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %51 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %50, i64 11) ; <i64> [#uses=1] - store i64 %51, i64* @sll, align 8 - %52 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %53 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %52, i64 11) ; <i64> [#uses=1] - store i64 %53, i64* @ull, align 8 - %54 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1] - store i8 %54, i8* @sc, align 1 - %55 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1] - store i8 %55, i8* @uc, align 1 - %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %57 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %56, i16 11) ; <i16> [#uses=1] - store i16 %57, i16* @ss, align 2 - %58 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %59 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %58, i16 11) ; <i16> [#uses=1] - store i16 %59, i16* @us, align 2 - %60 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %61 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %60, i32 11) ; <i32> [#uses=1] - store i32 %61, i32* @si, align 4 - %62 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %63 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %62, i32 11) ; <i32> [#uses=1] - store i32 %63, i32* @ui, align 4 - %64 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %65 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %64, i32 11) ; <i32> [#uses=1] - store i32 %65, i32* @sl, align 4 - %66 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %67 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %66, i32 11) ; <i32> [#uses=1] - store i32 %67, i32* @ul, align 4 - %68 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %69 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %68, i64 11) ; <i64> [#uses=1] - store 
i64 %69, i64* @sll, align 8 - %70 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %71 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %70, i64 11) ; <i64> [#uses=1] - store i64 %71, i64* @ull, align 8 - %72 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1] - store i8 %72, i8* @sc, align 1 - %73 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1] - store i8 %73, i8* @uc, align 1 - %74 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %75 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %74, i16 11) ; <i16> [#uses=1] - store i16 %75, i16* @ss, align 2 - %76 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %77 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %76, i16 11) ; <i16> [#uses=1] - store i16 %77, i16* @us, align 2 - %78 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %79 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %78, i32 11) ; <i32> [#uses=1] - store i32 %79, i32* @si, align 4 - %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %81 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %80, i32 11) ; <i32> [#uses=1] - store i32 %81, i32* @ui, align 4 - %82 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %83 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %82, i32 11) ; <i32> [#uses=1] - store i32 %83, i32* @sl, align 4 - %84 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %85 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %84, i32 11) ; <i32> [#uses=1] - store i32 %85, i32* @ul, align 4 - %86 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %87 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %86, i64 11) ; <i64> [#uses=1] - store i64 %87, i64* @sll, align 8 - %88 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %89 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %88, i64 11) ; <i64> [#uses=1] - store i64 %89, i64* @ull, align 8 - %90 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1] - store i8 %90, i8* @sc, align 1 - %91 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1] - store i8 %91, i8* @uc, align 1 - %92 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %93 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %92, i16 11) ; <i16> [#uses=1] - store i16 %93, i16* @ss, align 2 - %94 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %95 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %94, i16 11) ; <i16> [#uses=1] - store i16 %95, i16* @us, align 2 - %96 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %97 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %96, i32 11) ; <i32> [#uses=1] - store i32 %97, i32* @si, align 4 - %98 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %99 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %98, i32 11) ; <i32> [#uses=1] - store i32 %99, i32* @ui, align 4 - %100 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %101 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %100, i32 11) ; <i32> [#uses=1] - store i32 %101, i32* @sl, align 4 - %102 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %103 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %102, i32 11) ; <i32> [#uses=1] - store i32 %103, i32* @ul, align 4 - %104 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %105 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %104, i64 11) ; <i64> 
[#uses=1] - store i64 %105, i64* @sll, align 8 - %106 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %107 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %106, i64 11) ; <i64> [#uses=1] - store i64 %107, i64* @ull, align 8 - br label %return - -return: ; preds = %entry - ret void -} - -define void @test_op_and_fetch() nounwind { -entry: - %0 = load i8* @uc, align 1 ; <i8> [#uses=1] - %1 = zext i8 %0 to i32 ; <i32> [#uses=1] - %2 = trunc i32 %1 to i8 ; <i8> [#uses=2] - %3 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @sc, i8 %2) ; <i8> [#uses=1] - %4 = add i8 %3, %2 ; <i8> [#uses=1] - store i8 %4, i8* @sc, align 1 - %5 = load i8* @uc, align 1 ; <i8> [#uses=1] - %6 = zext i8 %5 to i32 ; <i32> [#uses=1] - %7 = trunc i32 %6 to i8 ; <i8> [#uses=2] - %8 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @uc, i8 %7) ; <i8> [#uses=1] - %9 = add i8 %8, %7 ; <i8> [#uses=1] - store i8 %9, i8* @uc, align 1 - %10 = load i8* @uc, align 1 ; <i8> [#uses=1] - %11 = zext i8 %10 to i32 ; <i32> [#uses=1] - %12 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %13 = trunc i32 %11 to i16 ; <i16> [#uses=2] - %14 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %12, i16 %13) ; <i16> [#uses=1] - %15 = add i16 %14, %13 ; <i16> [#uses=1] - store i16 %15, i16* @ss, align 2 - %16 = load i8* @uc, align 1 ; <i8> [#uses=1] - %17 = zext i8 %16 to i32 ; <i32> [#uses=1] - %18 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %19 = trunc i32 %17 to i16 ; <i16> [#uses=2] - %20 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %18, i16 %19) ; <i16> [#uses=1] - %21 = add i16 %20, %19 ; <i16> [#uses=1] - store i16 %21, i16* @us, align 2 - %22 = load i8* @uc, align 1 ; <i8> [#uses=1] - %23 = zext i8 %22 to i32 ; <i32> [#uses=2] - %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %25 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %24, i32 %23) ; <i32> [#uses=1] - %26 = add i32 %25, %23 ; <i32> [#uses=1] - store i32 %26, i32* @si, align 4 - %27 = load i8* @uc, align 1 ; <i8> [#uses=1] - %28 = zext i8 %27 to i32 ; <i32> [#uses=2] - %29 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %30 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %29, i32 %28) ; <i32> [#uses=1] - %31 = add i32 %30, %28 ; <i32> [#uses=1] - store i32 %31, i32* @ui, align 4 - %32 = load i8* @uc, align 1 ; <i8> [#uses=1] - %33 = zext i8 %32 to i32 ; <i32> [#uses=2] - %34 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %35 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %34, i32 %33) ; <i32> [#uses=1] - %36 = add i32 %35, %33 ; <i32> [#uses=1] - store i32 %36, i32* @sl, align 4 - %37 = load i8* @uc, align 1 ; <i8> [#uses=1] - %38 = zext i8 %37 to i32 ; <i32> [#uses=2] - %39 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %40 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %39, i32 %38) ; <i32> [#uses=1] - %41 = add i32 %40, %38 ; <i32> [#uses=1] - store i32 %41, i32* @ul, align 4 - %42 = load i8* @uc, align 1 ; <i8> [#uses=1] - %43 = zext i8 %42 to i64 ; <i64> [#uses=2] - %44 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %45 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %44, i64 %43) ; <i64> [#uses=1] - %46 = add i64 %45, %43 ; <i64> [#uses=1] - store i64 %46, i64* @sll, align 8 - %47 = load i8* @uc, align 1 ; <i8> [#uses=1] - %48 = zext i8 %47 to i64 ; <i64> [#uses=2] - %49 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %50 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %49, i64 
%48) ; <i64> [#uses=1] - %51 = add i64 %50, %48 ; <i64> [#uses=1] - store i64 %51, i64* @ull, align 8 - %52 = load i8* @uc, align 1 ; <i8> [#uses=1] - %53 = zext i8 %52 to i32 ; <i32> [#uses=1] - %54 = trunc i32 %53 to i8 ; <i8> [#uses=2] - %55 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @sc, i8 %54) ; <i8> [#uses=1] - %56 = sub i8 %55, %54 ; <i8> [#uses=1] - store i8 %56, i8* @sc, align 1 - %57 = load i8* @uc, align 1 ; <i8> [#uses=1] - %58 = zext i8 %57 to i32 ; <i32> [#uses=1] - %59 = trunc i32 %58 to i8 ; <i8> [#uses=2] - %60 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @uc, i8 %59) ; <i8> [#uses=1] - %61 = sub i8 %60, %59 ; <i8> [#uses=1] - store i8 %61, i8* @uc, align 1 - %62 = load i8* @uc, align 1 ; <i8> [#uses=1] - %63 = zext i8 %62 to i32 ; <i32> [#uses=1] - %64 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %65 = trunc i32 %63 to i16 ; <i16> [#uses=2] - %66 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %64, i16 %65) ; <i16> [#uses=1] - %67 = sub i16 %66, %65 ; <i16> [#uses=1] - store i16 %67, i16* @ss, align 2 - %68 = load i8* @uc, align 1 ; <i8> [#uses=1] - %69 = zext i8 %68 to i32 ; <i32> [#uses=1] - %70 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %71 = trunc i32 %69 to i16 ; <i16> [#uses=2] - %72 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %70, i16 %71) ; <i16> [#uses=1] - %73 = sub i16 %72, %71 ; <i16> [#uses=1] - store i16 %73, i16* @us, align 2 - %74 = load i8* @uc, align 1 ; <i8> [#uses=1] - %75 = zext i8 %74 to i32 ; <i32> [#uses=2] - %76 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %77 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %76, i32 %75) ; <i32> [#uses=1] - %78 = sub i32 %77, %75 ; <i32> [#uses=1] - store i32 %78, i32* @si, align 4 - %79 = load i8* @uc, align 1 ; <i8> [#uses=1] - %80 = zext i8 %79 to i32 ; <i32> [#uses=2] - %81 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %82 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %81, i32 %80) ; <i32> [#uses=1] - %83 = sub i32 %82, %80 ; <i32> [#uses=1] - store i32 %83, i32* @ui, align 4 - %84 = load i8* @uc, align 1 ; <i8> [#uses=1] - %85 = zext i8 %84 to i32 ; <i32> [#uses=2] - %86 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %87 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %86, i32 %85) ; <i32> [#uses=1] - %88 = sub i32 %87, %85 ; <i32> [#uses=1] - store i32 %88, i32* @sl, align 4 - %89 = load i8* @uc, align 1 ; <i8> [#uses=1] - %90 = zext i8 %89 to i32 ; <i32> [#uses=2] - %91 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %92 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %91, i32 %90) ; <i32> [#uses=1] - %93 = sub i32 %92, %90 ; <i32> [#uses=1] - store i32 %93, i32* @ul, align 4 - %94 = load i8* @uc, align 1 ; <i8> [#uses=1] - %95 = zext i8 %94 to i64 ; <i64> [#uses=2] - %96 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %97 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %96, i64 %95) ; <i64> [#uses=1] - %98 = sub i64 %97, %95 ; <i64> [#uses=1] - store i64 %98, i64* @sll, align 8 - %99 = load i8* @uc, align 1 ; <i8> [#uses=1] - %100 = zext i8 %99 to i64 ; <i64> [#uses=2] - %101 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %102 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %101, i64 %100) ; <i64> [#uses=1] - %103 = sub i64 %102, %100 ; <i64> [#uses=1] - store i64 %103, i64* @ull, align 8 - %104 = load i8* @uc, align 1 ; <i8> [#uses=1] - %105 = zext i8 %104 to i32 ; <i32> [#uses=1] - %106 = trunc i32 %105 to i8 ; <i8> 
[#uses=2] - %107 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @sc, i8 %106) ; <i8> [#uses=1] - %108 = or i8 %107, %106 ; <i8> [#uses=1] - store i8 %108, i8* @sc, align 1 - %109 = load i8* @uc, align 1 ; <i8> [#uses=1] - %110 = zext i8 %109 to i32 ; <i32> [#uses=1] - %111 = trunc i32 %110 to i8 ; <i8> [#uses=2] - %112 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @uc, i8 %111) ; <i8> [#uses=1] - %113 = or i8 %112, %111 ; <i8> [#uses=1] - store i8 %113, i8* @uc, align 1 - %114 = load i8* @uc, align 1 ; <i8> [#uses=1] - %115 = zext i8 %114 to i32 ; <i32> [#uses=1] - %116 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %117 = trunc i32 %115 to i16 ; <i16> [#uses=2] - %118 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %116, i16 %117) ; <i16> [#uses=1] - %119 = or i16 %118, %117 ; <i16> [#uses=1] - store i16 %119, i16* @ss, align 2 - %120 = load i8* @uc, align 1 ; <i8> [#uses=1] - %121 = zext i8 %120 to i32 ; <i32> [#uses=1] - %122 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %123 = trunc i32 %121 to i16 ; <i16> [#uses=2] - %124 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %122, i16 %123) ; <i16> [#uses=1] - %125 = or i16 %124, %123 ; <i16> [#uses=1] - store i16 %125, i16* @us, align 2 - %126 = load i8* @uc, align 1 ; <i8> [#uses=1] - %127 = zext i8 %126 to i32 ; <i32> [#uses=2] - %128 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %129 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %128, i32 %127) ; <i32> [#uses=1] - %130 = or i32 %129, %127 ; <i32> [#uses=1] - store i32 %130, i32* @si, align 4 - %131 = load i8* @uc, align 1 ; <i8> [#uses=1] - %132 = zext i8 %131 to i32 ; <i32> [#uses=2] - %133 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %134 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %133, i32 %132) ; <i32> [#uses=1] - %135 = or i32 %134, %132 ; <i32> [#uses=1] - store i32 %135, i32* @ui, align 4 - %136 = load i8* @uc, align 1 ; <i8> [#uses=1] - %137 = zext i8 %136 to i32 ; <i32> [#uses=2] - %138 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %139 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %138, i32 %137) ; <i32> [#uses=1] - %140 = or i32 %139, %137 ; <i32> [#uses=1] - store i32 %140, i32* @sl, align 4 - %141 = load i8* @uc, align 1 ; <i8> [#uses=1] - %142 = zext i8 %141 to i32 ; <i32> [#uses=2] - %143 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %144 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %143, i32 %142) ; <i32> [#uses=1] - %145 = or i32 %144, %142 ; <i32> [#uses=1] - store i32 %145, i32* @ul, align 4 - %146 = load i8* @uc, align 1 ; <i8> [#uses=1] - %147 = zext i8 %146 to i64 ; <i64> [#uses=2] - %148 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %149 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %148, i64 %147) ; <i64> [#uses=1] - %150 = or i64 %149, %147 ; <i64> [#uses=1] - store i64 %150, i64* @sll, align 8 - %151 = load i8* @uc, align 1 ; <i8> [#uses=1] - %152 = zext i8 %151 to i64 ; <i64> [#uses=2] - %153 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %154 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %153, i64 %152) ; <i64> [#uses=1] - %155 = or i64 %154, %152 ; <i64> [#uses=1] - store i64 %155, i64* @ull, align 8 - %156 = load i8* @uc, align 1 ; <i8> [#uses=1] - %157 = zext i8 %156 to i32 ; <i32> [#uses=1] - %158 = trunc i32 %157 to i8 ; <i8> [#uses=2] - %159 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @sc, i8 %158) ; <i8> [#uses=1] - %160 = xor i8 %159, %158 ; <i8> [#uses=1] - store i8 
%160, i8* @sc, align 1 - %161 = load i8* @uc, align 1 ; <i8> [#uses=1] - %162 = zext i8 %161 to i32 ; <i32> [#uses=1] - %163 = trunc i32 %162 to i8 ; <i8> [#uses=2] - %164 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @uc, i8 %163) ; <i8> [#uses=1] - %165 = xor i8 %164, %163 ; <i8> [#uses=1] - store i8 %165, i8* @uc, align 1 - %166 = load i8* @uc, align 1 ; <i8> [#uses=1] - %167 = zext i8 %166 to i32 ; <i32> [#uses=1] - %168 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %169 = trunc i32 %167 to i16 ; <i16> [#uses=2] - %170 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %168, i16 %169) ; <i16> [#uses=1] - %171 = xor i16 %170, %169 ; <i16> [#uses=1] - store i16 %171, i16* @ss, align 2 - %172 = load i8* @uc, align 1 ; <i8> [#uses=1] - %173 = zext i8 %172 to i32 ; <i32> [#uses=1] - %174 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %175 = trunc i32 %173 to i16 ; <i16> [#uses=2] - %176 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %174, i16 %175) ; <i16> [#uses=1] - %177 = xor i16 %176, %175 ; <i16> [#uses=1] - store i16 %177, i16* @us, align 2 - %178 = load i8* @uc, align 1 ; <i8> [#uses=1] - %179 = zext i8 %178 to i32 ; <i32> [#uses=2] - %180 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %181 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %180, i32 %179) ; <i32> [#uses=1] - %182 = xor i32 %181, %179 ; <i32> [#uses=1] - store i32 %182, i32* @si, align 4 - %183 = load i8* @uc, align 1 ; <i8> [#uses=1] - %184 = zext i8 %183 to i32 ; <i32> [#uses=2] - %185 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %186 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %185, i32 %184) ; <i32> [#uses=1] - %187 = xor i32 %186, %184 ; <i32> [#uses=1] - store i32 %187, i32* @ui, align 4 - %188 = load i8* @uc, align 1 ; <i8> [#uses=1] - %189 = zext i8 %188 to i32 ; <i32> [#uses=2] - %190 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %191 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %190, i32 %189) ; <i32> [#uses=1] - %192 = xor i32 %191, %189 ; <i32> [#uses=1] - store i32 %192, i32* @sl, align 4 - %193 = load i8* @uc, align 1 ; <i8> [#uses=1] - %194 = zext i8 %193 to i32 ; <i32> [#uses=2] - %195 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %196 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %195, i32 %194) ; <i32> [#uses=1] - %197 = xor i32 %196, %194 ; <i32> [#uses=1] - store i32 %197, i32* @ul, align 4 - %198 = load i8* @uc, align 1 ; <i8> [#uses=1] - %199 = zext i8 %198 to i64 ; <i64> [#uses=2] - %200 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %201 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %200, i64 %199) ; <i64> [#uses=1] - %202 = xor i64 %201, %199 ; <i64> [#uses=1] - store i64 %202, i64* @sll, align 8 - %203 = load i8* @uc, align 1 ; <i8> [#uses=1] - %204 = zext i8 %203 to i64 ; <i64> [#uses=2] - %205 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %206 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %205, i64 %204) ; <i64> [#uses=1] - %207 = xor i64 %206, %204 ; <i64> [#uses=1] - store i64 %207, i64* @ull, align 8 - %208 = load i8* @uc, align 1 ; <i8> [#uses=1] - %209 = zext i8 %208 to i32 ; <i32> [#uses=1] - %210 = trunc i32 %209 to i8 ; <i8> [#uses=2] - %211 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @sc, i8 %210) ; <i8> [#uses=1] - %212 = and i8 %211, %210 ; <i8> [#uses=1] - store i8 %212, i8* @sc, align 1 - %213 = load i8* @uc, align 1 ; <i8> [#uses=1] - %214 = zext i8 %213 to i32 ; <i32> [#uses=1] - %215 = 
trunc i32 %214 to i8 ; <i8> [#uses=2] - %216 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @uc, i8 %215) ; <i8> [#uses=1] - %217 = and i8 %216, %215 ; <i8> [#uses=1] - store i8 %217, i8* @uc, align 1 - %218 = load i8* @uc, align 1 ; <i8> [#uses=1] - %219 = zext i8 %218 to i32 ; <i32> [#uses=1] - %220 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %221 = trunc i32 %219 to i16 ; <i16> [#uses=2] - %222 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %220, i16 %221) ; <i16> [#uses=1] - %223 = and i16 %222, %221 ; <i16> [#uses=1] - store i16 %223, i16* @ss, align 2 - %224 = load i8* @uc, align 1 ; <i8> [#uses=1] - %225 = zext i8 %224 to i32 ; <i32> [#uses=1] - %226 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %227 = trunc i32 %225 to i16 ; <i16> [#uses=2] - %228 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %226, i16 %227) ; <i16> [#uses=1] - %229 = and i16 %228, %227 ; <i16> [#uses=1] - store i16 %229, i16* @us, align 2 - %230 = load i8* @uc, align 1 ; <i8> [#uses=1] - %231 = zext i8 %230 to i32 ; <i32> [#uses=2] - %232 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %233 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %232, i32 %231) ; <i32> [#uses=1] - %234 = and i32 %233, %231 ; <i32> [#uses=1] - store i32 %234, i32* @si, align 4 - %235 = load i8* @uc, align 1 ; <i8> [#uses=1] - %236 = zext i8 %235 to i32 ; <i32> [#uses=2] - %237 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %238 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %237, i32 %236) ; <i32> [#uses=1] - %239 = and i32 %238, %236 ; <i32> [#uses=1] - store i32 %239, i32* @ui, align 4 - %240 = load i8* @uc, align 1 ; <i8> [#uses=1] - %241 = zext i8 %240 to i32 ; <i32> [#uses=2] - %242 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %243 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %242, i32 %241) ; <i32> [#uses=1] - %244 = and i32 %243, %241 ; <i32> [#uses=1] - store i32 %244, i32* @sl, align 4 - %245 = load i8* @uc, align 1 ; <i8> [#uses=1] - %246 = zext i8 %245 to i32 ; <i32> [#uses=2] - %247 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %248 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %247, i32 %246) ; <i32> [#uses=1] - %249 = and i32 %248, %246 ; <i32> [#uses=1] - store i32 %249, i32* @ul, align 4 - %250 = load i8* @uc, align 1 ; <i8> [#uses=1] - %251 = zext i8 %250 to i64 ; <i64> [#uses=2] - %252 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %253 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %252, i64 %251) ; <i64> [#uses=1] - %254 = and i64 %253, %251 ; <i64> [#uses=1] - store i64 %254, i64* @sll, align 8 - %255 = load i8* @uc, align 1 ; <i8> [#uses=1] - %256 = zext i8 %255 to i64 ; <i64> [#uses=2] - %257 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %258 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %257, i64 %256) ; <i64> [#uses=1] - %259 = and i64 %258, %256 ; <i64> [#uses=1] - store i64 %259, i64* @ull, align 8 - %260 = load i8* @uc, align 1 ; <i8> [#uses=1] - %261 = zext i8 %260 to i32 ; <i32> [#uses=1] - %262 = trunc i32 %261 to i8 ; <i8> [#uses=2] - %263 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @sc, i8 %262) ; <i8> [#uses=1] - %264 = xor i8 %263, -1 ; <i8> [#uses=1] - %265 = and i8 %264, %262 ; <i8> [#uses=1] - store i8 %265, i8* @sc, align 1 - %266 = load i8* @uc, align 1 ; <i8> [#uses=1] - %267 = zext i8 %266 to i32 ; <i32> [#uses=1] - %268 = trunc i32 %267 to i8 ; <i8> [#uses=2] - %269 = call i8 
@llvm.atomic.load.nand.i8.p0i8(i8* @uc, i8 %268) ; <i8> [#uses=1] - %270 = xor i8 %269, -1 ; <i8> [#uses=1] - %271 = and i8 %270, %268 ; <i8> [#uses=1] - store i8 %271, i8* @uc, align 1 - %272 = load i8* @uc, align 1 ; <i8> [#uses=1] - %273 = zext i8 %272 to i32 ; <i32> [#uses=1] - %274 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %275 = trunc i32 %273 to i16 ; <i16> [#uses=2] - %276 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %274, i16 %275) ; <i16> [#uses=1] - %277 = xor i16 %276, -1 ; <i16> [#uses=1] - %278 = and i16 %277, %275 ; <i16> [#uses=1] - store i16 %278, i16* @ss, align 2 - %279 = load i8* @uc, align 1 ; <i8> [#uses=1] - %280 = zext i8 %279 to i32 ; <i32> [#uses=1] - %281 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %282 = trunc i32 %280 to i16 ; <i16> [#uses=2] - %283 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %281, i16 %282) ; <i16> [#uses=1] - %284 = xor i16 %283, -1 ; <i16> [#uses=1] - %285 = and i16 %284, %282 ; <i16> [#uses=1] - store i16 %285, i16* @us, align 2 - %286 = load i8* @uc, align 1 ; <i8> [#uses=1] - %287 = zext i8 %286 to i32 ; <i32> [#uses=2] - %288 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %289 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %288, i32 %287) ; <i32> [#uses=1] - %290 = xor i32 %289, -1 ; <i32> [#uses=1] - %291 = and i32 %290, %287 ; <i32> [#uses=1] - store i32 %291, i32* @si, align 4 - %292 = load i8* @uc, align 1 ; <i8> [#uses=1] - %293 = zext i8 %292 to i32 ; <i32> [#uses=2] - %294 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %295 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %294, i32 %293) ; <i32> [#uses=1] - %296 = xor i32 %295, -1 ; <i32> [#uses=1] - %297 = and i32 %296, %293 ; <i32> [#uses=1] - store i32 %297, i32* @ui, align 4 - %298 = load i8* @uc, align 1 ; <i8> [#uses=1] - %299 = zext i8 %298 to i32 ; <i32> [#uses=2] - %300 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %301 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %300, i32 %299) ; <i32> [#uses=1] - %302 = xor i32 %301, -1 ; <i32> [#uses=1] - %303 = and i32 %302, %299 ; <i32> [#uses=1] - store i32 %303, i32* @sl, align 4 - %304 = load i8* @uc, align 1 ; <i8> [#uses=1] - %305 = zext i8 %304 to i32 ; <i32> [#uses=2] - %306 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %307 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %306, i32 %305) ; <i32> [#uses=1] - %308 = xor i32 %307, -1 ; <i32> [#uses=1] - %309 = and i32 %308, %305 ; <i32> [#uses=1] - store i32 %309, i32* @ul, align 4 - %310 = load i8* @uc, align 1 ; <i8> [#uses=1] - %311 = zext i8 %310 to i64 ; <i64> [#uses=2] - %312 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - %313 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %312, i64 %311) ; <i64> [#uses=1] - %314 = xor i64 %313, -1 ; <i64> [#uses=1] - %315 = and i64 %314, %311 ; <i64> [#uses=1] - store i64 %315, i64* @sll, align 8 - %316 = load i8* @uc, align 1 ; <i8> [#uses=1] - %317 = zext i8 %316 to i64 ; <i64> [#uses=2] - %318 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - %319 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %318, i64 %317) ; <i64> [#uses=1] - %320 = xor i64 %319, -1 ; <i64> [#uses=1] - %321 = and i64 %320, %317 ; <i64> [#uses=1] - store i64 %321, i64* @ull, align 8 - br label %return - -return: ; preds = %entry - ret void -} - -define void @test_compare_and_swap() nounwind { -entry: - %0 = load i8* @sc, align 1 ; <i8> [#uses=1] - %1 = zext i8 %0 to 
i32 ; <i32> [#uses=1] - %2 = load i8* @uc, align 1 ; <i8> [#uses=1] - %3 = zext i8 %2 to i32 ; <i32> [#uses=1] - %4 = trunc i32 %3 to i8 ; <i8> [#uses=1] - %5 = trunc i32 %1 to i8 ; <i8> [#uses=1] - %6 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @sc, i8 %4, i8 %5) ; <i8> [#uses=1] - store i8 %6, i8* @sc, align 1 - %7 = load i8* @sc, align 1 ; <i8> [#uses=1] - %8 = zext i8 %7 to i32 ; <i32> [#uses=1] - %9 = load i8* @uc, align 1 ; <i8> [#uses=1] - %10 = zext i8 %9 to i32 ; <i32> [#uses=1] - %11 = trunc i32 %10 to i8 ; <i8> [#uses=1] - %12 = trunc i32 %8 to i8 ; <i8> [#uses=1] - %13 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @uc, i8 %11, i8 %12) ; <i8> [#uses=1] - store i8 %13, i8* @uc, align 1 - %14 = load i8* @sc, align 1 ; <i8> [#uses=1] - %15 = sext i8 %14 to i16 ; <i16> [#uses=1] - %16 = zext i16 %15 to i32 ; <i32> [#uses=1] - %17 = load i8* @uc, align 1 ; <i8> [#uses=1] - %18 = zext i8 %17 to i32 ; <i32> [#uses=1] - %19 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %20 = trunc i32 %18 to i16 ; <i16> [#uses=1] - %21 = trunc i32 %16 to i16 ; <i16> [#uses=1] - %22 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %19, i16 %20, i16 %21) ; <i16> [#uses=1] - store i16 %22, i16* @ss, align 2 - %23 = load i8* @sc, align 1 ; <i8> [#uses=1] - %24 = sext i8 %23 to i16 ; <i16> [#uses=1] - %25 = zext i16 %24 to i32 ; <i32> [#uses=1] - %26 = load i8* @uc, align 1 ; <i8> [#uses=1] - %27 = zext i8 %26 to i32 ; <i32> [#uses=1] - %28 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %29 = trunc i32 %27 to i16 ; <i16> [#uses=1] - %30 = trunc i32 %25 to i16 ; <i16> [#uses=1] - %31 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %28, i16 %29, i16 %30) ; <i16> [#uses=1] - store i16 %31, i16* @us, align 2 - %32 = load i8* @sc, align 1 ; <i8> [#uses=1] - %33 = sext i8 %32 to i32 ; <i32> [#uses=1] - %34 = load i8* @uc, align 1 ; <i8> [#uses=1] - %35 = zext i8 %34 to i32 ; <i32> [#uses=1] - %36 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %37 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %36, i32 %35, i32 %33) ; <i32> [#uses=1] - store i32 %37, i32* @si, align 4 - %38 = load i8* @sc, align 1 ; <i8> [#uses=1] - %39 = sext i8 %38 to i32 ; <i32> [#uses=1] - %40 = load i8* @uc, align 1 ; <i8> [#uses=1] - %41 = zext i8 %40 to i32 ; <i32> [#uses=1] - %42 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %43 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %42, i32 %41, i32 %39) ; <i32> [#uses=1] - store i32 %43, i32* @ui, align 4 - %44 = load i8* @sc, align 1 ; <i8> [#uses=1] - %45 = sext i8 %44 to i32 ; <i32> [#uses=1] - %46 = load i8* @uc, align 1 ; <i8> [#uses=1] - %47 = zext i8 %46 to i32 ; <i32> [#uses=1] - %48 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %49 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %48, i32 %47, i32 %45) ; <i32> [#uses=1] - store i32 %49, i32* @sl, align 4 - %50 = load i8* @sc, align 1 ; <i8> [#uses=1] - %51 = sext i8 %50 to i32 ; <i32> [#uses=1] - %52 = load i8* @uc, align 1 ; <i8> [#uses=1] - %53 = zext i8 %52 to i32 ; <i32> [#uses=1] - %54 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %55 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %54, i32 %53, i32 %51) ; <i32> [#uses=1] - store i32 %55, i32* @ul, align 4 - %56 = load i8* @sc, align 1 ; <i8> [#uses=1] - %57 = zext i8 %56 to i32 ; <i32> [#uses=1] - %58 = load i8* @uc, align 1 ; <i8> [#uses=1] - %59 = zext i8 %58 to i32 ; <i32> [#uses=1] - %60 = trunc i32 %59 to i8 ; <i8> [#uses=2] - %61 = trunc i32 
%57 to i8 ; <i8> [#uses=1] - %62 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @sc, i8 %60, i8 %61) ; <i8> [#uses=1] - %63 = icmp eq i8 %62, %60 ; <i1> [#uses=1] - %64 = zext i1 %63 to i8 ; <i8> [#uses=1] - %65 = zext i8 %64 to i32 ; <i32> [#uses=1] - store i32 %65, i32* @ui, align 4 - %66 = load i8* @sc, align 1 ; <i8> [#uses=1] - %67 = zext i8 %66 to i32 ; <i32> [#uses=1] - %68 = load i8* @uc, align 1 ; <i8> [#uses=1] - %69 = zext i8 %68 to i32 ; <i32> [#uses=1] - %70 = trunc i32 %69 to i8 ; <i8> [#uses=2] - %71 = trunc i32 %67 to i8 ; <i8> [#uses=1] - %72 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @uc, i8 %70, i8 %71) ; <i8> [#uses=1] - %73 = icmp eq i8 %72, %70 ; <i1> [#uses=1] - %74 = zext i1 %73 to i8 ; <i8> [#uses=1] - %75 = zext i8 %74 to i32 ; <i32> [#uses=1] - store i32 %75, i32* @ui, align 4 - %76 = load i8* @sc, align 1 ; <i8> [#uses=1] - %77 = sext i8 %76 to i16 ; <i16> [#uses=1] - %78 = zext i16 %77 to i32 ; <i32> [#uses=1] - %79 = load i8* @uc, align 1 ; <i8> [#uses=1] - %80 = zext i8 %79 to i32 ; <i32> [#uses=1] - %81 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %82 = trunc i32 %80 to i16 ; <i16> [#uses=2] - %83 = trunc i32 %78 to i16 ; <i16> [#uses=1] - %84 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %81, i16 %82, i16 %83) ; <i16> [#uses=1] - %85 = icmp eq i16 %84, %82 ; <i1> [#uses=1] - %86 = zext i1 %85 to i8 ; <i8> [#uses=1] - %87 = zext i8 %86 to i32 ; <i32> [#uses=1] - store i32 %87, i32* @ui, align 4 - %88 = load i8* @sc, align 1 ; <i8> [#uses=1] - %89 = sext i8 %88 to i16 ; <i16> [#uses=1] - %90 = zext i16 %89 to i32 ; <i32> [#uses=1] - %91 = load i8* @uc, align 1 ; <i8> [#uses=1] - %92 = zext i8 %91 to i32 ; <i32> [#uses=1] - %93 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %94 = trunc i32 %92 to i16 ; <i16> [#uses=2] - %95 = trunc i32 %90 to i16 ; <i16> [#uses=1] - %96 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %93, i16 %94, i16 %95) ; <i16> [#uses=1] - %97 = icmp eq i16 %96, %94 ; <i1> [#uses=1] - %98 = zext i1 %97 to i8 ; <i8> [#uses=1] - %99 = zext i8 %98 to i32 ; <i32> [#uses=1] - store i32 %99, i32* @ui, align 4 - %100 = load i8* @sc, align 1 ; <i8> [#uses=1] - %101 = sext i8 %100 to i32 ; <i32> [#uses=1] - %102 = load i8* @uc, align 1 ; <i8> [#uses=1] - %103 = zext i8 %102 to i32 ; <i32> [#uses=2] - %104 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %105 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %104, i32 %103, i32 %101) ; <i32> [#uses=1] - %106 = icmp eq i32 %105, %103 ; <i1> [#uses=1] - %107 = zext i1 %106 to i8 ; <i8> [#uses=1] - %108 = zext i8 %107 to i32 ; <i32> [#uses=1] - store i32 %108, i32* @ui, align 4 - %109 = load i8* @sc, align 1 ; <i8> [#uses=1] - %110 = sext i8 %109 to i32 ; <i32> [#uses=1] - %111 = load i8* @uc, align 1 ; <i8> [#uses=1] - %112 = zext i8 %111 to i32 ; <i32> [#uses=2] - %113 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %114 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %113, i32 %112, i32 %110) ; <i32> [#uses=1] - %115 = icmp eq i32 %114, %112 ; <i1> [#uses=1] - %116 = zext i1 %115 to i8 ; <i8> [#uses=1] - %117 = zext i8 %116 to i32 ; <i32> [#uses=1] - store i32 %117, i32* @ui, align 4 - %118 = load i8* @sc, align 1 ; <i8> [#uses=1] - %119 = sext i8 %118 to i32 ; <i32> [#uses=1] - %120 = load i8* @uc, align 1 ; <i8> [#uses=1] - %121 = zext i8 %120 to i32 ; <i32> [#uses=2] - %122 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %123 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %122, i32 %121, i32 
%119) ; <i32> [#uses=1] - %124 = icmp eq i32 %123, %121 ; <i1> [#uses=1] - %125 = zext i1 %124 to i8 ; <i8> [#uses=1] - %126 = zext i8 %125 to i32 ; <i32> [#uses=1] - store i32 %126, i32* @ui, align 4 - %127 = load i8* @sc, align 1 ; <i8> [#uses=1] - %128 = sext i8 %127 to i32 ; <i32> [#uses=1] - %129 = load i8* @uc, align 1 ; <i8> [#uses=1] - %130 = zext i8 %129 to i32 ; <i32> [#uses=2] - %131 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %132 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %131, i32 %130, i32 %128) ; <i32> [#uses=1] - %133 = icmp eq i32 %132, %130 ; <i1> [#uses=1] - %134 = zext i1 %133 to i8 ; <i8> [#uses=1] - %135 = zext i8 %134 to i32 ; <i32> [#uses=1] - store i32 %135, i32* @ui, align 4 - br label %return - -return: ; preds = %entry - ret void -} - -declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind - -declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind - -declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind - -define void @test_lock() nounwind { -entry: - %0 = call i8 @llvm.atomic.swap.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=1] - store i8 %0, i8* @sc, align 1 - %1 = call i8 @llvm.atomic.swap.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=1] - store i8 %1, i8* @uc, align 1 - %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - %3 = call i16 @llvm.atomic.swap.i16.p0i16(i16* %2, i16 1) ; <i16> [#uses=1] - store i16 %3, i16* @ss, align 2 - %4 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - %5 = call i16 @llvm.atomic.swap.i16.p0i16(i16* %4, i16 1) ; <i16> [#uses=1] - store i16 %5, i16* @us, align 2 - %6 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - %7 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %6, i32 1) ; <i32> [#uses=1] - store i32 %7, i32* @si, align 4 - %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - %9 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %8, i32 1) ; <i32> [#uses=1] - store i32 %9, i32* @ui, align 4 - %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - %11 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %10, i32 1) ; <i32> [#uses=1] - store i32 %11, i32* @sl, align 4 - %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - %13 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %12, i32 1) ; <i32> [#uses=1] - store i32 %13, i32* @ul, align 4 - call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false) - volatile store i8 0, i8* @sc, align 1 - volatile store i8 0, i8* @uc, align 1 - %14 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1] - volatile store i16 0, i16* %14, align 2 - %15 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1] - volatile store i16 0, i16* %15, align 2 - %16 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1] - volatile store i32 0, i32* %16, align 4 - %17 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1] - volatile store i32 0, i32* %17, align 4 - %18 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1] - volatile store i32 0, i32* %18, align 4 - %19 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1] - volatile store i32 0, i32* %19, align 4 - %20 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1] - volatile store i64 0, i64* %20, align 8 - %21 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1] - volatile store i64 0, i64* %21, align 8 - br label %return - -return: ; preds = %entry - ret void -} - -declare i8 
@llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind - -declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind diff --git a/test/CodeGen/X86/2009-03-13-PHIElimBug.ll b/test/CodeGen/X86/2009-03-13-PHIElimBug.ll index 45fc269..e14c30a 100644 --- a/test/CodeGen/X86/2009-03-13-PHIElimBug.ll +++ b/test/CodeGen/X86/2009-03-13-PHIElimBug.ll @@ -24,9 +24,13 @@ cont2: ; preds = %cont lpad: ; preds = %cont, %entry %y = phi i32 [ %a, %entry ], [ %aa, %cont ] ; <i32> [#uses=1] + %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup ret i32 %y } ; CHECK: call{{.*}}f ; CHECK: movl %eax, %esi ; CHECK: call{{.*}}g + +declare i32 @__gxx_personality_v0(...) diff --git a/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll b/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll index b13d33e..f8c7a15 100644 --- a/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll +++ b/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll @@ -17,8 +17,12 @@ cont2: ; preds = %cont lpad: ; preds = %cont, %entry %v = phi i32 [ %x, %entry ], [ %a, %cont ] ; <i32> [#uses=1] + %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup ret i32 %v } ; CHECK: lpad ; CHECK-NEXT: Ltmp + +declare i32 @__gxx_personality_v0(...) diff --git a/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll b/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll index 01852a6..3076322 100644 --- a/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll +++ b/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | not grep movl +; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -mattr=+mmx,+sse2 | not grep movl define <8 x i8> @a(i8 zeroext %x) nounwind { %r = insertelement <8 x i8> undef, i8 %x, i32 0 diff --git a/test/CodeGen/X86/2009-06-05-VZextByteShort.ll b/test/CodeGen/X86/2009-06-05-VZextByteShort.ll index 5c51480..5f5d5cc 100644 --- a/test/CodeGen/X86/2009-06-05-VZextByteShort.ll +++ b/test/CodeGen/X86/2009-06-05-VZextByteShort.ll @@ -1,6 +1,6 @@ ; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 > %t1 ; RUN: grep movzwl %t1 | count 2 -; RUN: grep movzbl %t1 | count 2 +; RUN: grep movzbl %t1 | count 1 ; RUN: grep movd %t1 | count 4 define <4 x i16> @a(i32* %x1) nounwind { diff --git a/test/CodeGen/X86/2009-06-07-ExpandMMXBitcast.ll b/test/CodeGen/X86/2009-06-07-ExpandMMXBitcast.ll index 07ef53e..025ab2e 100644 --- a/test/CodeGen/X86/2009-06-07-ExpandMMXBitcast.ll +++ b/test/CodeGen/X86/2009-06-07-ExpandMMXBitcast.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=x86 -mattr=+mmx | grep movl | count 2 +; RUN: llc < %s -mtriple=x86_64-linux -mattr=+mmx | grep movd | count 3 define i64 @a(i32 %a, i32 %b) nounwind readnone { entry: diff --git a/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll b/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll index 228cd48..8ea70b4 100644 --- a/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll +++ b/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll @@ -1,8 +1,9 @@ -; RUN: llc < %s -march=x86 -mattr=+sse,-sse2 +; RUN: llc < %s -march=x86 -mattr=+sse,-sse2 | FileCheck %s ; PR2484 define <4 x float> @f4523(<4 x float> %a,<4 x float> %b) nounwind { entry: +; CHECK: shufps $-28, %xmm %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4,i32 5,i32 2,i32 3> ret <4 x float> %shuffle diff --git a/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll b/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll index 
7b5e871..5483b73 100644 --- a/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll +++ b/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll @@ -34,6 +34,8 @@ invcont2: ; preds = %invcont1 ret i32 0 lpad: ; preds = %invcont1, %invcont, %entry + %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup %8 = call i32 @vm_deallocate(i32 undef, i64 0, i64 %0) ; <i32> [#uses=0] unreachable } @@ -45,3 +47,5 @@ declare i8* @pluginInstance(i8*, i32) declare zeroext i8 @invoke(i8*, i32, i8*, i64, i32, i64*, i32*) declare void @booleanAndDataReply(i32, i32, i32, i32, i64, i32) + +declare i32 @__gxx_personality_v0(...) diff --git a/test/CodeGen/X86/2009-10-16-Scope.ll b/test/CodeGen/X86/2009-10-16-Scope.ll index 86c2024..e41038d 100644 --- a/test/CodeGen/X86/2009-10-16-Scope.ll +++ b/test/CodeGen/X86/2009-10-16-Scope.ll @@ -23,10 +23,10 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone declare i32 @foo(i32) ssp !0 = metadata !{i32 5, i32 2, metadata !1, null} -!1 = metadata !{i32 458763, metadata !2}; [DW_TAG_lexical_block ] +!1 = metadata !{i32 458763, metadata !2, i32 1, i32 1}; [DW_TAG_lexical_block ] !2 = metadata !{i32 458798, i32 0, metadata !3, metadata !"bar", metadata !"bar", metadata !"bar", metadata !3, i32 4, null, i1 false, i1 true}; [DW_TAG_subprogram ] !3 = metadata !{i32 458769, i32 0, i32 12, metadata !"genmodes.i", metadata !"/Users/yash/Downloads", metadata !"clang 1.1", i1 true, i1 false, metadata !"", i32 0}; [DW_TAG_compile_unit ] !4 = metadata !{i32 459008, metadata !5, metadata !"count_", metadata !3, i32 5, metadata !6}; [ DW_TAG_auto_variable ] -!5 = metadata !{i32 458763, metadata !1}; [DW_TAG_lexical_block ] +!5 = metadata !{i32 458763, metadata !1, i32 1, i32 1}; [DW_TAG_lexical_block ] !6 = metadata !{i32 458788, metadata !3, metadata !"int", metadata !3, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5}; [DW_TAG_base_type ] !7 = metadata !{i32 6, i32 1, metadata !2, null} diff --git a/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll b/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll index d7f0c1a..006a02a 100644 --- a/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll +++ b/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll @@ -36,7 +36,7 @@ lt_init.exit: ; preds = %if.end.i, %if.then. 
call void asm sideeffect "cpuid", "~{ax},~{bx},~{cx},~{dx},~{memory},~{dirflag},~{fpsr},~{flags}"() nounwind %4 = call i64 @llvm.readcyclecounter() nounwind ; <i64> [#uses=1] %5 = sub i64 %4, %2 ; <i64> [#uses=1] - %6 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* getelementptr inbounds ([1216 x i64]* @__profiling_callsite_timestamps_live, i32 0, i32 51), i64 %5) nounwind ; <i64> [#uses=0] + %6 = atomicrmw add i64* getelementptr inbounds ([1216 x i64]* @__profiling_callsite_timestamps_live, i32 0, i32 51), i64 %5 monotonic ;CHECK: lock ;CHECK-NEXT: {{xadd|addq}} %rdx, __profiling_callsite_timestamps_live ;CHECK-NEXT: cmpl $0, @@ -54,7 +54,7 @@ if.end: ; preds = %if.then, %lt_init.e tail call void asm sideeffect "cpuid", "~{ax},~{bx},~{cx},~{dx},~{memory},~{dirflag},~{fpsr},~{flags}"() nounwind %8 = tail call i64 @llvm.readcyclecounter() nounwind ; <i64> [#uses=1] %9 = sub i64 %8, %0 ; <i64> [#uses=1] - %10 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* getelementptr inbounds ([1216 x i64]* @__profiling_callsite_timestamps_live, i32 0, i32 50), i64 %9) ; <i64> [#uses=0] + %10 = atomicrmw add i64* getelementptr inbounds ([1216 x i64]* @__profiling_callsite_timestamps_live, i32 0, i32 50), i64 %9 monotonic ret i32 %7 } @@ -64,6 +64,4 @@ declare i32 @lt_dlinit() declare i32 @warn_dlerror(i8*) nounwind -declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind - declare i64 @llvm.readcyclecounter() nounwind diff --git a/test/CodeGen/X86/2009-11-25-ImpDefBug.ll b/test/CodeGen/X86/2009-11-25-ImpDefBug.ll index 7606c0e..396638f 100644 --- a/test/CodeGen/X86/2009-11-25-ImpDefBug.ll +++ b/test/CodeGen/X86/2009-11-25-ImpDefBug.ll @@ -46,6 +46,8 @@ bb1.i5: ; preds = %bb.i1 lpad: ; preds = %bb1.i.fragment.cl, %bb1.i.fragment, %bb5 %.SV10.phi807 = phi i8* [ undef, %bb1.i.fragment.cl ], [ undef, %bb1.i.fragment ], [ undef, %bb5 ] ; <i8*> [#uses=1] + %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup %1 = load i8* %.SV10.phi807, align 8 ; <i8> [#uses=0] br i1 undef, label %meshBB81.bbcl.disp, label %bb13.fragment.bbcl.disp @@ -114,3 +116,5 @@ meshBB81.bbcl.disp: ; preds = %meshBB81.cl141, %me i8 51, label %meshBB81.cl141 ] } + +declare i32 @__gxx_personality_v0(...) diff --git a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll index c693636..8b55bd7 100644 --- a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll +++ b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll @@ -18,12 +18,6 @@ entry: ; CHECK: lock ; CHECK: cmpxchg8b ; CHECK: jne - tail call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) - %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0] - tail call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) + %0 = atomicrmw add i64* %p, i64 1 seq_cst ret void } - -declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind - -declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind diff --git a/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll b/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll index ef1798d..864ebf1 100644 --- a/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll +++ b/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll @@ -60,5 +60,9 @@ bb92: ; preds = %entry unreachable lpad159: ; preds = %bb58 + %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable } + +declare i32 @__gxx_personality_v0(...) 
diff --git a/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll b/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll index 69787c7..5372bc5 100644 --- a/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll +++ b/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll @@ -1,32 +1,35 @@ ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | FileCheck %s ; There are no MMX operations here, so we use XMM or i64. +; CHECK: ti8 define void @ti8(double %a, double %b) nounwind { entry: %tmp1 = bitcast double %a to <8 x i8> %tmp2 = bitcast double %b to <8 x i8> %tmp3 = add <8 x i8> %tmp1, %tmp2 -; CHECK: paddb %xmm1, %xmm0 +; CHECK: paddw store <8 x i8> %tmp3, <8 x i8>* null ret void } +; CHECK: ti16 define void @ti16(double %a, double %b) nounwind { entry: %tmp1 = bitcast double %a to <4 x i16> %tmp2 = bitcast double %b to <4 x i16> %tmp3 = add <4 x i16> %tmp1, %tmp2 -; CHECK: paddw %xmm1, %xmm0 +; CHECK: paddd store <4 x i16> %tmp3, <4 x i16>* null ret void } +; CHECK: ti32 define void @ti32(double %a, double %b) nounwind { entry: %tmp1 = bitcast double %a to <2 x i32> %tmp2 = bitcast double %b to <2 x i32> %tmp3 = add <2 x i32> %tmp1, %tmp2 -; CHECK: paddd %xmm1, %xmm0 +; CHECK: paddq store <2 x i32> %tmp3, <2 x i32>* null ret void } @@ -55,6 +58,7 @@ entry: ret void } +; CHECK: ti16a define void @ti16a(double %a, double %b) nounwind { entry: %tmp1 = bitcast double %a to x86_mmx @@ -66,6 +70,7 @@ entry: ret void } +; CHECK: ti32a define void @ti32a(double %a, double %b) nounwind { entry: %tmp1 = bitcast double %a to x86_mmx @@ -77,6 +82,7 @@ entry: ret void } +; CHECK: ti64a define void @ti64a(double %a, double %b) nounwind { entry: %tmp1 = bitcast double %a to x86_mmx diff --git a/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll b/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll index 3738f80..7af58dc 100644 --- a/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll +++ b/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll @@ -84,8 +84,6 @@ cleanup.switch: ; preds = %invoke.cont5 ] cleanup.end: ; preds = %cleanup.switch - %exc6 = call i8* @llvm.eh.exception() ; <i8*> [#uses=1] - store i8* %exc6, i8** %_rethrow store i32 2, i32* %cleanup.dst7 br label %finally diff --git a/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll b/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll index 40e7f01..0e4118a 100644 --- a/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll +++ b/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll @@ -18,11 +18,9 @@ entry: loop: ; CHECK: lock ; CHECK-NEXT: cmpxchg8b - %r = call i64 @llvm.atomic.cmp.swap.i64.p0i64(i64* %ptr, i64 0, i64 1) + %r = cmpxchg i64* %ptr, i64 0, i64 1 monotonic %stored1 = icmp eq i64 %r, 0 br i1 %stored1, label %loop, label %continue continue: ret void } - -declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64* nocapture, i64, i64) nounwind diff --git a/test/CodeGen/X86/2011-08-23-PerformSubCombine128.ll b/test/CodeGen/X86/2011-08-23-PerformSubCombine128.ll new file mode 100644 index 0000000..12171ac --- /dev/null +++ b/test/CodeGen/X86/2011-08-23-PerformSubCombine128.ll @@ -0,0 +1,18 @@ +; RUN: llc -march=x86-64 -O2 < %s + +define void @test(i64 %add127.tr.i2686) { +entry: + %conv143.i2687 = and i64 %add127.tr.i2686, 72057594037927935 + %conv76.i2623 = zext i64 %conv143.i2687 to i128 + %mul148.i2338 = mul i128 0, %conv76.i2623 + %add149.i2339 = add i128 %mul148.i2338, 0 + %add.i2303 = add i128 0, 170141183460469229370468033484042534912 + %add6.i2270 = add i128 %add.i2303, 0 + %sub58.i2271 = sub i128 %add6.i2270, %add149.i2339 + %add71.i2272 = add i128 %sub58.i2271, 0 + %add105.i2273 = add i128 %add71.i2272, 0 + 
%add116.i2274 = add i128 %add105.i2273, 0 + %shr124.i2277 = lshr i128 %add116.i2274, 56 + %add116.tr.i2280 = trunc i128 %add116.i2274 to i64 + ret void +} diff --git a/test/CodeGen/X86/2011-08-23-Trampoline.ll b/test/CodeGen/X86/2011-08-23-Trampoline.ll new file mode 100644 index 0000000..7a5a0f8 --- /dev/null +++ b/test/CodeGen/X86/2011-08-23-Trampoline.ll @@ -0,0 +1,16 @@ +; RUN: llc < %s -march=x86 +; RUN: llc < %s -march=x86-64 + + %struct.FRAME.gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets = type { i32, i32, void (i32, i32)*, i8 (i32, i32)* } + +define fastcc i32 @gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets.5146(i64 %table.0.0, i64 %table.0.1, i32 %last, i32 %pos) { +entry: + call void @llvm.init.trampoline( i8* null, i8* bitcast (void (%struct.FRAME.gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets*, i32, i32)* @gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets__move.5177 to i8*), i8* null ) ; <i8*> [#uses=0] + %tramp22 = call i8* @llvm.adjust.trampoline( i8* null) + unreachable +} + +declare void @gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets__move.5177(%struct.FRAME.gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets* nest , i32, i32) nounwind + +declare void @llvm.init.trampoline(i8*, i8*, i8*) nounwind +declare i8* @llvm.adjust.trampoline(i8*) nounwind diff --git a/test/CodeGen/X86/2011-08-29-BlockConstant.ll b/test/CodeGen/X86/2011-08-29-BlockConstant.ll new file mode 100644 index 0000000..83e4bcc --- /dev/null +++ b/test/CodeGen/X86/2011-08-29-BlockConstant.ll @@ -0,0 +1,34 @@ +; RUN: llc -march=x86-64 < %s | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" +target triple = "x86_64-unknown-linux-gnu" + +@x = global [500 x i64] zeroinitializer, align 64 ; <[500 x i64]*> +; CHECK: x: +; CHECK: .zero 4000 + +@y = global [63 x i64] [ + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + 
i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262, + i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262], + align 64 ; <[63 x i64]*> 0x5e5e5e5e +; CHECK: y: +; CHECK: .zero 504,94 diff --git a/test/CodeGen/X86/2011-08-29-InitOrder.ll b/test/CodeGen/X86/2011-08-29-InitOrder.ll new file mode 100644 index 0000000..72c79d2 --- /dev/null +++ b/test/CodeGen/X86/2011-08-29-InitOrder.ll @@ -0,0 +1,28 @@ +; RUN: llc < %s -mtriple=i386-linux-gnu | FileCheck %s --check-prefix=CHECK-DEFAULT +; RUN: llc < %s -mtriple=i386-apple-darwin | FileCheck %s --check-prefix=CHECK-DARWIN +; PR5329 + +@llvm.global_ctors = appending global [3 x { i32, void ()* }] [{ i32, void ()* } { i32 2000, void ()* @construct_2 }, { i32, void ()* } { i32 3000, void ()* @construct_3 }, { i32, void ()* } { i32 1000, void ()* @construct_1 }] +; CHECK-DEFAULT: construct_3 +; CHECK-DEFAULT: construct_2 +; CHECK-DEFAULT: construct_1 + +; CHECK-DARWIN: construct_1 +; CHECK-DARWIN: construct_2 +; CHECK-DARWIN: construct_3 + +@llvm.global_dtors = appending global [3 x { i32, void ()* }] [{ i32, void ()* } { i32 2000, void ()* @destruct_2 }, { i32, void ()* } { i32 1000, void ()* @destruct_1 }, { i32, void ()* } { i32 3000, void ()* @destruct_3 }] +; CHECK-DEFAULT: destruct_3 +; CHECK-DEFAULT: destruct_2 +; CHECK-DEFAULT: destruct_1 + +; CHECK-DARWIN: destruct_1 +; CHECK-DARWIN: destruct_2 +; CHECK-DARWIN: destruct_3 + +declare void @construct_1() +declare void @construct_2() +declare void @construct_3() +declare void @destruct_1() +declare void @destruct_2() +declare void @destruct_3() diff --git a/test/CodeGen/X86/2011-09-14-valcoalesce.ll b/test/CodeGen/X86/2011-09-14-valcoalesce.ll new file mode 100644 index 0000000..1068d1b --- /dev/null +++ b/test/CodeGen/X86/2011-09-14-valcoalesce.ll @@ -0,0 +1,174 @@ +; RUN: llc < %s -march=x86 | FileCheck %s +; +; Test RegistersDefinedFromSameValue. We have multiple copies of the same vreg: +; while.body85.i: +; vreg1 = copy vreg2 +; vreg2 = add +; critical edge from land.lhs.true.i -> if.end117.i: +; vreg27 = vreg2 +; critical edge from land.lhs.true103.i -> if.end117.i: +; vreg27 = vreg2 +; if.then108.i: +; vreg27 = vreg1 +; +; Prior to fixing PR10920 401.bzip miscompile, the coalescer would +; consider vreg1 and vreg27 to be copies of the same value. It would +; then remove one of the critical edge copies, which cannot safely be removed. +; +; CHECK: # %while.body85.i +; CHECK-NOT: # % +; CHECK-NOT: add +; CHECK: movl %[[POSTR:e[abcdxi]+]], %[[PRER:e[abcdxi]+]] +; CHECK: addl %{{.*}}, %[[POSTR]] +; CHECK: # %while.end.i +; CHECK: movl %[[POSTR]], %[[USER:e[abcdxi]+]] +; CHECK: # %land.lhs.true.i +; CHECK: movl %[[POSTR]], %[[USER]] +; CHECK: # %land.lhs.true103.i +; CHECK: movl %[[POSTR]], %[[USER]] +; CHECK: # %if.then108.i +; [[PRER] live out, so nothing on this path should define it.
+; CHECK-NOT: , %[[PRER]] +; CHECK: # %if.end117.i + +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32" + +@.str3 = external unnamed_addr constant [59 x i8], align 1 + +define void @BZ2_compressBlock() nounwind ssp { +entry: + br i1 undef, label %if.then68, label %if.end85 + +if.then68: ; preds = %entry + br label %for.body.i.i + +for.body.i.i: ; preds = %for.inc.i.i, %if.then68 + br i1 undef, label %for.inc.i.i, label %if.then.i.i + +if.then.i.i: ; preds = %for.body.i.i + br label %for.inc.i.i + +for.inc.i.i: ; preds = %if.then.i.i, %for.body.i.i + br i1 undef, label %makeMaps_e.exit.i, label %for.body.i.i + +makeMaps_e.exit.i: ; preds = %for.inc.i.i + br i1 undef, label %for.cond19.preheader.i, label %for.cond.for.cond19.preheader_crit_edge.i + +for.cond.for.cond19.preheader_crit_edge.i: ; preds = %makeMaps_e.exit.i + unreachable + +for.cond19.preheader.i: ; preds = %makeMaps_e.exit.i + br i1 undef, label %for.body25.lr.ph.i, label %for.cond33.preheader.i + +for.body25.lr.ph.i: ; preds = %for.cond19.preheader.i + br label %for.body25.i + +for.cond33.preheader.i: ; preds = %for.body25.i, %for.cond19.preheader.i + br i1 undef, label %if.then.i, label %if.end.i + +for.body25.i: ; preds = %for.body25.i, %for.body25.lr.ph.i + br i1 undef, label %for.body25.i, label %for.cond33.preheader.i + +if.then.i: ; preds = %for.cond33.preheader.i + br label %if.end.i + +if.end.i: ; preds = %if.then.i, %for.cond33.preheader.i + br i1 undef, label %for.inc27.us.5.i, label %for.end30.i + +for.end30.i: ; preds = %for.inc27.us.5.i, %if.end.i + br i1 undef, label %if.end36.i, label %if.then35.i + +if.then35.i: ; preds = %for.end30.i + unreachable + +if.end36.i: ; preds = %for.end30.i + %sub83.i = add nsw i32 undef, 1 + br label %while.body.i188 + +for.cond182.preheader.i: ; preds = %for.end173.i + br i1 undef, label %for.inc220.us.i, label %while.body300.preheader.i + +while.body.i188: ; preds = %for.end173.i, %if.end36.i + %gs.0526.i = phi i32 [ 0, %if.end36.i ], [ %add177.i, %for.end173.i ] + %or.cond514517.i = and i1 false, undef + br i1 %or.cond514517.i, label %while.body85.i, label %if.end117.i + +while.body85.i: ; preds = %while.body85.i, %while.body.i188 + %aFreq.0518.i = phi i32 [ %add93.i, %while.body85.i ], [ 0, %while.body.i188 ] + %inc87.i = add nsw i32 0, 1 + %tmp91.i = load i32* undef, align 4, !tbaa !0 + %add93.i = add nsw i32 %tmp91.i, %aFreq.0518.i + %or.cond514.i = and i1 undef, false + br i1 %or.cond514.i, label %while.body85.i, label %while.end.i + +while.end.i: ; preds = %while.body85.i + br i1 undef, label %land.lhs.true.i, label %if.end117.i + +land.lhs.true.i: ; preds = %while.end.i + br i1 undef, label %land.lhs.true103.i, label %if.end117.i + +land.lhs.true103.i: ; preds = %land.lhs.true.i + br i1 undef, label %if.then108.i, label %if.end117.i + +if.then108.i: ; preds = %land.lhs.true103.i + br label %if.end117.i + +if.end117.i: ; preds = %if.then108.i, %land.lhs.true103.i, %land.lhs.true.i, %while.end.i, %while.body.i188 + %aFreq.1.i = phi i32 [ %aFreq.0518.i, %if.then108.i ], [ %add93.i, %land.lhs.true103.i ], [ %add93.i, %land.lhs.true.i ], [ %add93.i, %while.end.i ], [ 0, %while.body.i188 ] + %ge.1.i = phi i32 [ 0, %if.then108.i ], [ %inc87.i, %land.lhs.true103.i ], [ %inc87.i, %land.lhs.true.i ], [ %inc87.i, %while.end.i ], [ 0, %while.body.i188 ] + br i1 undef, label %if.then122.i, label %for.cond138.preheader.i + +if.then122.i: ; preds = %if.end117.i + call void (...)* 
@fprintf(i32 undef, i32 %gs.0526.i, i32 %ge.1.i, i32 %aFreq.1.i, double undef) nounwind + br label %for.cond138.preheader.i + +for.cond138.preheader.i: ; preds = %if.then122.i, %if.end117.i + br i1 undef, label %for.body143.lr.ph.i, label %for.end173.i + +for.body143.lr.ph.i: ; preds = %for.cond138.preheader.i + br label %for.body143.i + +for.body143.i: ; preds = %for.body143.i, %for.body143.lr.ph.i + br i1 undef, label %for.end173.i, label %for.body143.i + +for.end173.i: ; preds = %for.body143.i, %for.cond138.preheader.i + %add177.i = add nsw i32 %ge.1.i, 1 + %cmp73.i = icmp sgt i32 undef, 0 + br i1 %cmp73.i, label %while.body.i188, label %for.cond182.preheader.i + +for.inc220.us.i: ; preds = %for.cond182.preheader.i + unreachable + +while.body300.preheader.i: ; preds = %for.cond182.preheader.i + br i1 undef, label %for.end335.i, label %while.end2742.i + +for.end335.i: ; preds = %for.end2039.i, %while.body300.preheader.i + br label %for.body2021.i + +for.body2021.i: ; preds = %for.body2021.i, %for.end335.i + br i1 undef, label %for.body2021.i, label %for.end2039.i + +for.end2039.i: ; preds = %for.body2021.i + br label %for.end335.i + +while.end2742.i: ; preds = %while.body300.preheader.i + br i1 undef, label %if.then2748.i, label %for.body2778.i + +if.then2748.i: ; preds = %while.end2742.i + unreachable + +for.body2778.i: ; preds = %while.end2742.i + unreachable + +for.inc27.us.5.i: ; preds = %if.end.i + br label %for.end30.i + +if.end85: ; preds = %entry + ret void +} + +declare void @fprintf(...) nounwind + +!0 = metadata !{metadata !"int", metadata !1} +!1 = metadata !{metadata !"omnipotent char", metadata !2} +!2 = metadata !{metadata !"Simple C/C++ TBAA", null} diff --git a/test/CodeGen/X86/2011-09-18-sse2cmp.ll b/test/CodeGen/X86/2011-09-18-sse2cmp.ll new file mode 100644 index 0000000..844d674 --- /dev/null +++ b/test/CodeGen/X86/2011-09-18-sse2cmp.ll @@ -0,0 +1,12 @@ +;RUN: llc < %s -march=x86 -mcpu=yonah -promote-elements -mattr=+sse2,-sse41 | FileCheck %s + +;CHECK: @max +;CHECK: cmplepd +;CHECK: ret + +define <2 x double> @max(<2 x double> %x, <2 x double> %y) { + %max_is_x = fcmp oge <2 x double> %x, %y + %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y + ret <2 x double> %max +} + diff --git a/test/CodeGen/X86/2011-09-21-setcc-bug.ll b/test/CodeGen/X86/2011-09-21-setcc-bug.ll new file mode 100644 index 0000000..ed5649c --- /dev/null +++ b/test/CodeGen/X86/2011-09-21-setcc-bug.ll @@ -0,0 +1,27 @@ +; RUN: llc < %s -march=x86-64 -mcpu=corei7 -promote-elements -mattr=+sse41 + +; Make sure we are not crashing on this code. 
+ +define void @load_4_i8(<4 x i8>* %k, <4 x i8>* %y, <4 x double>* %A1, <4 x double>* %A0) { + %A = load <4 x i8>* %k + %B = load <4 x i8>* %y + %C = load <4 x double>* %A0 + %D= load <4 x double>* %A1 + %M = icmp uge <4 x i8> %A, %B + %T = select <4 x i1> %M, <4 x double> %C, <4 x double> %D + store <4 x double> %T, <4 x double>* undef + ret void +} + + +define void @load_256_i8(<256 x i8>* %k, <256 x i8>* %y, <256 x double>* %A1, <256 x double>* %A0) { + %A = load <256 x i8>* %k + %B = load <256 x i8>* %y + %C = load <256 x double>* %A0 + %D= load <256 x double>* %A1 + %M = icmp uge <256 x i8> %A, %B + %T = select <256 x i1> %M, <256 x double> %C, <256 x double> %D + store <256 x double> %T, <256 x double>* undef + ret void +} + diff --git a/test/CodeGen/X86/2011-10-11-SpillDead.ll b/test/CodeGen/X86/2011-10-11-SpillDead.ll new file mode 100644 index 0000000..8e70d65 --- /dev/null +++ b/test/CodeGen/X86/2011-10-11-SpillDead.ll @@ -0,0 +1,19 @@ +; RUN: llc < %s -verify-regalloc +; PR11125 +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.7" + +; The inline asm takes %x as a GR32_ABCD virtual register. +; The call to @g forces a spill of that register. +; +; The asm has a dead output tied to %x. +; Verify that the spiller creates a value number for that dead def. +; +define void @f(i32 %x) nounwind uwtable ssp { +entry: + tail call void @g() nounwind + %0 = tail call i32 asm sideeffect "foo $0", "=Q,0,~{ebx},~{dirflag},~{fpsr},~{flags}"(i32 %x) nounwind + ret void +} + +declare void @g() diff --git a/test/CodeGen/X86/2011-10-11-srl.ll b/test/CodeGen/X86/2011-10-11-srl.ll new file mode 100644 index 0000000..cf9d36f --- /dev/null +++ b/test/CodeGen/X86/2011-10-11-srl.ll @@ -0,0 +1,11 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -promote-elements -mattr=-sse41 + +target triple = "x86_64-unknown-linux-gnu" + +define void @m387(<2 x i8>* %p, <2 x i16>* %q) { + %t = load <2 x i8>* %p + %r = sext <2 x i8> %t to <2 x i16> + store <2 x i16> %r, <2 x i16>* %q + ret void +} + diff --git a/test/CodeGen/X86/2011-10-12-MachineCSE.ll b/test/CodeGen/X86/2011-10-12-MachineCSE.ll new file mode 100644 index 0000000..cd15f84 --- /dev/null +++ b/test/CodeGen/X86/2011-10-12-MachineCSE.ll @@ -0,0 +1,116 @@ +; RUN: llc -verify-machineinstrs < %s +; <rdar://problem/10270968> +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.7.2" + +%struct.optab = type { i32, [59 x %struct.anon.3] } +%struct.anon.3 = type { i32, %struct.rtx_def* } +%struct.rtx_def = type { [2 x i8], i8, i8, [1 x %union.rtunion_def] } +%union.rtunion_def = type { i64 } +%struct.insn_data = type { i8*, i8*, %struct.rtx_def* (%struct.rtx_def*, ...)*, %struct.insn_operand_data*, i8, i8, i8, i8 } +%struct.insn_operand_data = type { i32 (%struct.rtx_def*, i32)*, i8*, [2 x i8], i8, i8 } + +@optab_table = external global [49 x %struct.optab*], align 16 +@insn_data = external constant [0 x %struct.insn_data] + +define %struct.rtx_def* @gen_add3_insn(%struct.rtx_def* %r0, %struct.rtx_def* %r1, %struct.rtx_def* %c) nounwind uwtable ssp { +entry: + %0 = bitcast %struct.rtx_def* %r0 to i32* + %1 = load i32* %0, align 8 + %2 = lshr i32 %1, 16 + %bf.clear = and i32 %2, 255 + %idxprom = sext i32 %bf.clear to i64 + %3 = load 
%struct.optab** getelementptr inbounds ([49 x %struct.optab*]* @optab_table, i32 0, i64 0), align 8, !tbaa !0 + %handlers = getelementptr inbounds %struct.optab* %3, i32 0, i32 1 + %arrayidx = getelementptr inbounds [59 x %struct.anon.3]* %handlers, i32 0, i64 %idxprom + %insn_code = getelementptr inbounds %struct.anon.3* %arrayidx, i32 0, i32 0 + %4 = load i32* %insn_code, align 4, !tbaa !3 + %cmp = icmp eq i32 %4, 1317 + br i1 %cmp, label %if.then, label %lor.lhs.false + +lor.lhs.false: ; preds = %entry + %idxprom1 = sext i32 %4 to i64 + %arrayidx2 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom1 + %operand = getelementptr inbounds %struct.insn_data* %arrayidx2, i32 0, i32 3 + %5 = load %struct.insn_operand_data** %operand, align 8, !tbaa !0 + %arrayidx3 = getelementptr inbounds %struct.insn_operand_data* %5, i64 0 + %predicate = getelementptr inbounds %struct.insn_operand_data* %arrayidx3, i32 0, i32 0 + %6 = load i32 (%struct.rtx_def*, i32)** %predicate, align 8, !tbaa !0 + %idxprom4 = sext i32 %4 to i64 + %arrayidx5 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom4 + %operand6 = getelementptr inbounds %struct.insn_data* %arrayidx5, i32 0, i32 3 + %7 = load %struct.insn_operand_data** %operand6, align 8, !tbaa !0 + %arrayidx7 = getelementptr inbounds %struct.insn_operand_data* %7, i64 0 + %8 = bitcast %struct.insn_operand_data* %arrayidx7 to i8* + %bf.field.offs = getelementptr i8* %8, i32 16 + %9 = bitcast i8* %bf.field.offs to i32* + %10 = load i32* %9, align 8 + %bf.clear8 = and i32 %10, 65535 + %call = tail call i32 %6(%struct.rtx_def* %r0, i32 %bf.clear8) + %tobool = icmp ne i32 %call, 0 + br i1 %tobool, label %lor.lhs.false9, label %if.then + +lor.lhs.false9: ; preds = %lor.lhs.false + %idxprom10 = sext i32 %4 to i64 + %arrayidx11 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom10 + %operand12 = getelementptr inbounds %struct.insn_data* %arrayidx11, i32 0, i32 3 + %11 = load %struct.insn_operand_data** %operand12, align 8, !tbaa !0 + %arrayidx13 = getelementptr inbounds %struct.insn_operand_data* %11, i64 1 + %predicate14 = getelementptr inbounds %struct.insn_operand_data* %arrayidx13, i32 0, i32 0 + %12 = load i32 (%struct.rtx_def*, i32)** %predicate14, align 8, !tbaa !0 + %idxprom15 = sext i32 %4 to i64 + %arrayidx16 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom15 + %operand17 = getelementptr inbounds %struct.insn_data* %arrayidx16, i32 0, i32 3 + %13 = load %struct.insn_operand_data** %operand17, align 8, !tbaa !0 + %arrayidx18 = getelementptr inbounds %struct.insn_operand_data* %13, i64 1 + %14 = bitcast %struct.insn_operand_data* %arrayidx18 to i8* + %bf.field.offs19 = getelementptr i8* %14, i32 16 + %15 = bitcast i8* %bf.field.offs19 to i32* + %16 = load i32* %15, align 8 + %bf.clear20 = and i32 %16, 65535 + %call21 = tail call i32 %12(%struct.rtx_def* %r1, i32 %bf.clear20) + %tobool22 = icmp ne i32 %call21, 0 + br i1 %tobool22, label %lor.lhs.false23, label %if.then + +lor.lhs.false23: ; preds = %lor.lhs.false9 + %idxprom24 = sext i32 %4 to i64 + %arrayidx25 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom24 + %operand26 = getelementptr inbounds %struct.insn_data* %arrayidx25, i32 0, i32 3 + %17 = load %struct.insn_operand_data** %operand26, align 8, !tbaa !0 + %arrayidx27 = getelementptr inbounds %struct.insn_operand_data* %17, i64 2 + %predicate28 = getelementptr inbounds %struct.insn_operand_data* 
%arrayidx27, i32 0, i32 0 + %18 = load i32 (%struct.rtx_def*, i32)** %predicate28, align 8, !tbaa !0 + %idxprom29 = sext i32 %4 to i64 + %arrayidx30 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom29 + %operand31 = getelementptr inbounds %struct.insn_data* %arrayidx30, i32 0, i32 3 + %19 = load %struct.insn_operand_data** %operand31, align 8, !tbaa !0 + %arrayidx32 = getelementptr inbounds %struct.insn_operand_data* %19, i64 2 + %20 = bitcast %struct.insn_operand_data* %arrayidx32 to i8* + %bf.field.offs33 = getelementptr i8* %20, i32 16 + %21 = bitcast i8* %bf.field.offs33 to i32* + %22 = load i32* %21, align 8 + %bf.clear34 = and i32 %22, 65535 + %call35 = tail call i32 %18(%struct.rtx_def* %c, i32 %bf.clear34) + %tobool36 = icmp ne i32 %call35, 0 + br i1 %tobool36, label %if.end, label %if.then + +if.then: ; preds = %lor.lhs.false23, %lor.lhs.false9, %lor.lhs.false, %entry + br label %return + +if.end: ; preds = %lor.lhs.false23 + %idxprom37 = sext i32 %4 to i64 + %arrayidx38 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom37 + %genfun = getelementptr inbounds %struct.insn_data* %arrayidx38, i32 0, i32 2 + %23 = load %struct.rtx_def* (%struct.rtx_def*, ...)** %genfun, align 8, !tbaa !0 + %call39 = tail call %struct.rtx_def* (%struct.rtx_def*, ...)* %23(%struct.rtx_def* %r0, %struct.rtx_def* %r1, %struct.rtx_def* %c) + br label %return + +return: ; preds = %if.end, %if.then + %24 = phi %struct.rtx_def* [ %call39, %if.end ], [ null, %if.then ] + ret %struct.rtx_def* %24 +} + +!0 = metadata !{metadata !"any pointer", metadata !1} +!1 = metadata !{metadata !"omnipotent char", metadata !2} +!2 = metadata !{metadata !"Simple C/C++ TBAA", null} +!3 = metadata !{metadata !"_ZTS9insn_code", metadata !1} diff --git a/test/CodeGen/X86/2011-10-18-FastISel-VectorParams.ll b/test/CodeGen/X86/2011-10-18-FastISel-VectorParams.ll new file mode 100644 index 0000000..c828b4b --- /dev/null +++ b/test/CodeGen/X86/2011-10-18-FastISel-VectorParams.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=x86 -fast-isel < %s | FileCheck %s +; <rdar://problem/10215997> +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128" +target triple = "i386-apple-macosx10.7" + +define void @vectortest() nounwind ssp { +entry: + %p1 = alloca <4 x float>, align 16 + %p2 = alloca <4 x float>, align 16 + %p3 = alloca <4 x float>, align 16 + %p4 = alloca <4 x float>, align 16 + %p5 = alloca <4 x float>, align 16 + store <4 x float> <float 0x3FF19999A0000000, float 0x3FF3333340000000, float 0x3FF4CCCCC0000000, float 0x3FF6666660000000>, <4 x float>* %p1, align 16 + store <4 x float> <float 0x4000CCCCC0000000, float 0x40019999A0000000, float 0x4002666660000000, float 0x4003333340000000>, <4 x float>* %p2, align 16 + store <4 x float> <float 0x4008CCCCC0000000, float 0x40099999A0000000, float 0x400A666660000000, float 0x400B333340000000>, <4 x float>* %p3, align 16 + store <4 x float> <float 0x4010666660000000, float 0x4010CCCCC0000000, float 0x4011333340000000, float 0x40119999A0000000>, <4 x float>* %p4, align 16 + store <4 x float> <float 0x4014666660000000, float 0x4014CCCCC0000000, float 0x4015333340000000, float 0x40159999A0000000>, <4 x float>* %p5, align 16 + %0 = load <4 x float>* %p1, align 16 + %1 = load <4 x float>* %p2, align 16 + %2 = load <4 x float>* %p3, align 16 + %3 = load <4 x float>* %p4, align 16 + %4 = load <4 x float>* %p5, align 16 +; CHECK: movaps 
{{%xmm[0-7]}}, (%esp) +; CHECK-NEXT: calll _dovectortest + call void @dovectortest(<4 x float> %0, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4) + ret void +} + +declare void @dovectortest(<4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>) diff --git a/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll b/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll new file mode 100644 index 0000000..a720753 --- /dev/null +++ b/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll @@ -0,0 +1,28 @@ +; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +%union.anon = type { <2 x i8> } + +@i = global <2 x i8> <i8 150, i8 100>, align 8 +@j = global <2 x i8> <i8 10, i8 13>, align 8 +@res = common global %union.anon zeroinitializer, align 8 + +; Make sure we load the constants i and j starting offset zero. +; Also make sure that we sign-extend it. +; Based on /gcc-4_2-testsuite/src/gcc.c-torture/execute/pr23135.c + +; CHECK: main +define i32 @main() nounwind uwtable { +entry: +; CHECK: movsbq j(%rip), % +; CHECK: movsbq i(%rip), % + %0 = load <2 x i8>* @i, align 8 + %1 = load <2 x i8>* @j, align 8 + %div = sdiv <2 x i8> %1, %0 + store <2 x i8> %div, <2 x i8>* getelementptr inbounds (%union.anon* @res, i32 0, i32 0), align 8 + ret i32 0 +; CHECK: ret +} + diff --git a/test/CodeGen/X86/2011-10-19-widen_vselect.ll b/test/CodeGen/X86/2011-10-19-widen_vselect.ll new file mode 100644 index 0000000..e08c5b2 --- /dev/null +++ b/test/CodeGen/X86/2011-10-19-widen_vselect.ll @@ -0,0 +1,68 @@ +; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s + +target triple = "x86_64-unknown-linux-gnu" + +; Make sure that we don't crash when legalizing vselect and vsetcc and that
+ +; CHECK: simple_widen +; CHECK: blend +; CHECK: ret +define void @simple_widen() { +entry: + %0 = select <2 x i1> undef, <2 x float> undef, <2 x float> undef + store <2 x float> %0, <2 x float>* undef + ret void +} + +; CHECK: complex_inreg_work +; CHECK: blend +; CHECK: ret + +define void @complex_inreg_work() { +entry: + %0 = fcmp oeq <2 x float> undef, undef + %1 = select <2 x i1> %0, <2 x float> undef, <2 x float> undef + store <2 x float> %1, <2 x float>* undef + ret void +} + +; CHECK: zero_test +; CHECK: blend +; CHECK: ret + +define void @zero_test() { +entry: + %0 = select <2 x i1> undef, <2 x float> undef, <2 x float> zeroinitializer + store <2 x float> %0, <2 x float>* undef + ret void +} + +; CHECK: full_test +; CHECK: blend +; CHECK: ret + +define void @full_test() { + entry: + %Cy300 = alloca <4 x float> + %Cy11a = alloca <2 x float> + %Cy118 = alloca <2 x float> + %Cy119 = alloca <2 x float> + br label %B1 + + B1: ; preds = %entry + %0 = load <2 x float>* %Cy119 + %1 = fptosi <2 x float> %0 to <2 x i32> + %2 = sitofp <2 x i32> %1 to <2 x float> + %3 = fcmp ogt <2 x float> %0, zeroinitializer + %4 = fadd <2 x float> %2, <float 1.000000e+00, float 1.000000e+00> + %5 = select <2 x i1> %3, <2 x float> %4, <2 x float> %2 + %6 = fcmp oeq <2 x float> %2, %0 + %7 = select <2 x i1> %6, <2 x float> %0, <2 x float> %5 + store <2 x float> %7, <2 x float>* %Cy118 + %8 = load <2 x float>* %Cy118 + store <2 x float> %8, <2 x float>* %Cy11a + ret void +} + + diff --git a/test/CodeGen/X86/Atomics-32.ll b/test/CodeGen/X86/Atomics-32.ll deleted file mode 100644 index 0e9b73e..0000000 --- a/test/CodeGen/X86/Atomics-32.ll +++ /dev/null @@ -1,818 +0,0 @@ -; RUN: llc < %s -march=x86 > %t -;; Note the 64-bit variants are not supported yet (in 32-bit mode). 
-; ModuleID = 'Atomics.c' -target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" -target triple = "i386-apple-darwin8" -@sc = common global i8 0 ; <i8*> [#uses=52] -@uc = common global i8 0 ; <i8*> [#uses=100] -@ss = common global i16 0 ; <i16*> [#uses=15] -@us = common global i16 0 ; <i16*> [#uses=15] -@si = common global i32 0 ; <i32*> [#uses=15] -@ui = common global i32 0 ; <i32*> [#uses=23] -@sl = common global i32 0 ; <i32*> [#uses=15] -@ul = common global i32 0 ; <i32*> [#uses=15] - -define void @test_op_ignore() nounwind { -entry: - call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=0] - call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=0] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1] - call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=0] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1] - call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=0] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=0] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=0] - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 1 ) ; <i32>:11 [#uses=0] - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 1 ) ; <i32>:13 [#uses=0] - call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:14 [#uses=0] - call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:15 [#uses=0] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1] - call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 1 ) ; <i16>:17 [#uses=0] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1] - call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 1 ) ; <i16>:19 [#uses=0] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 1 ) ; <i32>:21 [#uses=0] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 1 ) ; <i32>:23 [#uses=0] - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:24 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 1 ) ; <i32>:25 [#uses=0] - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:26 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 1 ) ; <i32>:27 [#uses=0] - call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:28 [#uses=0] - call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:29 [#uses=0] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1] - call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 1 ) ; <i16>:31 [#uses=0] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1] - call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 1 ) ; <i16>:33 [#uses=0] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 1 ) ; <i32>:35 [#uses=0] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 1 ) ; <i32>:37 [#uses=0] - bitcast i8* bitcast (i32* @sl 
to i8*) to i32* ; <i32*>:38 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 1 ) ; <i32>:39 [#uses=0] - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:40 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 1 ) ; <i32>:41 [#uses=0] - call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:42 [#uses=0] - call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:43 [#uses=0] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1] - call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 1 ) ; <i16>:45 [#uses=0] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1] - call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 1 ) ; <i16>:47 [#uses=0] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 1 ) ; <i32>:49 [#uses=0] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 1 ) ; <i32>:51 [#uses=0] - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:52 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 1 ) ; <i32>:53 [#uses=0] - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 1 ) ; <i32>:55 [#uses=0] - call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:56 [#uses=0] - call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:57 [#uses=0] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1] - call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 1 ) ; <i16>:59 [#uses=0] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1] - call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 1 ) ; <i16>:61 [#uses=0] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 1 ) ; <i32>:63 [#uses=0] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 1 ) ; <i32>:65 [#uses=0] - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:66 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 1 ) ; <i32>:67 [#uses=0] - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:68 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 1 ) ; <i32>:69 [#uses=0] - call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:70 [#uses=0] - call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:71 [#uses=0] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1] - call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 1 ) ; <i16>:73 [#uses=0] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1] - call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 1 ) ; <i16>:75 [#uses=0] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 1 ) ; <i32>:77 [#uses=0] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 1 ) ; <i32>:79 [#uses=0] - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:80 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 1 ) ; <i32>:81 [#uses=0] - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:82 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 1 ) ; <i32>:83 [#uses=0] - br label %return - -return: ; preds = %entry - ret void 
-} - -declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind - -declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind - -declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind - -declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind - -declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind - -declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind - -define void @test_fetch_and_op() nounwind { -entry: - call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:0 [#uses=1] - store i8 %0, i8* @sc, align 1 - call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:1 [#uses=1] - store i8 %1, i8* @uc, align 1 - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1] - call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 ) ; <i16>:3 [#uses=1] - store i16 %3, i16* @ss, align 2 - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1] - call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 ) ; <i16>:5 [#uses=1] - store i16 %5, i16* @us, align 2 - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 ) ; <i32>:7 [#uses=1] - store i32 %7, i32* @si, align 4 - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 ) ; <i32>:9 [#uses=1] - store i32 %9, i32* @ui, align 4 - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 11 ) ; <i32>:11 [#uses=1] - store i32 %11, i32* @sl, align 4 - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 11 ) ; <i32>:13 [#uses=1] - store i32 %13, i32* @ul, align 4 - call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:14 [#uses=1] - store i8 %14, i8* @sc, align 1 - call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:15 [#uses=1] - store i8 %15, i8* @uc, align 1 - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1] - call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 11 ) ; <i16>:17 [#uses=1] - store i16 %17, i16* @ss, align 2 - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1] - call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 11 ) ; <i16>:19 [#uses=1] - store i16 %19, i16* @us, align 2 - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 11 ) ; <i32>:21 [#uses=1] - store i32 %21, i32* @si, align 4 - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 11 ) ; <i32>:23 [#uses=1] - store i32 %23, i32* @ui, align 4 - 
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:24 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 11 ) ; <i32>:25 [#uses=1] - store i32 %25, i32* @sl, align 4 - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:26 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 11 ) ; <i32>:27 [#uses=1] - store i32 %27, i32* @ul, align 4 - call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:28 [#uses=1] - store i8 %28, i8* @sc, align 1 - call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:29 [#uses=1] - store i8 %29, i8* @uc, align 1 - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1] - call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 11 ) ; <i16>:31 [#uses=1] - store i16 %31, i16* @ss, align 2 - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1] - call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 11 ) ; <i16>:33 [#uses=1] - store i16 %33, i16* @us, align 2 - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 11 ) ; <i32>:35 [#uses=1] - store i32 %35, i32* @si, align 4 - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 11 ) ; <i32>:37 [#uses=1] - store i32 %37, i32* @ui, align 4 - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:38 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 11 ) ; <i32>:39 [#uses=1] - store i32 %39, i32* @sl, align 4 - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:40 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 11 ) ; <i32>:41 [#uses=1] - store i32 %41, i32* @ul, align 4 - call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:42 [#uses=1] - store i8 %42, i8* @sc, align 1 - call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:43 [#uses=1] - store i8 %43, i8* @uc, align 1 - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1] - call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 11 ) ; <i16>:45 [#uses=1] - store i16 %45, i16* @ss, align 2 - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1] - call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 11 ) ; <i16>:47 [#uses=1] - store i16 %47, i16* @us, align 2 - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 11 ) ; <i32>:49 [#uses=1] - store i32 %49, i32* @si, align 4 - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 11 ) ; <i32>:51 [#uses=1] - store i32 %51, i32* @ui, align 4 - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:52 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 11 ) ; <i32>:53 [#uses=1] - store i32 %53, i32* @sl, align 4 - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 11 ) ; <i32>:55 [#uses=1] - store i32 %55, i32* @ul, align 4 - call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:56 [#uses=1] - store i8 %56, i8* @sc, align 1 - call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:57 [#uses=1] - store i8 %57, i8* @uc, align 1 - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1] - call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 11 ) ; <i16>:59 [#uses=1] - store i16 %59, i16* @ss, align 2 - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 
[#uses=1] - call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 11 ) ; <i16>:61 [#uses=1] - store i16 %61, i16* @us, align 2 - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 11 ) ; <i32>:63 [#uses=1] - store i32 %63, i32* @si, align 4 - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 11 ) ; <i32>:65 [#uses=1] - store i32 %65, i32* @ui, align 4 - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:66 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 11 ) ; <i32>:67 [#uses=1] - store i32 %67, i32* @sl, align 4 - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:68 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 11 ) ; <i32>:69 [#uses=1] - store i32 %69, i32* @ul, align 4 - call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:70 [#uses=1] - store i8 %70, i8* @sc, align 1 - call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:71 [#uses=1] - store i8 %71, i8* @uc, align 1 - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1] - call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 11 ) ; <i16>:73 [#uses=1] - store i16 %73, i16* @ss, align 2 - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1] - call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 11 ) ; <i16>:75 [#uses=1] - store i16 %75, i16* @us, align 2 - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 11 ) ; <i32>:77 [#uses=1] - store i32 %77, i32* @si, align 4 - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 11 ) ; <i32>:79 [#uses=1] - store i32 %79, i32* @ui, align 4 - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:80 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 11 ) ; <i32>:81 [#uses=1] - store i32 %81, i32* @sl, align 4 - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:82 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 11 ) ; <i32>:83 [#uses=1] - store i32 %83, i32* @ul, align 4 - br label %return - -return: ; preds = %entry - ret void -} - -define void @test_op_and_fetch() nounwind { -entry: - load i8* @uc, align 1 ; <i8>:0 [#uses=1] - zext i8 %0 to i32 ; <i32>:1 [#uses=1] - trunc i32 %1 to i8 ; <i8>:2 [#uses=2] - call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %2 ) ; <i8>:3 [#uses=1] - add i8 %3, %2 ; <i8>:4 [#uses=1] - store i8 %4, i8* @sc, align 1 - load i8* @uc, align 1 ; <i8>:5 [#uses=1] - zext i8 %5 to i32 ; <i32>:6 [#uses=1] - trunc i32 %6 to i8 ; <i8>:7 [#uses=2] - call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %7 ) ; <i8>:8 [#uses=1] - add i8 %8, %7 ; <i8>:9 [#uses=1] - store i8 %9, i8* @uc, align 1 - load i8* @uc, align 1 ; <i8>:10 [#uses=1] - zext i8 %10 to i32 ; <i32>:11 [#uses=1] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:12 [#uses=1] - trunc i32 %11 to i16 ; <i16>:13 [#uses=2] - call i16 @llvm.atomic.load.add.i16.p0i16( i16* %12, i16 %13 ) ; <i16>:14 [#uses=1] - add i16 %14, %13 ; <i16>:15 [#uses=1] - store i16 %15, i16* @ss, align 2 - load i8* @uc, align 1 ; <i8>:16 [#uses=1] - zext i8 %16 to i32 ; <i32>:17 [#uses=1] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1] - trunc i32 %17 to i16 ; <i16>:19 [#uses=2] - call i16 @llvm.atomic.load.add.i16.p0i16( i16* %18, i16 %19 ) ; <i16>:20 [#uses=1] - add i16 
%20, %19 ; <i16>:21 [#uses=1] - store i16 %21, i16* @us, align 2 - load i8* @uc, align 1 ; <i8>:22 [#uses=1] - zext i8 %22 to i32 ; <i32>:23 [#uses=2] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:24 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %24, i32 %23 ) ; <i32>:25 [#uses=1] - add i32 %25, %23 ; <i32>:26 [#uses=1] - store i32 %26, i32* @si, align 4 - load i8* @uc, align 1 ; <i8>:27 [#uses=1] - zext i8 %27 to i32 ; <i32>:28 [#uses=2] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:29 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %29, i32 %28 ) ; <i32>:30 [#uses=1] - add i32 %30, %28 ; <i32>:31 [#uses=1] - store i32 %31, i32* @ui, align 4 - load i8* @uc, align 1 ; <i8>:32 [#uses=1] - zext i8 %32 to i32 ; <i32>:33 [#uses=2] - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:34 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %34, i32 %33 ) ; <i32>:35 [#uses=1] - add i32 %35, %33 ; <i32>:36 [#uses=1] - store i32 %36, i32* @sl, align 4 - load i8* @uc, align 1 ; <i8>:37 [#uses=1] - zext i8 %37 to i32 ; <i32>:38 [#uses=2] - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:39 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %39, i32 %38 ) ; <i32>:40 [#uses=1] - add i32 %40, %38 ; <i32>:41 [#uses=1] - store i32 %41, i32* @ul, align 4 - load i8* @uc, align 1 ; <i8>:42 [#uses=1] - zext i8 %42 to i32 ; <i32>:43 [#uses=1] - trunc i32 %43 to i8 ; <i8>:44 [#uses=2] - call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %44 ) ; <i8>:45 [#uses=1] - sub i8 %45, %44 ; <i8>:46 [#uses=1] - store i8 %46, i8* @sc, align 1 - load i8* @uc, align 1 ; <i8>:47 [#uses=1] - zext i8 %47 to i32 ; <i32>:48 [#uses=1] - trunc i32 %48 to i8 ; <i8>:49 [#uses=2] - call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %49 ) ; <i8>:50 [#uses=1] - sub i8 %50, %49 ; <i8>:51 [#uses=1] - store i8 %51, i8* @uc, align 1 - load i8* @uc, align 1 ; <i8>:52 [#uses=1] - zext i8 %52 to i32 ; <i32>:53 [#uses=1] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:54 [#uses=1] - trunc i32 %53 to i16 ; <i16>:55 [#uses=2] - call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %54, i16 %55 ) ; <i16>:56 [#uses=1] - sub i16 %56, %55 ; <i16>:57 [#uses=1] - store i16 %57, i16* @ss, align 2 - load i8* @uc, align 1 ; <i8>:58 [#uses=1] - zext i8 %58 to i32 ; <i32>:59 [#uses=1] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1] - trunc i32 %59 to i16 ; <i16>:61 [#uses=2] - call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %60, i16 %61 ) ; <i16>:62 [#uses=1] - sub i16 %62, %61 ; <i16>:63 [#uses=1] - store i16 %63, i16* @us, align 2 - load i8* @uc, align 1 ; <i8>:64 [#uses=1] - zext i8 %64 to i32 ; <i32>:65 [#uses=2] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:66 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %66, i32 %65 ) ; <i32>:67 [#uses=1] - sub i32 %67, %65 ; <i32>:68 [#uses=1] - store i32 %68, i32* @si, align 4 - load i8* @uc, align 1 ; <i8>:69 [#uses=1] - zext i8 %69 to i32 ; <i32>:70 [#uses=2] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:71 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %71, i32 %70 ) ; <i32>:72 [#uses=1] - sub i32 %72, %70 ; <i32>:73 [#uses=1] - store i32 %73, i32* @ui, align 4 - load i8* @uc, align 1 ; <i8>:74 [#uses=1] - zext i8 %74 to i32 ; <i32>:75 [#uses=2] - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:76 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %76, i32 %75 ) ; <i32>:77 [#uses=1] - sub i32 %77, %75 ; <i32>:78 [#uses=1] - store i32 %78, i32* @sl, align 4 - 
load i8* @uc, align 1 ; <i8>:79 [#uses=1] - zext i8 %79 to i32 ; <i32>:80 [#uses=2] - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:81 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %81, i32 %80 ) ; <i32>:82 [#uses=1] - sub i32 %82, %80 ; <i32>:83 [#uses=1] - store i32 %83, i32* @ul, align 4 - load i8* @uc, align 1 ; <i8>:84 [#uses=1] - zext i8 %84 to i32 ; <i32>:85 [#uses=1] - trunc i32 %85 to i8 ; <i8>:86 [#uses=2] - call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %86 ) ; <i8>:87 [#uses=1] - or i8 %87, %86 ; <i8>:88 [#uses=1] - store i8 %88, i8* @sc, align 1 - load i8* @uc, align 1 ; <i8>:89 [#uses=1] - zext i8 %89 to i32 ; <i32>:90 [#uses=1] - trunc i32 %90 to i8 ; <i8>:91 [#uses=2] - call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %91 ) ; <i8>:92 [#uses=1] - or i8 %92, %91 ; <i8>:93 [#uses=1] - store i8 %93, i8* @uc, align 1 - load i8* @uc, align 1 ; <i8>:94 [#uses=1] - zext i8 %94 to i32 ; <i32>:95 [#uses=1] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:96 [#uses=1] - trunc i32 %95 to i16 ; <i16>:97 [#uses=2] - call i16 @llvm.atomic.load.or.i16.p0i16( i16* %96, i16 %97 ) ; <i16>:98 [#uses=1] - or i16 %98, %97 ; <i16>:99 [#uses=1] - store i16 %99, i16* @ss, align 2 - load i8* @uc, align 1 ; <i8>:100 [#uses=1] - zext i8 %100 to i32 ; <i32>:101 [#uses=1] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:102 [#uses=1] - trunc i32 %101 to i16 ; <i16>:103 [#uses=2] - call i16 @llvm.atomic.load.or.i16.p0i16( i16* %102, i16 %103 ) ; <i16>:104 [#uses=1] - or i16 %104, %103 ; <i16>:105 [#uses=1] - store i16 %105, i16* @us, align 2 - load i8* @uc, align 1 ; <i8>:106 [#uses=1] - zext i8 %106 to i32 ; <i32>:107 [#uses=2] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:108 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %108, i32 %107 ) ; <i32>:109 [#uses=1] - or i32 %109, %107 ; <i32>:110 [#uses=1] - store i32 %110, i32* @si, align 4 - load i8* @uc, align 1 ; <i8>:111 [#uses=1] - zext i8 %111 to i32 ; <i32>:112 [#uses=2] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:113 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %113, i32 %112 ) ; <i32>:114 [#uses=1] - or i32 %114, %112 ; <i32>:115 [#uses=1] - store i32 %115, i32* @ui, align 4 - load i8* @uc, align 1 ; <i8>:116 [#uses=1] - zext i8 %116 to i32 ; <i32>:117 [#uses=2] - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:118 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %118, i32 %117 ) ; <i32>:119 [#uses=1] - or i32 %119, %117 ; <i32>:120 [#uses=1] - store i32 %120, i32* @sl, align 4 - load i8* @uc, align 1 ; <i8>:121 [#uses=1] - zext i8 %121 to i32 ; <i32>:122 [#uses=2] - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:123 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %123, i32 %122 ) ; <i32>:124 [#uses=1] - or i32 %124, %122 ; <i32>:125 [#uses=1] - store i32 %125, i32* @ul, align 4 - load i8* @uc, align 1 ; <i8>:126 [#uses=1] - zext i8 %126 to i32 ; <i32>:127 [#uses=1] - trunc i32 %127 to i8 ; <i8>:128 [#uses=2] - call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %128 ) ; <i8>:129 [#uses=1] - xor i8 %129, %128 ; <i8>:130 [#uses=1] - store i8 %130, i8* @sc, align 1 - load i8* @uc, align 1 ; <i8>:131 [#uses=1] - zext i8 %131 to i32 ; <i32>:132 [#uses=1] - trunc i32 %132 to i8 ; <i8>:133 [#uses=2] - call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %133 ) ; <i8>:134 [#uses=1] - xor i8 %134, %133 ; <i8>:135 [#uses=1] - store i8 %135, i8* @uc, align 1 - load i8* @uc, align 1 ; <i8>:136 [#uses=1] - zext i8 %136 to i32 ; <i32>:137 
[#uses=1] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:138 [#uses=1] - trunc i32 %137 to i16 ; <i16>:139 [#uses=2] - call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %138, i16 %139 ) ; <i16>:140 [#uses=1] - xor i16 %140, %139 ; <i16>:141 [#uses=1] - store i16 %141, i16* @ss, align 2 - load i8* @uc, align 1 ; <i8>:142 [#uses=1] - zext i8 %142 to i32 ; <i32>:143 [#uses=1] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:144 [#uses=1] - trunc i32 %143 to i16 ; <i16>:145 [#uses=2] - call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %144, i16 %145 ) ; <i16>:146 [#uses=1] - xor i16 %146, %145 ; <i16>:147 [#uses=1] - store i16 %147, i16* @us, align 2 - load i8* @uc, align 1 ; <i8>:148 [#uses=1] - zext i8 %148 to i32 ; <i32>:149 [#uses=2] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:150 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %150, i32 %149 ) ; <i32>:151 [#uses=1] - xor i32 %151, %149 ; <i32>:152 [#uses=1] - store i32 %152, i32* @si, align 4 - load i8* @uc, align 1 ; <i8>:153 [#uses=1] - zext i8 %153 to i32 ; <i32>:154 [#uses=2] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:155 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %155, i32 %154 ) ; <i32>:156 [#uses=1] - xor i32 %156, %154 ; <i32>:157 [#uses=1] - store i32 %157, i32* @ui, align 4 - load i8* @uc, align 1 ; <i8>:158 [#uses=1] - zext i8 %158 to i32 ; <i32>:159 [#uses=2] - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:160 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %160, i32 %159 ) ; <i32>:161 [#uses=1] - xor i32 %161, %159 ; <i32>:162 [#uses=1] - store i32 %162, i32* @sl, align 4 - load i8* @uc, align 1 ; <i8>:163 [#uses=1] - zext i8 %163 to i32 ; <i32>:164 [#uses=2] - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:165 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %165, i32 %164 ) ; <i32>:166 [#uses=1] - xor i32 %166, %164 ; <i32>:167 [#uses=1] - store i32 %167, i32* @ul, align 4 - load i8* @uc, align 1 ; <i8>:168 [#uses=1] - zext i8 %168 to i32 ; <i32>:169 [#uses=1] - trunc i32 %169 to i8 ; <i8>:170 [#uses=2] - call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %170 ) ; <i8>:171 [#uses=1] - and i8 %171, %170 ; <i8>:172 [#uses=1] - store i8 %172, i8* @sc, align 1 - load i8* @uc, align 1 ; <i8>:173 [#uses=1] - zext i8 %173 to i32 ; <i32>:174 [#uses=1] - trunc i32 %174 to i8 ; <i8>:175 [#uses=2] - call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %175 ) ; <i8>:176 [#uses=1] - and i8 %176, %175 ; <i8>:177 [#uses=1] - store i8 %177, i8* @uc, align 1 - load i8* @uc, align 1 ; <i8>:178 [#uses=1] - zext i8 %178 to i32 ; <i32>:179 [#uses=1] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:180 [#uses=1] - trunc i32 %179 to i16 ; <i16>:181 [#uses=2] - call i16 @llvm.atomic.load.and.i16.p0i16( i16* %180, i16 %181 ) ; <i16>:182 [#uses=1] - and i16 %182, %181 ; <i16>:183 [#uses=1] - store i16 %183, i16* @ss, align 2 - load i8* @uc, align 1 ; <i8>:184 [#uses=1] - zext i8 %184 to i32 ; <i32>:185 [#uses=1] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:186 [#uses=1] - trunc i32 %185 to i16 ; <i16>:187 [#uses=2] - call i16 @llvm.atomic.load.and.i16.p0i16( i16* %186, i16 %187 ) ; <i16>:188 [#uses=1] - and i16 %188, %187 ; <i16>:189 [#uses=1] - store i16 %189, i16* @us, align 2 - load i8* @uc, align 1 ; <i8>:190 [#uses=1] - zext i8 %190 to i32 ; <i32>:191 [#uses=2] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:192 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %192, i32 %191 ) ; <i32>:193 [#uses=1] - and 
i32 %193, %191 ; <i32>:194 [#uses=1] - store i32 %194, i32* @si, align 4 - load i8* @uc, align 1 ; <i8>:195 [#uses=1] - zext i8 %195 to i32 ; <i32>:196 [#uses=2] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:197 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %197, i32 %196 ) ; <i32>:198 [#uses=1] - and i32 %198, %196 ; <i32>:199 [#uses=1] - store i32 %199, i32* @ui, align 4 - load i8* @uc, align 1 ; <i8>:200 [#uses=1] - zext i8 %200 to i32 ; <i32>:201 [#uses=2] - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:202 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %202, i32 %201 ) ; <i32>:203 [#uses=1] - and i32 %203, %201 ; <i32>:204 [#uses=1] - store i32 %204, i32* @sl, align 4 - load i8* @uc, align 1 ; <i8>:205 [#uses=1] - zext i8 %205 to i32 ; <i32>:206 [#uses=2] - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:207 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %207, i32 %206 ) ; <i32>:208 [#uses=1] - and i32 %208, %206 ; <i32>:209 [#uses=1] - store i32 %209, i32* @ul, align 4 - load i8* @uc, align 1 ; <i8>:210 [#uses=1] - zext i8 %210 to i32 ; <i32>:211 [#uses=1] - trunc i32 %211 to i8 ; <i8>:212 [#uses=2] - call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %212 ) ; <i8>:213 [#uses=1] - xor i8 %213, -1 ; <i8>:214 [#uses=1] - and i8 %214, %212 ; <i8>:215 [#uses=1] - store i8 %215, i8* @sc, align 1 - load i8* @uc, align 1 ; <i8>:216 [#uses=1] - zext i8 %216 to i32 ; <i32>:217 [#uses=1] - trunc i32 %217 to i8 ; <i8>:218 [#uses=2] - call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %218 ) ; <i8>:219 [#uses=1] - xor i8 %219, -1 ; <i8>:220 [#uses=1] - and i8 %220, %218 ; <i8>:221 [#uses=1] - store i8 %221, i8* @uc, align 1 - load i8* @uc, align 1 ; <i8>:222 [#uses=1] - zext i8 %222 to i32 ; <i32>:223 [#uses=1] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:224 [#uses=1] - trunc i32 %223 to i16 ; <i16>:225 [#uses=2] - call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %224, i16 %225 ) ; <i16>:226 [#uses=1] - xor i16 %226, -1 ; <i16>:227 [#uses=1] - and i16 %227, %225 ; <i16>:228 [#uses=1] - store i16 %228, i16* @ss, align 2 - load i8* @uc, align 1 ; <i8>:229 [#uses=1] - zext i8 %229 to i32 ; <i32>:230 [#uses=1] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:231 [#uses=1] - trunc i32 %230 to i16 ; <i16>:232 [#uses=2] - call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %231, i16 %232 ) ; <i16>:233 [#uses=1] - xor i16 %233, -1 ; <i16>:234 [#uses=1] - and i16 %234, %232 ; <i16>:235 [#uses=1] - store i16 %235, i16* @us, align 2 - load i8* @uc, align 1 ; <i8>:236 [#uses=1] - zext i8 %236 to i32 ; <i32>:237 [#uses=2] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:238 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %238, i32 %237 ) ; <i32>:239 [#uses=1] - xor i32 %239, -1 ; <i32>:240 [#uses=1] - and i32 %240, %237 ; <i32>:241 [#uses=1] - store i32 %241, i32* @si, align 4 - load i8* @uc, align 1 ; <i8>:242 [#uses=1] - zext i8 %242 to i32 ; <i32>:243 [#uses=2] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:244 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %244, i32 %243 ) ; <i32>:245 [#uses=1] - xor i32 %245, -1 ; <i32>:246 [#uses=1] - and i32 %246, %243 ; <i32>:247 [#uses=1] - store i32 %247, i32* @ui, align 4 - load i8* @uc, align 1 ; <i8>:248 [#uses=1] - zext i8 %248 to i32 ; <i32>:249 [#uses=2] - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:250 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %250, i32 %249 ) ; <i32>:251 [#uses=1] - xor i32 %251, -1 ; 
<i32>:252 [#uses=1] - and i32 %252, %249 ; <i32>:253 [#uses=1] - store i32 %253, i32* @sl, align 4 - load i8* @uc, align 1 ; <i8>:254 [#uses=1] - zext i8 %254 to i32 ; <i32>:255 [#uses=2] - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:256 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %256, i32 %255 ) ; <i32>:257 [#uses=1] - xor i32 %257, -1 ; <i32>:258 [#uses=1] - and i32 %258, %255 ; <i32>:259 [#uses=1] - store i32 %259, i32* @ul, align 4 - br label %return - -return: ; preds = %entry - ret void -} - -define void @test_compare_and_swap() nounwind { -entry: - load i8* @sc, align 1 ; <i8>:0 [#uses=1] - zext i8 %0 to i32 ; <i32>:1 [#uses=1] - load i8* @uc, align 1 ; <i8>:2 [#uses=1] - zext i8 %2 to i32 ; <i32>:3 [#uses=1] - trunc i32 %3 to i8 ; <i8>:4 [#uses=1] - trunc i32 %1 to i8 ; <i8>:5 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %4, i8 %5 ) ; <i8>:6 [#uses=1] - store i8 %6, i8* @sc, align 1 - load i8* @sc, align 1 ; <i8>:7 [#uses=1] - zext i8 %7 to i32 ; <i32>:8 [#uses=1] - load i8* @uc, align 1 ; <i8>:9 [#uses=1] - zext i8 %9 to i32 ; <i32>:10 [#uses=1] - trunc i32 %10 to i8 ; <i8>:11 [#uses=1] - trunc i32 %8 to i8 ; <i8>:12 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %11, i8 %12 ) ; <i8>:13 [#uses=1] - store i8 %13, i8* @uc, align 1 - load i8* @sc, align 1 ; <i8>:14 [#uses=1] - sext i8 %14 to i16 ; <i16>:15 [#uses=1] - zext i16 %15 to i32 ; <i32>:16 [#uses=1] - load i8* @uc, align 1 ; <i8>:17 [#uses=1] - zext i8 %17 to i32 ; <i32>:18 [#uses=1] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:19 [#uses=1] - trunc i32 %18 to i16 ; <i16>:20 [#uses=1] - trunc i32 %16 to i16 ; <i16>:21 [#uses=1] - call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %19, i16 %20, i16 %21 ) ; <i16>:22 [#uses=1] - store i16 %22, i16* @ss, align 2 - load i8* @sc, align 1 ; <i8>:23 [#uses=1] - sext i8 %23 to i16 ; <i16>:24 [#uses=1] - zext i16 %24 to i32 ; <i32>:25 [#uses=1] - load i8* @uc, align 1 ; <i8>:26 [#uses=1] - zext i8 %26 to i32 ; <i32>:27 [#uses=1] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:28 [#uses=1] - trunc i32 %27 to i16 ; <i16>:29 [#uses=1] - trunc i32 %25 to i16 ; <i16>:30 [#uses=1] - call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %28, i16 %29, i16 %30 ) ; <i16>:31 [#uses=1] - store i16 %31, i16* @us, align 2 - load i8* @sc, align 1 ; <i8>:32 [#uses=1] - sext i8 %32 to i32 ; <i32>:33 [#uses=1] - load i8* @uc, align 1 ; <i8>:34 [#uses=1] - zext i8 %34 to i32 ; <i32>:35 [#uses=1] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:36 [#uses=1] - call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %36, i32 %35, i32 %33 ) ; <i32>:37 [#uses=1] - store i32 %37, i32* @si, align 4 - load i8* @sc, align 1 ; <i8>:38 [#uses=1] - sext i8 %38 to i32 ; <i32>:39 [#uses=1] - load i8* @uc, align 1 ; <i8>:40 [#uses=1] - zext i8 %40 to i32 ; <i32>:41 [#uses=1] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:42 [#uses=1] - call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %42, i32 %41, i32 %39 ) ; <i32>:43 [#uses=1] - store i32 %43, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:44 [#uses=1] - sext i8 %44 to i32 ; <i32>:45 [#uses=1] - load i8* @uc, align 1 ; <i8>:46 [#uses=1] - zext i8 %46 to i32 ; <i32>:47 [#uses=1] - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:48 [#uses=1] - call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %48, i32 %47, i32 %45 ) ; <i32>:49 [#uses=1] - store i32 %49, i32* @sl, align 4 - load i8* @sc, align 1 ; <i8>:50 [#uses=1] - sext i8 %50 to i32 ; <i32>:51 [#uses=1] - load i8* @uc, align 1 ; 
<i8>:52 [#uses=1] - zext i8 %52 to i32 ; <i32>:53 [#uses=1] - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1] - call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %54, i32 %53, i32 %51 ) ; <i32>:55 [#uses=1] - store i32 %55, i32* @ul, align 4 - load i8* @sc, align 1 ; <i8>:56 [#uses=1] - zext i8 %56 to i32 ; <i32>:57 [#uses=1] - load i8* @uc, align 1 ; <i8>:58 [#uses=1] - zext i8 %58 to i32 ; <i32>:59 [#uses=1] - trunc i32 %59 to i8 ; <i8>:60 [#uses=2] - trunc i32 %57 to i8 ; <i8>:61 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %60, i8 %61 ) ; <i8>:62 [#uses=1] - icmp eq i8 %62, %60 ; <i1>:63 [#uses=1] - zext i1 %63 to i8 ; <i8>:64 [#uses=1] - zext i8 %64 to i32 ; <i32>:65 [#uses=1] - store i32 %65, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:66 [#uses=1] - zext i8 %66 to i32 ; <i32>:67 [#uses=1] - load i8* @uc, align 1 ; <i8>:68 [#uses=1] - zext i8 %68 to i32 ; <i32>:69 [#uses=1] - trunc i32 %69 to i8 ; <i8>:70 [#uses=2] - trunc i32 %67 to i8 ; <i8>:71 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %70, i8 %71 ) ; <i8>:72 [#uses=1] - icmp eq i8 %72, %70 ; <i1>:73 [#uses=1] - zext i1 %73 to i8 ; <i8>:74 [#uses=1] - zext i8 %74 to i32 ; <i32>:75 [#uses=1] - store i32 %75, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:76 [#uses=1] - sext i8 %76 to i16 ; <i16>:77 [#uses=1] - zext i16 %77 to i32 ; <i32>:78 [#uses=1] - load i8* @uc, align 1 ; <i8>:79 [#uses=1] - zext i8 %79 to i32 ; <i32>:80 [#uses=1] - trunc i32 %80 to i8 ; <i8>:81 [#uses=2] - trunc i32 %78 to i8 ; <i8>:82 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @ss to i8*), i8 %81, i8 %82 ) ; <i8>:83 [#uses=1] - icmp eq i8 %83, %81 ; <i1>:84 [#uses=1] - zext i1 %84 to i8 ; <i8>:85 [#uses=1] - zext i8 %85 to i32 ; <i32>:86 [#uses=1] - store i32 %86, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:87 [#uses=1] - sext i8 %87 to i16 ; <i16>:88 [#uses=1] - zext i16 %88 to i32 ; <i32>:89 [#uses=1] - load i8* @uc, align 1 ; <i8>:90 [#uses=1] - zext i8 %90 to i32 ; <i32>:91 [#uses=1] - trunc i32 %91 to i8 ; <i8>:92 [#uses=2] - trunc i32 %89 to i8 ; <i8>:93 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @us to i8*), i8 %92, i8 %93 ) ; <i8>:94 [#uses=1] - icmp eq i8 %94, %92 ; <i1>:95 [#uses=1] - zext i1 %95 to i8 ; <i8>:96 [#uses=1] - zext i8 %96 to i32 ; <i32>:97 [#uses=1] - store i32 %97, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:98 [#uses=1] - sext i8 %98 to i32 ; <i32>:99 [#uses=1] - load i8* @uc, align 1 ; <i8>:100 [#uses=1] - zext i8 %100 to i32 ; <i32>:101 [#uses=1] - trunc i32 %101 to i8 ; <i8>:102 [#uses=2] - trunc i32 %99 to i8 ; <i8>:103 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @si to i8*), i8 %102, i8 %103 ) ; <i8>:104 [#uses=1] - icmp eq i8 %104, %102 ; <i1>:105 [#uses=1] - zext i1 %105 to i8 ; <i8>:106 [#uses=1] - zext i8 %106 to i32 ; <i32>:107 [#uses=1] - store i32 %107, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:108 [#uses=1] - sext i8 %108 to i32 ; <i32>:109 [#uses=1] - load i8* @uc, align 1 ; <i8>:110 [#uses=1] - zext i8 %110 to i32 ; <i32>:111 [#uses=1] - trunc i32 %111 to i8 ; <i8>:112 [#uses=2] - trunc i32 %109 to i8 ; <i8>:113 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ui to i8*), i8 %112, i8 %113 ) ; <i8>:114 [#uses=1] - icmp eq i8 %114, %112 ; <i1>:115 [#uses=1] - zext i1 %115 to i8 ; <i8>:116 [#uses=1] - zext i8 %116 to i32 ; <i32>:117 [#uses=1] - store i32 %117, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:118 [#uses=1] - sext i8 %118 to i32 ; 
<i32>:119 [#uses=1] - load i8* @uc, align 1 ; <i8>:120 [#uses=1] - zext i8 %120 to i32 ; <i32>:121 [#uses=1] - trunc i32 %121 to i8 ; <i8>:122 [#uses=2] - trunc i32 %119 to i8 ; <i8>:123 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @sl to i8*), i8 %122, i8 %123 ) ; <i8>:124 [#uses=1] - icmp eq i8 %124, %122 ; <i1>:125 [#uses=1] - zext i1 %125 to i8 ; <i8>:126 [#uses=1] - zext i8 %126 to i32 ; <i32>:127 [#uses=1] - store i32 %127, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:128 [#uses=1] - sext i8 %128 to i32 ; <i32>:129 [#uses=1] - load i8* @uc, align 1 ; <i8>:130 [#uses=1] - zext i8 %130 to i32 ; <i32>:131 [#uses=1] - trunc i32 %131 to i8 ; <i8>:132 [#uses=2] - trunc i32 %129 to i8 ; <i8>:133 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ul to i8*), i8 %132, i8 %133 ) ; <i8>:134 [#uses=1] - icmp eq i8 %134, %132 ; <i1>:135 [#uses=1] - zext i1 %135 to i8 ; <i8>:136 [#uses=1] - zext i8 %136 to i32 ; <i32>:137 [#uses=1] - store i32 %137, i32* @ui, align 4 - br label %return - -return: ; preds = %entry - ret void -} - -declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind - -declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind - -declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind - -define void @test_lock() nounwind { -entry: - call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=1] - store i8 %0, i8* @sc, align 1 - call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=1] - store i8 %1, i8* @uc, align 1 - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1] - call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=1] - store i16 %3, i16* @ss, align 2 - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1] - call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=1] - store i16 %5, i16* @us, align 2 - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1] - call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=1] - store i32 %7, i32* @si, align 4 - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1] - call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=1] - store i32 %9, i32* @ui, align 4 - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1] - call i32 @llvm.atomic.swap.i32.p0i32( i32* %10, i32 1 ) ; <i32>:11 [#uses=1] - store i32 %11, i32* @sl, align 4 - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1] - call i32 @llvm.atomic.swap.i32.p0i32( i32* %12, i32 1 ) ; <i32>:13 [#uses=1] - store i32 %13, i32* @ul, align 4 - call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false ) - volatile store i8 0, i8* @sc, align 1 - volatile store i8 0, i8* @uc, align 1 - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:14 [#uses=1] - volatile store i16 0, i16* %14, align 2 - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:15 [#uses=1] - volatile store i16 0, i16* %15, align 2 - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:16 [#uses=1] - volatile store i32 0, i32* %16, align 4 - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:17 [#uses=1] - volatile store i32 0, i32* %17, align 4 - bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:18 [#uses=1] - volatile store i32 0, i32* %18, align 4 - bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:19 [#uses=1] - volatile store i32 0, i32* %19, align 4 - br label %return - -return: ; preds = %entry - ret void -} - -declare i8 
@llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/test/CodeGen/X86/Atomics-64.ll b/test/CodeGen/X86/Atomics-64.ll
index ac174b9..8e93762 100644
--- a/test/CodeGen/X86/Atomics-64.ll
+++ b/test/CodeGen/X86/Atomics-64.ll
@@ -1,1015 +1,950 @@
 ; RUN: llc < %s -march=x86-64 > %t
-; ModuleID = 'Atomics.c'
+; RUN: llc < %s -march=x86 > %t
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
 target triple = "x86_64-apple-darwin8"
-@sc = common global i8 0 ; <i8*> [#uses=56]
-@uc = common global i8 0 ; <i8*> [#uses=116]
-@ss = common global i16 0 ; <i16*> [#uses=15]
-@us = common global i16 0 ; <i16*> [#uses=15]
-@si = common global i32 0 ; <i32*> [#uses=15]
-@ui = common global i32 0 ; <i32*> [#uses=25]
-@sl = common global i64 0 ; <i64*> [#uses=15]
-@ul = common global i64 0 ; <i64*> [#uses=15]
-@sll = common global i64 0 ; <i64*> [#uses=15]
-@ull = common global i64 0 ; <i64*> [#uses=15]
+
+@sc = common global i8 0
+@uc = common global i8 0
+@ss = common global i16 0
+@us = common global i16 0
+@si = common global i32 0
+@ui = common global i32 0
+@sl = common global i64 0
+@ul = common global i64 0
+@sll = common global i64 0
+@ull = common global i64 0
 define void @test_op_ignore() nounwind {
 entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=0]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 1 ) ; <i64>:11 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 1 ) ; <i64>:13 [#uses=0]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:14 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %14, i64 1 ) ; <i64>:15 [#uses=0]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:16 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %16, i64 1 ) ; <i64>:17 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:18 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:19 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:20 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %20, i16 1 ) ; <i16>:21 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:22 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %22, i16 1 ) ; <i16>:23 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 1 ) ; <i32>:25 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:26
[#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 1 ) ; <i32>:27 [#uses=0] - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:28 [#uses=1] - call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %28, i64 1 ) ; <i64>:29 [#uses=0] - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:30 [#uses=1] - call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %30, i64 1 ) ; <i64>:31 [#uses=0] - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:32 [#uses=1] - call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %32, i64 1 ) ; <i64>:33 [#uses=0] - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:34 [#uses=1] - call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %34, i64 1 ) ; <i64>:35 [#uses=0] - call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:36 [#uses=0] - call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:37 [#uses=0] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:38 [#uses=1] - call i16 @llvm.atomic.load.or.i16.p0i16( i16* %38, i16 1 ) ; <i16>:39 [#uses=0] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:40 [#uses=1] - call i16 @llvm.atomic.load.or.i16.p0i16( i16* %40, i16 1 ) ; <i16>:41 [#uses=0] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:42 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %42, i32 1 ) ; <i32>:43 [#uses=0] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:44 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %44, i32 1 ) ; <i32>:45 [#uses=0] - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:46 [#uses=1] - call i64 @llvm.atomic.load.or.i64.p0i64( i64* %46, i64 1 ) ; <i64>:47 [#uses=0] - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:48 [#uses=1] - call i64 @llvm.atomic.load.or.i64.p0i64( i64* %48, i64 1 ) ; <i64>:49 [#uses=0] - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:50 [#uses=1] - call i64 @llvm.atomic.load.or.i64.p0i64( i64* %50, i64 1 ) ; <i64>:51 [#uses=0] - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:52 [#uses=1] - call i64 @llvm.atomic.load.or.i64.p0i64( i64* %52, i64 1 ) ; <i64>:53 [#uses=0] - call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:54 [#uses=0] - call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:55 [#uses=0] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:56 [#uses=1] - call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %56, i16 1 ) ; <i16>:57 [#uses=0] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:58 [#uses=1] - call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %58, i16 1 ) ; <i16>:59 [#uses=0] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:60 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %60, i32 1 ) ; <i32>:61 [#uses=0] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:62 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %62, i32 1 ) ; <i32>:63 [#uses=0] - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:64 [#uses=1] - call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %64, i64 1 ) ; <i64>:65 [#uses=0] - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:66 [#uses=1] - call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %66, i64 1 ) ; <i64>:67 [#uses=0] - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:68 [#uses=1] - call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %68, i64 1 ) ; <i64>:69 [#uses=0] - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:70 [#uses=1] - call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %70, i64 1 ) ; <i64>:71 [#uses=0] - call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:72 [#uses=0] - call i8 
@llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:73 [#uses=0] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:74 [#uses=1] - call i16 @llvm.atomic.load.and.i16.p0i16( i16* %74, i16 1 ) ; <i16>:75 [#uses=0] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:76 [#uses=1] - call i16 @llvm.atomic.load.and.i16.p0i16( i16* %76, i16 1 ) ; <i16>:77 [#uses=0] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:78 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %78, i32 1 ) ; <i32>:79 [#uses=0] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:80 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %80, i32 1 ) ; <i32>:81 [#uses=0] - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:82 [#uses=1] - call i64 @llvm.atomic.load.and.i64.p0i64( i64* %82, i64 1 ) ; <i64>:83 [#uses=0] - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:84 [#uses=1] - call i64 @llvm.atomic.load.and.i64.p0i64( i64* %84, i64 1 ) ; <i64>:85 [#uses=0] - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:86 [#uses=1] - call i64 @llvm.atomic.load.and.i64.p0i64( i64* %86, i64 1 ) ; <i64>:87 [#uses=0] - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:88 [#uses=1] - call i64 @llvm.atomic.load.and.i64.p0i64( i64* %88, i64 1 ) ; <i64>:89 [#uses=0] - call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:90 [#uses=0] - call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:91 [#uses=0] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:92 [#uses=1] - call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %92, i16 1 ) ; <i16>:93 [#uses=0] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:94 [#uses=1] - call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %94, i16 1 ) ; <i16>:95 [#uses=0] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:96 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %96, i32 1 ) ; <i32>:97 [#uses=0] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:98 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %98, i32 1 ) ; <i32>:99 [#uses=0] - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:100 [#uses=1] - call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %100, i64 1 ) ; <i64>:101 [#uses=0] - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:102 [#uses=1] - call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %102, i64 1 ) ; <i64>:103 [#uses=0] - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:104 [#uses=1] - call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %104, i64 1 ) ; <i64>:105 [#uses=0] - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:106 [#uses=1] - call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %106, i64 1 ) ; <i64>:107 [#uses=0] - br label %return - -return: ; preds = %entry - ret void + %0 = atomicrmw add i8* @sc, i8 1 monotonic + %1 = atomicrmw add i8* @uc, i8 1 monotonic + %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %3 = atomicrmw add i16* %2, i16 1 monotonic + %4 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %5 = atomicrmw add i16* %4, i16 1 monotonic + %6 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %7 = atomicrmw add i32* %6, i32 1 monotonic + %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %9 = atomicrmw add i32* %8, i32 1 monotonic + %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %11 = atomicrmw add i64* %10, i64 1 monotonic + %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %13 = atomicrmw add i64* %12, i64 1 monotonic + %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %15 = atomicrmw add i64* %14, i64 1 monotonic + %16 = 
bitcast i8* bitcast (i64* @ull to i8*) to i64* + %17 = atomicrmw add i64* %16, i64 1 monotonic + %18 = atomicrmw sub i8* @sc, i8 1 monotonic + %19 = atomicrmw sub i8* @uc, i8 1 monotonic + %20 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %21 = atomicrmw sub i16* %20, i16 1 monotonic + %22 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %23 = atomicrmw sub i16* %22, i16 1 monotonic + %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %25 = atomicrmw sub i32* %24, i32 1 monotonic + %26 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %27 = atomicrmw sub i32* %26, i32 1 monotonic + %28 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %29 = atomicrmw sub i64* %28, i64 1 monotonic + %30 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %31 = atomicrmw sub i64* %30, i64 1 monotonic + %32 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %33 = atomicrmw sub i64* %32, i64 1 monotonic + %34 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %35 = atomicrmw sub i64* %34, i64 1 monotonic + %36 = atomicrmw or i8* @sc, i8 1 monotonic + %37 = atomicrmw or i8* @uc, i8 1 monotonic + %38 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %39 = atomicrmw or i16* %38, i16 1 monotonic + %40 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %41 = atomicrmw or i16* %40, i16 1 monotonic + %42 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %43 = atomicrmw or i32* %42, i32 1 monotonic + %44 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %45 = atomicrmw or i32* %44, i32 1 monotonic + %46 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %47 = atomicrmw or i64* %46, i64 1 monotonic + %48 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %49 = atomicrmw or i64* %48, i64 1 monotonic + %50 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %51 = atomicrmw or i64* %50, i64 1 monotonic + %52 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %53 = atomicrmw or i64* %52, i64 1 monotonic + %54 = atomicrmw xor i8* @sc, i8 1 monotonic + %55 = atomicrmw xor i8* @uc, i8 1 monotonic + %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %57 = atomicrmw xor i16* %56, i16 1 monotonic + %58 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %59 = atomicrmw xor i16* %58, i16 1 monotonic + %60 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %61 = atomicrmw xor i32* %60, i32 1 monotonic + %62 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %63 = atomicrmw xor i32* %62, i32 1 monotonic + %64 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %65 = atomicrmw xor i64* %64, i64 1 monotonic + %66 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %67 = atomicrmw xor i64* %66, i64 1 monotonic + %68 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %69 = atomicrmw xor i64* %68, i64 1 monotonic + %70 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %71 = atomicrmw xor i64* %70, i64 1 monotonic + %72 = atomicrmw and i8* @sc, i8 1 monotonic + %73 = atomicrmw and i8* @uc, i8 1 monotonic + %74 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %75 = atomicrmw and i16* %74, i16 1 monotonic + %76 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %77 = atomicrmw and i16* %76, i16 1 monotonic + %78 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %79 = atomicrmw and i32* %78, i32 1 monotonic + %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %81 = atomicrmw and i32* %80, i32 1 monotonic + %82 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %83 = atomicrmw and i64* %82, i64 1 monotonic + %84 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %85 = atomicrmw and i64* %84, i64 1 monotonic + %86 = bitcast i8* bitcast (i64* 
@sll to i8*) to i64* + %87 = atomicrmw and i64* %86, i64 1 monotonic + %88 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %89 = atomicrmw and i64* %88, i64 1 monotonic + %90 = atomicrmw nand i8* @sc, i8 1 monotonic + %91 = atomicrmw nand i8* @uc, i8 1 monotonic + %92 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %93 = atomicrmw nand i16* %92, i16 1 monotonic + %94 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %95 = atomicrmw nand i16* %94, i16 1 monotonic + %96 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %97 = atomicrmw nand i32* %96, i32 1 monotonic + %98 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %99 = atomicrmw nand i32* %98, i32 1 monotonic + %100 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %101 = atomicrmw nand i64* %100, i64 1 monotonic + %102 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %103 = atomicrmw nand i64* %102, i64 1 monotonic + %104 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %105 = atomicrmw nand i64* %104, i64 1 monotonic + %106 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %107 = atomicrmw nand i64* %106, i64 1 monotonic + br label %return + +return: ; preds = %entry + ret void } -declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind - -declare i64 @llvm.atomic.load.add.i64.p0i64(i64*, i64) nounwind - -declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind - -declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind - -declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind - -declare i64 @llvm.atomic.load.or.i64.p0i64(i64*, i64) nounwind - -declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind - -declare i64 @llvm.atomic.load.xor.i64.p0i64(i64*, i64) nounwind - -declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind - -declare i64 @llvm.atomic.load.and.i64.p0i64(i64*, i64) nounwind - -declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind - -declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind - -declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind - -declare i64 @llvm.atomic.load.nand.i64.p0i64(i64*, i64) nounwind - define void @test_fetch_and_op() nounwind { entry: - call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:0 [#uses=1] - store i8 %0, i8* @sc, align 1 - call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:1 [#uses=1] - store i8 %1, i8* @uc, align 1 - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1] - call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 ) ; <i16>:3 [#uses=1] - store i16 %3, i16* @ss, align 2 - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1] - call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 ) ; <i16>:5 [#uses=1] - store i16 %5, i16* @us, align 2 - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 ) ; <i32>:7 [#uses=1] - store i32 %7, i32* 
@si, align 4 - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 ) ; <i32>:9 [#uses=1] - store i32 %9, i32* @ui, align 4 - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1] - call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 11 ) ; <i64>:11 [#uses=1] - store i64 %11, i64* @sl, align 8 - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1] - call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 11 ) ; <i64>:13 [#uses=1] - store i64 %13, i64* @ul, align 8 - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:14 [#uses=1] - call i64 @llvm.atomic.load.add.i64.p0i64( i64* %14, i64 11 ) ; <i64>:15 [#uses=1] - store i64 %15, i64* @sll, align 8 - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:16 [#uses=1] - call i64 @llvm.atomic.load.add.i64.p0i64( i64* %16, i64 11 ) ; <i64>:17 [#uses=1] - store i64 %17, i64* @ull, align 8 - call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:18 [#uses=1] - store i8 %18, i8* @sc, align 1 - call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:19 [#uses=1] - store i8 %19, i8* @uc, align 1 - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:20 [#uses=1] - call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %20, i16 11 ) ; <i16>:21 [#uses=1] - store i16 %21, i16* @ss, align 2 - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:22 [#uses=1] - call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %22, i16 11 ) ; <i16>:23 [#uses=1] - store i16 %23, i16* @us, align 2 - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:24 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 11 ) ; <i32>:25 [#uses=1] - store i32 %25, i32* @si, align 4 - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:26 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 11 ) ; <i32>:27 [#uses=1] - store i32 %27, i32* @ui, align 4 - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:28 [#uses=1] - call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %28, i64 11 ) ; <i64>:29 [#uses=1] - store i64 %29, i64* @sl, align 8 - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:30 [#uses=1] - call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %30, i64 11 ) ; <i64>:31 [#uses=1] - store i64 %31, i64* @ul, align 8 - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:32 [#uses=1] - call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %32, i64 11 ) ; <i64>:33 [#uses=1] - store i64 %33, i64* @sll, align 8 - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:34 [#uses=1] - call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %34, i64 11 ) ; <i64>:35 [#uses=1] - store i64 %35, i64* @ull, align 8 - call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:36 [#uses=1] - store i8 %36, i8* @sc, align 1 - call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:37 [#uses=1] - store i8 %37, i8* @uc, align 1 - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:38 [#uses=1] - call i16 @llvm.atomic.load.or.i16.p0i16( i16* %38, i16 11 ) ; <i16>:39 [#uses=1] - store i16 %39, i16* @ss, align 2 - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:40 [#uses=1] - call i16 @llvm.atomic.load.or.i16.p0i16( i16* %40, i16 11 ) ; <i16>:41 [#uses=1] - store i16 %41, i16* @us, align 2 - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:42 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %42, i32 11 ) ; <i32>:43 [#uses=1] - store i32 %43, i32* @si, align 4 - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:44 [#uses=1] - 
call i32 @llvm.atomic.load.or.i32.p0i32( i32* %44, i32 11 ) ; <i32>:45 [#uses=1] - store i32 %45, i32* @ui, align 4 - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:46 [#uses=1] - call i64 @llvm.atomic.load.or.i64.p0i64( i64* %46, i64 11 ) ; <i64>:47 [#uses=1] - store i64 %47, i64* @sl, align 8 - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:48 [#uses=1] - call i64 @llvm.atomic.load.or.i64.p0i64( i64* %48, i64 11 ) ; <i64>:49 [#uses=1] - store i64 %49, i64* @ul, align 8 - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:50 [#uses=1] - call i64 @llvm.atomic.load.or.i64.p0i64( i64* %50, i64 11 ) ; <i64>:51 [#uses=1] - store i64 %51, i64* @sll, align 8 - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:52 [#uses=1] - call i64 @llvm.atomic.load.or.i64.p0i64( i64* %52, i64 11 ) ; <i64>:53 [#uses=1] - store i64 %53, i64* @ull, align 8 - call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:54 [#uses=1] - store i8 %54, i8* @sc, align 1 - call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:55 [#uses=1] - store i8 %55, i8* @uc, align 1 - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:56 [#uses=1] - call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %56, i16 11 ) ; <i16>:57 [#uses=1] - store i16 %57, i16* @ss, align 2 - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:58 [#uses=1] - call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %58, i16 11 ) ; <i16>:59 [#uses=1] - store i16 %59, i16* @us, align 2 - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:60 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %60, i32 11 ) ; <i32>:61 [#uses=1] - store i32 %61, i32* @si, align 4 - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:62 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %62, i32 11 ) ; <i32>:63 [#uses=1] - store i32 %63, i32* @ui, align 4 - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:64 [#uses=1] - call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %64, i64 11 ) ; <i64>:65 [#uses=1] - store i64 %65, i64* @sl, align 8 - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:66 [#uses=1] - call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %66, i64 11 ) ; <i64>:67 [#uses=1] - store i64 %67, i64* @ul, align 8 - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:68 [#uses=1] - call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %68, i64 11 ) ; <i64>:69 [#uses=1] - store i64 %69, i64* @sll, align 8 - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:70 [#uses=1] - call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %70, i64 11 ) ; <i64>:71 [#uses=1] - store i64 %71, i64* @ull, align 8 - call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:72 [#uses=1] - store i8 %72, i8* @sc, align 1 - call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:73 [#uses=1] - store i8 %73, i8* @uc, align 1 - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:74 [#uses=1] - call i16 @llvm.atomic.load.and.i16.p0i16( i16* %74, i16 11 ) ; <i16>:75 [#uses=1] - store i16 %75, i16* @ss, align 2 - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:76 [#uses=1] - call i16 @llvm.atomic.load.and.i16.p0i16( i16* %76, i16 11 ) ; <i16>:77 [#uses=1] - store i16 %77, i16* @us, align 2 - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:78 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %78, i32 11 ) ; <i32>:79 [#uses=1] - store i32 %79, i32* @si, align 4 - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:80 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %80, i32 11 ) ; <i32>:81 [#uses=1] - 
store i32 %81, i32* @ui, align 4 - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:82 [#uses=1] - call i64 @llvm.atomic.load.and.i64.p0i64( i64* %82, i64 11 ) ; <i64>:83 [#uses=1] - store i64 %83, i64* @sl, align 8 - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:84 [#uses=1] - call i64 @llvm.atomic.load.and.i64.p0i64( i64* %84, i64 11 ) ; <i64>:85 [#uses=1] - store i64 %85, i64* @ul, align 8 - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:86 [#uses=1] - call i64 @llvm.atomic.load.and.i64.p0i64( i64* %86, i64 11 ) ; <i64>:87 [#uses=1] - store i64 %87, i64* @sll, align 8 - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:88 [#uses=1] - call i64 @llvm.atomic.load.and.i64.p0i64( i64* %88, i64 11 ) ; <i64>:89 [#uses=1] - store i64 %89, i64* @ull, align 8 - call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:90 [#uses=1] - store i8 %90, i8* @sc, align 1 - call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:91 [#uses=1] - store i8 %91, i8* @uc, align 1 - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:92 [#uses=1] - call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %92, i16 11 ) ; <i16>:93 [#uses=1] - store i16 %93, i16* @ss, align 2 - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:94 [#uses=1] - call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %94, i16 11 ) ; <i16>:95 [#uses=1] - store i16 %95, i16* @us, align 2 - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:96 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %96, i32 11 ) ; <i32>:97 [#uses=1] - store i32 %97, i32* @si, align 4 - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:98 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %98, i32 11 ) ; <i32>:99 [#uses=1] - store i32 %99, i32* @ui, align 4 - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:100 [#uses=1] - call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %100, i64 11 ) ; <i64>:101 [#uses=1] - store i64 %101, i64* @sl, align 8 - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:102 [#uses=1] - call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %102, i64 11 ) ; <i64>:103 [#uses=1] - store i64 %103, i64* @ul, align 8 - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:104 [#uses=1] - call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %104, i64 11 ) ; <i64>:105 [#uses=1] - store i64 %105, i64* @sll, align 8 - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:106 [#uses=1] - call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %106, i64 11 ) ; <i64>:107 [#uses=1] - store i64 %107, i64* @ull, align 8 - br label %return - -return: ; preds = %entry - ret void + %0 = atomicrmw add i8* @sc, i8 11 monotonic + store i8 %0, i8* @sc, align 1 + %1 = atomicrmw add i8* @uc, i8 11 monotonic + store i8 %1, i8* @uc, align 1 + %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %3 = atomicrmw add i16* %2, i16 11 monotonic + store i16 %3, i16* @ss, align 2 + %4 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %5 = atomicrmw add i16* %4, i16 11 monotonic + store i16 %5, i16* @us, align 2 + %6 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %7 = atomicrmw add i32* %6, i32 11 monotonic + store i32 %7, i32* @si, align 4 + %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %9 = atomicrmw add i32* %8, i32 11 monotonic + store i32 %9, i32* @ui, align 4 + %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %11 = atomicrmw add i64* %10, i64 11 monotonic + store i64 %11, i64* @sl, align 8 + %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %13 = atomicrmw add i64* %12, i64 11 monotonic + 
store i64 %13, i64* @ul, align 8 + %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %15 = atomicrmw add i64* %14, i64 11 monotonic + store i64 %15, i64* @sll, align 8 + %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %17 = atomicrmw add i64* %16, i64 11 monotonic + store i64 %17, i64* @ull, align 8 + %18 = atomicrmw sub i8* @sc, i8 11 monotonic + store i8 %18, i8* @sc, align 1 + %19 = atomicrmw sub i8* @uc, i8 11 monotonic + store i8 %19, i8* @uc, align 1 + %20 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %21 = atomicrmw sub i16* %20, i16 11 monotonic + store i16 %21, i16* @ss, align 2 + %22 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %23 = atomicrmw sub i16* %22, i16 11 monotonic + store i16 %23, i16* @us, align 2 + %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %25 = atomicrmw sub i32* %24, i32 11 monotonic + store i32 %25, i32* @si, align 4 + %26 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %27 = atomicrmw sub i32* %26, i32 11 monotonic + store i32 %27, i32* @ui, align 4 + %28 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %29 = atomicrmw sub i64* %28, i64 11 monotonic + store i64 %29, i64* @sl, align 8 + %30 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %31 = atomicrmw sub i64* %30, i64 11 monotonic + store i64 %31, i64* @ul, align 8 + %32 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %33 = atomicrmw sub i64* %32, i64 11 monotonic + store i64 %33, i64* @sll, align 8 + %34 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %35 = atomicrmw sub i64* %34, i64 11 monotonic + store i64 %35, i64* @ull, align 8 + %36 = atomicrmw or i8* @sc, i8 11 monotonic + store i8 %36, i8* @sc, align 1 + %37 = atomicrmw or i8* @uc, i8 11 monotonic + store i8 %37, i8* @uc, align 1 + %38 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %39 = atomicrmw or i16* %38, i16 11 monotonic + store i16 %39, i16* @ss, align 2 + %40 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %41 = atomicrmw or i16* %40, i16 11 monotonic + store i16 %41, i16* @us, align 2 + %42 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %43 = atomicrmw or i32* %42, i32 11 monotonic + store i32 %43, i32* @si, align 4 + %44 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %45 = atomicrmw or i32* %44, i32 11 monotonic + store i32 %45, i32* @ui, align 4 + %46 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %47 = atomicrmw or i64* %46, i64 11 monotonic + store i64 %47, i64* @sl, align 8 + %48 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %49 = atomicrmw or i64* %48, i64 11 monotonic + store i64 %49, i64* @ul, align 8 + %50 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %51 = atomicrmw or i64* %50, i64 11 monotonic + store i64 %51, i64* @sll, align 8 + %52 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %53 = atomicrmw or i64* %52, i64 11 monotonic + store i64 %53, i64* @ull, align 8 + %54 = atomicrmw xor i8* @sc, i8 11 monotonic + store i8 %54, i8* @sc, align 1 + %55 = atomicrmw xor i8* @uc, i8 11 monotonic + store i8 %55, i8* @uc, align 1 + %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %57 = atomicrmw xor i16* %56, i16 11 monotonic + store i16 %57, i16* @ss, align 2 + %58 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %59 = atomicrmw xor i16* %58, i16 11 monotonic + store i16 %59, i16* @us, align 2 + %60 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %61 = atomicrmw xor i32* %60, i32 11 monotonic + store i32 %61, i32* @si, align 4 + %62 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %63 = atomicrmw xor i32* %62, i32 11 monotonic + store i32 %63, i32* @ui, align 4 + 
%64 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %65 = atomicrmw xor i64* %64, i64 11 monotonic + store i64 %65, i64* @sl, align 8 + %66 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %67 = atomicrmw xor i64* %66, i64 11 monotonic + store i64 %67, i64* @ul, align 8 + %68 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %69 = atomicrmw xor i64* %68, i64 11 monotonic + store i64 %69, i64* @sll, align 8 + %70 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %71 = atomicrmw xor i64* %70, i64 11 monotonic + store i64 %71, i64* @ull, align 8 + %72 = atomicrmw and i8* @sc, i8 11 monotonic + store i8 %72, i8* @sc, align 1 + %73 = atomicrmw and i8* @uc, i8 11 monotonic + store i8 %73, i8* @uc, align 1 + %74 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %75 = atomicrmw and i16* %74, i16 11 monotonic + store i16 %75, i16* @ss, align 2 + %76 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %77 = atomicrmw and i16* %76, i16 11 monotonic + store i16 %77, i16* @us, align 2 + %78 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %79 = atomicrmw and i32* %78, i32 11 monotonic + store i32 %79, i32* @si, align 4 + %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %81 = atomicrmw and i32* %80, i32 11 monotonic + store i32 %81, i32* @ui, align 4 + %82 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %83 = atomicrmw and i64* %82, i64 11 monotonic + store i64 %83, i64* @sl, align 8 + %84 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %85 = atomicrmw and i64* %84, i64 11 monotonic + store i64 %85, i64* @ul, align 8 + %86 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %87 = atomicrmw and i64* %86, i64 11 monotonic + store i64 %87, i64* @sll, align 8 + %88 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %89 = atomicrmw and i64* %88, i64 11 monotonic + store i64 %89, i64* @ull, align 8 + %90 = atomicrmw nand i8* @sc, i8 11 monotonic + store i8 %90, i8* @sc, align 1 + %91 = atomicrmw nand i8* @uc, i8 11 monotonic + store i8 %91, i8* @uc, align 1 + %92 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %93 = atomicrmw nand i16* %92, i16 11 monotonic + store i16 %93, i16* @ss, align 2 + %94 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %95 = atomicrmw nand i16* %94, i16 11 monotonic + store i16 %95, i16* @us, align 2 + %96 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %97 = atomicrmw nand i32* %96, i32 11 monotonic + store i32 %97, i32* @si, align 4 + %98 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %99 = atomicrmw nand i32* %98, i32 11 monotonic + store i32 %99, i32* @ui, align 4 + %100 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %101 = atomicrmw nand i64* %100, i64 11 monotonic + store i64 %101, i64* @sl, align 8 + %102 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %103 = atomicrmw nand i64* %102, i64 11 monotonic + store i64 %103, i64* @ul, align 8 + %104 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %105 = atomicrmw nand i64* %104, i64 11 monotonic + store i64 %105, i64* @sll, align 8 + %106 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %107 = atomicrmw nand i64* %106, i64 11 monotonic + store i64 %107, i64* @ull, align 8 + br label %return + +return: ; preds = %entry + ret void } define void @test_op_and_fetch() nounwind { entry: - load i8* @uc, align 1 ; <i8>:0 [#uses=1] - zext i8 %0 to i32 ; <i32>:1 [#uses=1] - trunc i32 %1 to i8 ; <i8>:2 [#uses=2] - call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %2 ) ; <i8>:3 [#uses=1] - add i8 %3, %2 ; <i8>:4 [#uses=1] - store i8 %4, i8* @sc, align 1 - load i8* @uc, align 1 ; <i8>:5 [#uses=1] - zext i8 %5 to 
i32 ; <i32>:6 [#uses=1] - trunc i32 %6 to i8 ; <i8>:7 [#uses=2] - call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %7 ) ; <i8>:8 [#uses=1] - add i8 %8, %7 ; <i8>:9 [#uses=1] - store i8 %9, i8* @uc, align 1 - load i8* @uc, align 1 ; <i8>:10 [#uses=1] - zext i8 %10 to i32 ; <i32>:11 [#uses=1] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:12 [#uses=1] - trunc i32 %11 to i16 ; <i16>:13 [#uses=2] - call i16 @llvm.atomic.load.add.i16.p0i16( i16* %12, i16 %13 ) ; <i16>:14 [#uses=1] - add i16 %14, %13 ; <i16>:15 [#uses=1] - store i16 %15, i16* @ss, align 2 - load i8* @uc, align 1 ; <i8>:16 [#uses=1] - zext i8 %16 to i32 ; <i32>:17 [#uses=1] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1] - trunc i32 %17 to i16 ; <i16>:19 [#uses=2] - call i16 @llvm.atomic.load.add.i16.p0i16( i16* %18, i16 %19 ) ; <i16>:20 [#uses=1] - add i16 %20, %19 ; <i16>:21 [#uses=1] - store i16 %21, i16* @us, align 2 - load i8* @uc, align 1 ; <i8>:22 [#uses=1] - zext i8 %22 to i32 ; <i32>:23 [#uses=2] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:24 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %24, i32 %23 ) ; <i32>:25 [#uses=1] - add i32 %25, %23 ; <i32>:26 [#uses=1] - store i32 %26, i32* @si, align 4 - load i8* @uc, align 1 ; <i8>:27 [#uses=1] - zext i8 %27 to i32 ; <i32>:28 [#uses=2] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:29 [#uses=1] - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %29, i32 %28 ) ; <i32>:30 [#uses=1] - add i32 %30, %28 ; <i32>:31 [#uses=1] - store i32 %31, i32* @ui, align 4 - load i8* @uc, align 1 ; <i8>:32 [#uses=1] - zext i8 %32 to i64 ; <i64>:33 [#uses=2] - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:34 [#uses=1] - call i64 @llvm.atomic.load.add.i64.p0i64( i64* %34, i64 %33 ) ; <i64>:35 [#uses=1] - add i64 %35, %33 ; <i64>:36 [#uses=1] - store i64 %36, i64* @sl, align 8 - load i8* @uc, align 1 ; <i8>:37 [#uses=1] - zext i8 %37 to i64 ; <i64>:38 [#uses=2] - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:39 [#uses=1] - call i64 @llvm.atomic.load.add.i64.p0i64( i64* %39, i64 %38 ) ; <i64>:40 [#uses=1] - add i64 %40, %38 ; <i64>:41 [#uses=1] - store i64 %41, i64* @ul, align 8 - load i8* @uc, align 1 ; <i8>:42 [#uses=1] - zext i8 %42 to i64 ; <i64>:43 [#uses=2] - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:44 [#uses=1] - call i64 @llvm.atomic.load.add.i64.p0i64( i64* %44, i64 %43 ) ; <i64>:45 [#uses=1] - add i64 %45, %43 ; <i64>:46 [#uses=1] - store i64 %46, i64* @sll, align 8 - load i8* @uc, align 1 ; <i8>:47 [#uses=1] - zext i8 %47 to i64 ; <i64>:48 [#uses=2] - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:49 [#uses=1] - call i64 @llvm.atomic.load.add.i64.p0i64( i64* %49, i64 %48 ) ; <i64>:50 [#uses=1] - add i64 %50, %48 ; <i64>:51 [#uses=1] - store i64 %51, i64* @ull, align 8 - load i8* @uc, align 1 ; <i8>:52 [#uses=1] - zext i8 %52 to i32 ; <i32>:53 [#uses=1] - trunc i32 %53 to i8 ; <i8>:54 [#uses=2] - call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %54 ) ; <i8>:55 [#uses=1] - sub i8 %55, %54 ; <i8>:56 [#uses=1] - store i8 %56, i8* @sc, align 1 - load i8* @uc, align 1 ; <i8>:57 [#uses=1] - zext i8 %57 to i32 ; <i32>:58 [#uses=1] - trunc i32 %58 to i8 ; <i8>:59 [#uses=2] - call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %59 ) ; <i8>:60 [#uses=1] - sub i8 %60, %59 ; <i8>:61 [#uses=1] - store i8 %61, i8* @uc, align 1 - load i8* @uc, align 1 ; <i8>:62 [#uses=1] - zext i8 %62 to i32 ; <i32>:63 [#uses=1] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:64 [#uses=1] - trunc i32 
%63 to i16 ; <i16>:65 [#uses=2] - call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %64, i16 %65 ) ; <i16>:66 [#uses=1] - sub i16 %66, %65 ; <i16>:67 [#uses=1] - store i16 %67, i16* @ss, align 2 - load i8* @uc, align 1 ; <i8>:68 [#uses=1] - zext i8 %68 to i32 ; <i32>:69 [#uses=1] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:70 [#uses=1] - trunc i32 %69 to i16 ; <i16>:71 [#uses=2] - call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %70, i16 %71 ) ; <i16>:72 [#uses=1] - sub i16 %72, %71 ; <i16>:73 [#uses=1] - store i16 %73, i16* @us, align 2 - load i8* @uc, align 1 ; <i8>:74 [#uses=1] - zext i8 %74 to i32 ; <i32>:75 [#uses=2] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %76, i32 %75 ) ; <i32>:77 [#uses=1] - sub i32 %77, %75 ; <i32>:78 [#uses=1] - store i32 %78, i32* @si, align 4 - load i8* @uc, align 1 ; <i8>:79 [#uses=1] - zext i8 %79 to i32 ; <i32>:80 [#uses=2] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:81 [#uses=1] - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %81, i32 %80 ) ; <i32>:82 [#uses=1] - sub i32 %82, %80 ; <i32>:83 [#uses=1] - store i32 %83, i32* @ui, align 4 - load i8* @uc, align 1 ; <i8>:84 [#uses=1] - zext i8 %84 to i64 ; <i64>:85 [#uses=2] - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:86 [#uses=1] - call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %86, i64 %85 ) ; <i64>:87 [#uses=1] - sub i64 %87, %85 ; <i64>:88 [#uses=1] - store i64 %88, i64* @sl, align 8 - load i8* @uc, align 1 ; <i8>:89 [#uses=1] - zext i8 %89 to i64 ; <i64>:90 [#uses=2] - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:91 [#uses=1] - call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %91, i64 %90 ) ; <i64>:92 [#uses=1] - sub i64 %92, %90 ; <i64>:93 [#uses=1] - store i64 %93, i64* @ul, align 8 - load i8* @uc, align 1 ; <i8>:94 [#uses=1] - zext i8 %94 to i64 ; <i64>:95 [#uses=2] - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:96 [#uses=1] - call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %96, i64 %95 ) ; <i64>:97 [#uses=1] - sub i64 %97, %95 ; <i64>:98 [#uses=1] - store i64 %98, i64* @sll, align 8 - load i8* @uc, align 1 ; <i8>:99 [#uses=1] - zext i8 %99 to i64 ; <i64>:100 [#uses=2] - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:101 [#uses=1] - call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %101, i64 %100 ) ; <i64>:102 [#uses=1] - sub i64 %102, %100 ; <i64>:103 [#uses=1] - store i64 %103, i64* @ull, align 8 - load i8* @uc, align 1 ; <i8>:104 [#uses=1] - zext i8 %104 to i32 ; <i32>:105 [#uses=1] - trunc i32 %105 to i8 ; <i8>:106 [#uses=2] - call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %106 ) ; <i8>:107 [#uses=1] - or i8 %107, %106 ; <i8>:108 [#uses=1] - store i8 %108, i8* @sc, align 1 - load i8* @uc, align 1 ; <i8>:109 [#uses=1] - zext i8 %109 to i32 ; <i32>:110 [#uses=1] - trunc i32 %110 to i8 ; <i8>:111 [#uses=2] - call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %111 ) ; <i8>:112 [#uses=1] - or i8 %112, %111 ; <i8>:113 [#uses=1] - store i8 %113, i8* @uc, align 1 - load i8* @uc, align 1 ; <i8>:114 [#uses=1] - zext i8 %114 to i32 ; <i32>:115 [#uses=1] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:116 [#uses=1] - trunc i32 %115 to i16 ; <i16>:117 [#uses=2] - call i16 @llvm.atomic.load.or.i16.p0i16( i16* %116, i16 %117 ) ; <i16>:118 [#uses=1] - or i16 %118, %117 ; <i16>:119 [#uses=1] - store i16 %119, i16* @ss, align 2 - load i8* @uc, align 1 ; <i8>:120 [#uses=1] - zext i8 %120 to i32 ; <i32>:121 [#uses=1] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; 
<i16*>:122 [#uses=1] - trunc i32 %121 to i16 ; <i16>:123 [#uses=2] - call i16 @llvm.atomic.load.or.i16.p0i16( i16* %122, i16 %123 ) ; <i16>:124 [#uses=1] - or i16 %124, %123 ; <i16>:125 [#uses=1] - store i16 %125, i16* @us, align 2 - load i8* @uc, align 1 ; <i8>:126 [#uses=1] - zext i8 %126 to i32 ; <i32>:127 [#uses=2] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:128 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %128, i32 %127 ) ; <i32>:129 [#uses=1] - or i32 %129, %127 ; <i32>:130 [#uses=1] - store i32 %130, i32* @si, align 4 - load i8* @uc, align 1 ; <i8>:131 [#uses=1] - zext i8 %131 to i32 ; <i32>:132 [#uses=2] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:133 [#uses=1] - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %133, i32 %132 ) ; <i32>:134 [#uses=1] - or i32 %134, %132 ; <i32>:135 [#uses=1] - store i32 %135, i32* @ui, align 4 - load i8* @uc, align 1 ; <i8>:136 [#uses=1] - zext i8 %136 to i64 ; <i64>:137 [#uses=2] - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:138 [#uses=1] - call i64 @llvm.atomic.load.or.i64.p0i64( i64* %138, i64 %137 ) ; <i64>:139 [#uses=1] - or i64 %139, %137 ; <i64>:140 [#uses=1] - store i64 %140, i64* @sl, align 8 - load i8* @uc, align 1 ; <i8>:141 [#uses=1] - zext i8 %141 to i64 ; <i64>:142 [#uses=2] - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:143 [#uses=1] - call i64 @llvm.atomic.load.or.i64.p0i64( i64* %143, i64 %142 ) ; <i64>:144 [#uses=1] - or i64 %144, %142 ; <i64>:145 [#uses=1] - store i64 %145, i64* @ul, align 8 - load i8* @uc, align 1 ; <i8>:146 [#uses=1] - zext i8 %146 to i64 ; <i64>:147 [#uses=2] - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:148 [#uses=1] - call i64 @llvm.atomic.load.or.i64.p0i64( i64* %148, i64 %147 ) ; <i64>:149 [#uses=1] - or i64 %149, %147 ; <i64>:150 [#uses=1] - store i64 %150, i64* @sll, align 8 - load i8* @uc, align 1 ; <i8>:151 [#uses=1] - zext i8 %151 to i64 ; <i64>:152 [#uses=2] - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:153 [#uses=1] - call i64 @llvm.atomic.load.or.i64.p0i64( i64* %153, i64 %152 ) ; <i64>:154 [#uses=1] - or i64 %154, %152 ; <i64>:155 [#uses=1] - store i64 %155, i64* @ull, align 8 - load i8* @uc, align 1 ; <i8>:156 [#uses=1] - zext i8 %156 to i32 ; <i32>:157 [#uses=1] - trunc i32 %157 to i8 ; <i8>:158 [#uses=2] - call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %158 ) ; <i8>:159 [#uses=1] - xor i8 %159, %158 ; <i8>:160 [#uses=1] - store i8 %160, i8* @sc, align 1 - load i8* @uc, align 1 ; <i8>:161 [#uses=1] - zext i8 %161 to i32 ; <i32>:162 [#uses=1] - trunc i32 %162 to i8 ; <i8>:163 [#uses=2] - call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %163 ) ; <i8>:164 [#uses=1] - xor i8 %164, %163 ; <i8>:165 [#uses=1] - store i8 %165, i8* @uc, align 1 - load i8* @uc, align 1 ; <i8>:166 [#uses=1] - zext i8 %166 to i32 ; <i32>:167 [#uses=1] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:168 [#uses=1] - trunc i32 %167 to i16 ; <i16>:169 [#uses=2] - call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %168, i16 %169 ) ; <i16>:170 [#uses=1] - xor i16 %170, %169 ; <i16>:171 [#uses=1] - store i16 %171, i16* @ss, align 2 - load i8* @uc, align 1 ; <i8>:172 [#uses=1] - zext i8 %172 to i32 ; <i32>:173 [#uses=1] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:174 [#uses=1] - trunc i32 %173 to i16 ; <i16>:175 [#uses=2] - call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %174, i16 %175 ) ; <i16>:176 [#uses=1] - xor i16 %176, %175 ; <i16>:177 [#uses=1] - store i16 %177, i16* @us, align 2 - load i8* @uc, align 1 ; <i8>:178 [#uses=1] 
- zext i8 %178 to i32 ; <i32>:179 [#uses=2] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:180 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %180, i32 %179 ) ; <i32>:181 [#uses=1] - xor i32 %181, %179 ; <i32>:182 [#uses=1] - store i32 %182, i32* @si, align 4 - load i8* @uc, align 1 ; <i8>:183 [#uses=1] - zext i8 %183 to i32 ; <i32>:184 [#uses=2] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:185 [#uses=1] - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %185, i32 %184 ) ; <i32>:186 [#uses=1] - xor i32 %186, %184 ; <i32>:187 [#uses=1] - store i32 %187, i32* @ui, align 4 - load i8* @uc, align 1 ; <i8>:188 [#uses=1] - zext i8 %188 to i64 ; <i64>:189 [#uses=2] - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:190 [#uses=1] - call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %190, i64 %189 ) ; <i64>:191 [#uses=1] - xor i64 %191, %189 ; <i64>:192 [#uses=1] - store i64 %192, i64* @sl, align 8 - load i8* @uc, align 1 ; <i8>:193 [#uses=1] - zext i8 %193 to i64 ; <i64>:194 [#uses=2] - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:195 [#uses=1] - call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %195, i64 %194 ) ; <i64>:196 [#uses=1] - xor i64 %196, %194 ; <i64>:197 [#uses=1] - store i64 %197, i64* @ul, align 8 - load i8* @uc, align 1 ; <i8>:198 [#uses=1] - zext i8 %198 to i64 ; <i64>:199 [#uses=2] - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:200 [#uses=1] - call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %200, i64 %199 ) ; <i64>:201 [#uses=1] - xor i64 %201, %199 ; <i64>:202 [#uses=1] - store i64 %202, i64* @sll, align 8 - load i8* @uc, align 1 ; <i8>:203 [#uses=1] - zext i8 %203 to i64 ; <i64>:204 [#uses=2] - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:205 [#uses=1] - call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %205, i64 %204 ) ; <i64>:206 [#uses=1] - xor i64 %206, %204 ; <i64>:207 [#uses=1] - store i64 %207, i64* @ull, align 8 - load i8* @uc, align 1 ; <i8>:208 [#uses=1] - zext i8 %208 to i32 ; <i32>:209 [#uses=1] - trunc i32 %209 to i8 ; <i8>:210 [#uses=2] - call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %210 ) ; <i8>:211 [#uses=1] - and i8 %211, %210 ; <i8>:212 [#uses=1] - store i8 %212, i8* @sc, align 1 - load i8* @uc, align 1 ; <i8>:213 [#uses=1] - zext i8 %213 to i32 ; <i32>:214 [#uses=1] - trunc i32 %214 to i8 ; <i8>:215 [#uses=2] - call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %215 ) ; <i8>:216 [#uses=1] - and i8 %216, %215 ; <i8>:217 [#uses=1] - store i8 %217, i8* @uc, align 1 - load i8* @uc, align 1 ; <i8>:218 [#uses=1] - zext i8 %218 to i32 ; <i32>:219 [#uses=1] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:220 [#uses=1] - trunc i32 %219 to i16 ; <i16>:221 [#uses=2] - call i16 @llvm.atomic.load.and.i16.p0i16( i16* %220, i16 %221 ) ; <i16>:222 [#uses=1] - and i16 %222, %221 ; <i16>:223 [#uses=1] - store i16 %223, i16* @ss, align 2 - load i8* @uc, align 1 ; <i8>:224 [#uses=1] - zext i8 %224 to i32 ; <i32>:225 [#uses=1] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:226 [#uses=1] - trunc i32 %225 to i16 ; <i16>:227 [#uses=2] - call i16 @llvm.atomic.load.and.i16.p0i16( i16* %226, i16 %227 ) ; <i16>:228 [#uses=1] - and i16 %228, %227 ; <i16>:229 [#uses=1] - store i16 %229, i16* @us, align 2 - load i8* @uc, align 1 ; <i8>:230 [#uses=1] - zext i8 %230 to i32 ; <i32>:231 [#uses=2] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:232 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %232, i32 %231 ) ; <i32>:233 [#uses=1] - and i32 %233, %231 ; <i32>:234 [#uses=1] - store i32 %234, 
i32* @si, align 4 - load i8* @uc, align 1 ; <i8>:235 [#uses=1] - zext i8 %235 to i32 ; <i32>:236 [#uses=2] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:237 [#uses=1] - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %237, i32 %236 ) ; <i32>:238 [#uses=1] - and i32 %238, %236 ; <i32>:239 [#uses=1] - store i32 %239, i32* @ui, align 4 - load i8* @uc, align 1 ; <i8>:240 [#uses=1] - zext i8 %240 to i64 ; <i64>:241 [#uses=2] - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:242 [#uses=1] - call i64 @llvm.atomic.load.and.i64.p0i64( i64* %242, i64 %241 ) ; <i64>:243 [#uses=1] - and i64 %243, %241 ; <i64>:244 [#uses=1] - store i64 %244, i64* @sl, align 8 - load i8* @uc, align 1 ; <i8>:245 [#uses=1] - zext i8 %245 to i64 ; <i64>:246 [#uses=2] - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:247 [#uses=1] - call i64 @llvm.atomic.load.and.i64.p0i64( i64* %247, i64 %246 ) ; <i64>:248 [#uses=1] - and i64 %248, %246 ; <i64>:249 [#uses=1] - store i64 %249, i64* @ul, align 8 - load i8* @uc, align 1 ; <i8>:250 [#uses=1] - zext i8 %250 to i64 ; <i64>:251 [#uses=2] - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:252 [#uses=1] - call i64 @llvm.atomic.load.and.i64.p0i64( i64* %252, i64 %251 ) ; <i64>:253 [#uses=1] - and i64 %253, %251 ; <i64>:254 [#uses=1] - store i64 %254, i64* @sll, align 8 - load i8* @uc, align 1 ; <i8>:255 [#uses=1] - zext i8 %255 to i64 ; <i64>:256 [#uses=2] - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:257 [#uses=1] - call i64 @llvm.atomic.load.and.i64.p0i64( i64* %257, i64 %256 ) ; <i64>:258 [#uses=1] - and i64 %258, %256 ; <i64>:259 [#uses=1] - store i64 %259, i64* @ull, align 8 - load i8* @uc, align 1 ; <i8>:260 [#uses=1] - zext i8 %260 to i32 ; <i32>:261 [#uses=1] - trunc i32 %261 to i8 ; <i8>:262 [#uses=2] - call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %262 ) ; <i8>:263 [#uses=1] - xor i8 %263, -1 ; <i8>:264 [#uses=1] - and i8 %264, %262 ; <i8>:265 [#uses=1] - store i8 %265, i8* @sc, align 1 - load i8* @uc, align 1 ; <i8>:266 [#uses=1] - zext i8 %266 to i32 ; <i32>:267 [#uses=1] - trunc i32 %267 to i8 ; <i8>:268 [#uses=2] - call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %268 ) ; <i8>:269 [#uses=1] - xor i8 %269, -1 ; <i8>:270 [#uses=1] - and i8 %270, %268 ; <i8>:271 [#uses=1] - store i8 %271, i8* @uc, align 1 - load i8* @uc, align 1 ; <i8>:272 [#uses=1] - zext i8 %272 to i32 ; <i32>:273 [#uses=1] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:274 [#uses=1] - trunc i32 %273 to i16 ; <i16>:275 [#uses=2] - call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %274, i16 %275 ) ; <i16>:276 [#uses=1] - xor i16 %276, -1 ; <i16>:277 [#uses=1] - and i16 %277, %275 ; <i16>:278 [#uses=1] - store i16 %278, i16* @ss, align 2 - load i8* @uc, align 1 ; <i8>:279 [#uses=1] - zext i8 %279 to i32 ; <i32>:280 [#uses=1] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:281 [#uses=1] - trunc i32 %280 to i16 ; <i16>:282 [#uses=2] - call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %281, i16 %282 ) ; <i16>:283 [#uses=1] - xor i16 %283, -1 ; <i16>:284 [#uses=1] - and i16 %284, %282 ; <i16>:285 [#uses=1] - store i16 %285, i16* @us, align 2 - load i8* @uc, align 1 ; <i8>:286 [#uses=1] - zext i8 %286 to i32 ; <i32>:287 [#uses=2] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:288 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %288, i32 %287 ) ; <i32>:289 [#uses=1] - xor i32 %289, -1 ; <i32>:290 [#uses=1] - and i32 %290, %287 ; <i32>:291 [#uses=1] - store i32 %291, i32* @si, align 4 - load i8* @uc, align 1 ; <i8>:292 
[#uses=1] - zext i8 %292 to i32 ; <i32>:293 [#uses=2] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:294 [#uses=1] - call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %294, i32 %293 ) ; <i32>:295 [#uses=1] - xor i32 %295, -1 ; <i32>:296 [#uses=1] - and i32 %296, %293 ; <i32>:297 [#uses=1] - store i32 %297, i32* @ui, align 4 - load i8* @uc, align 1 ; <i8>:298 [#uses=1] - zext i8 %298 to i64 ; <i64>:299 [#uses=2] - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:300 [#uses=1] - call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %300, i64 %299 ) ; <i64>:301 [#uses=1] - xor i64 %301, -1 ; <i64>:302 [#uses=1] - and i64 %302, %299 ; <i64>:303 [#uses=1] - store i64 %303, i64* @sl, align 8 - load i8* @uc, align 1 ; <i8>:304 [#uses=1] - zext i8 %304 to i64 ; <i64>:305 [#uses=2] - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:306 [#uses=1] - call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %306, i64 %305 ) ; <i64>:307 [#uses=1] - xor i64 %307, -1 ; <i64>:308 [#uses=1] - and i64 %308, %305 ; <i64>:309 [#uses=1] - store i64 %309, i64* @ul, align 8 - load i8* @uc, align 1 ; <i8>:310 [#uses=1] - zext i8 %310 to i64 ; <i64>:311 [#uses=2] - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:312 [#uses=1] - call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %312, i64 %311 ) ; <i64>:313 [#uses=1] - xor i64 %313, -1 ; <i64>:314 [#uses=1] - and i64 %314, %311 ; <i64>:315 [#uses=1] - store i64 %315, i64* @sll, align 8 - load i8* @uc, align 1 ; <i8>:316 [#uses=1] - zext i8 %316 to i64 ; <i64>:317 [#uses=2] - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:318 [#uses=1] - call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %318, i64 %317 ) ; <i64>:319 [#uses=1] - xor i64 %319, -1 ; <i64>:320 [#uses=1] - and i64 %320, %317 ; <i64>:321 [#uses=1] - store i64 %321, i64* @ull, align 8 - br label %return - -return: ; preds = %entry - ret void + %0 = load i8* @uc, align 1 + %1 = zext i8 %0 to i32 + %2 = trunc i32 %1 to i8 + %3 = atomicrmw add i8* @sc, i8 %2 monotonic + %4 = add i8 %3, %2 + store i8 %4, i8* @sc, align 1 + %5 = load i8* @uc, align 1 + %6 = zext i8 %5 to i32 + %7 = trunc i32 %6 to i8 + %8 = atomicrmw add i8* @uc, i8 %7 monotonic + %9 = add i8 %8, %7 + store i8 %9, i8* @uc, align 1 + %10 = load i8* @uc, align 1 + %11 = zext i8 %10 to i32 + %12 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %13 = trunc i32 %11 to i16 + %14 = atomicrmw add i16* %12, i16 %13 monotonic + %15 = add i16 %14, %13 + store i16 %15, i16* @ss, align 2 + %16 = load i8* @uc, align 1 + %17 = zext i8 %16 to i32 + %18 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %19 = trunc i32 %17 to i16 + %20 = atomicrmw add i16* %18, i16 %19 monotonic + %21 = add i16 %20, %19 + store i16 %21, i16* @us, align 2 + %22 = load i8* @uc, align 1 + %23 = zext i8 %22 to i32 + %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %25 = atomicrmw add i32* %24, i32 %23 monotonic + %26 = add i32 %25, %23 + store i32 %26, i32* @si, align 4 + %27 = load i8* @uc, align 1 + %28 = zext i8 %27 to i32 + %29 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %30 = atomicrmw add i32* %29, i32 %28 monotonic + %31 = add i32 %30, %28 + store i32 %31, i32* @ui, align 4 + %32 = load i8* @uc, align 1 + %33 = zext i8 %32 to i64 + %34 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %35 = atomicrmw add i64* %34, i64 %33 monotonic + %36 = add i64 %35, %33 + store i64 %36, i64* @sl, align 8 + %37 = load i8* @uc, align 1 + %38 = zext i8 %37 to i64 + %39 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %40 = atomicrmw add i64* %39, i64 %38 monotonic 
+ %41 = add i64 %40, %38 + store i64 %41, i64* @ul, align 8 + %42 = load i8* @uc, align 1 + %43 = zext i8 %42 to i64 + %44 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %45 = atomicrmw add i64* %44, i64 %43 monotonic + %46 = add i64 %45, %43 + store i64 %46, i64* @sll, align 8 + %47 = load i8* @uc, align 1 + %48 = zext i8 %47 to i64 + %49 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %50 = atomicrmw add i64* %49, i64 %48 monotonic + %51 = add i64 %50, %48 + store i64 %51, i64* @ull, align 8 + %52 = load i8* @uc, align 1 + %53 = zext i8 %52 to i32 + %54 = trunc i32 %53 to i8 + %55 = atomicrmw sub i8* @sc, i8 %54 monotonic + %56 = sub i8 %55, %54 + store i8 %56, i8* @sc, align 1 + %57 = load i8* @uc, align 1 + %58 = zext i8 %57 to i32 + %59 = trunc i32 %58 to i8 + %60 = atomicrmw sub i8* @uc, i8 %59 monotonic + %61 = sub i8 %60, %59 + store i8 %61, i8* @uc, align 1 + %62 = load i8* @uc, align 1 + %63 = zext i8 %62 to i32 + %64 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %65 = trunc i32 %63 to i16 + %66 = atomicrmw sub i16* %64, i16 %65 monotonic + %67 = sub i16 %66, %65 + store i16 %67, i16* @ss, align 2 + %68 = load i8* @uc, align 1 + %69 = zext i8 %68 to i32 + %70 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %71 = trunc i32 %69 to i16 + %72 = atomicrmw sub i16* %70, i16 %71 monotonic + %73 = sub i16 %72, %71 + store i16 %73, i16* @us, align 2 + %74 = load i8* @uc, align 1 + %75 = zext i8 %74 to i32 + %76 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %77 = atomicrmw sub i32* %76, i32 %75 monotonic + %78 = sub i32 %77, %75 + store i32 %78, i32* @si, align 4 + %79 = load i8* @uc, align 1 + %80 = zext i8 %79 to i32 + %81 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %82 = atomicrmw sub i32* %81, i32 %80 monotonic + %83 = sub i32 %82, %80 + store i32 %83, i32* @ui, align 4 + %84 = load i8* @uc, align 1 + %85 = zext i8 %84 to i64 + %86 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %87 = atomicrmw sub i64* %86, i64 %85 monotonic + %88 = sub i64 %87, %85 + store i64 %88, i64* @sl, align 8 + %89 = load i8* @uc, align 1 + %90 = zext i8 %89 to i64 + %91 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %92 = atomicrmw sub i64* %91, i64 %90 monotonic + %93 = sub i64 %92, %90 + store i64 %93, i64* @ul, align 8 + %94 = load i8* @uc, align 1 + %95 = zext i8 %94 to i64 + %96 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %97 = atomicrmw sub i64* %96, i64 %95 monotonic + %98 = sub i64 %97, %95 + store i64 %98, i64* @sll, align 8 + %99 = load i8* @uc, align 1 + %100 = zext i8 %99 to i64 + %101 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %102 = atomicrmw sub i64* %101, i64 %100 monotonic + %103 = sub i64 %102, %100 + store i64 %103, i64* @ull, align 8 + %104 = load i8* @uc, align 1 + %105 = zext i8 %104 to i32 + %106 = trunc i32 %105 to i8 + %107 = atomicrmw or i8* @sc, i8 %106 monotonic + %108 = or i8 %107, %106 + store i8 %108, i8* @sc, align 1 + %109 = load i8* @uc, align 1 + %110 = zext i8 %109 to i32 + %111 = trunc i32 %110 to i8 + %112 = atomicrmw or i8* @uc, i8 %111 monotonic + %113 = or i8 %112, %111 + store i8 %113, i8* @uc, align 1 + %114 = load i8* @uc, align 1 + %115 = zext i8 %114 to i32 + %116 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %117 = trunc i32 %115 to i16 + %118 = atomicrmw or i16* %116, i16 %117 monotonic + %119 = or i16 %118, %117 + store i16 %119, i16* @ss, align 2 + %120 = load i8* @uc, align 1 + %121 = zext i8 %120 to i32 + %122 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %123 = trunc i32 %121 to i16 + %124 = atomicrmw or 
i16* %122, i16 %123 monotonic + %125 = or i16 %124, %123 + store i16 %125, i16* @us, align 2 + %126 = load i8* @uc, align 1 + %127 = zext i8 %126 to i32 + %128 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %129 = atomicrmw or i32* %128, i32 %127 monotonic + %130 = or i32 %129, %127 + store i32 %130, i32* @si, align 4 + %131 = load i8* @uc, align 1 + %132 = zext i8 %131 to i32 + %133 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %134 = atomicrmw or i32* %133, i32 %132 monotonic + %135 = or i32 %134, %132 + store i32 %135, i32* @ui, align 4 + %136 = load i8* @uc, align 1 + %137 = zext i8 %136 to i64 + %138 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %139 = atomicrmw or i64* %138, i64 %137 monotonic + %140 = or i64 %139, %137 + store i64 %140, i64* @sl, align 8 + %141 = load i8* @uc, align 1 + %142 = zext i8 %141 to i64 + %143 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %144 = atomicrmw or i64* %143, i64 %142 monotonic + %145 = or i64 %144, %142 + store i64 %145, i64* @ul, align 8 + %146 = load i8* @uc, align 1 + %147 = zext i8 %146 to i64 + %148 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %149 = atomicrmw or i64* %148, i64 %147 monotonic + %150 = or i64 %149, %147 + store i64 %150, i64* @sll, align 8 + %151 = load i8* @uc, align 1 + %152 = zext i8 %151 to i64 + %153 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %154 = atomicrmw or i64* %153, i64 %152 monotonic + %155 = or i64 %154, %152 + store i64 %155, i64* @ull, align 8 + %156 = load i8* @uc, align 1 + %157 = zext i8 %156 to i32 + %158 = trunc i32 %157 to i8 + %159 = atomicrmw xor i8* @sc, i8 %158 monotonic + %160 = xor i8 %159, %158 + store i8 %160, i8* @sc, align 1 + %161 = load i8* @uc, align 1 + %162 = zext i8 %161 to i32 + %163 = trunc i32 %162 to i8 + %164 = atomicrmw xor i8* @uc, i8 %163 monotonic + %165 = xor i8 %164, %163 + store i8 %165, i8* @uc, align 1 + %166 = load i8* @uc, align 1 + %167 = zext i8 %166 to i32 + %168 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %169 = trunc i32 %167 to i16 + %170 = atomicrmw xor i16* %168, i16 %169 monotonic + %171 = xor i16 %170, %169 + store i16 %171, i16* @ss, align 2 + %172 = load i8* @uc, align 1 + %173 = zext i8 %172 to i32 + %174 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %175 = trunc i32 %173 to i16 + %176 = atomicrmw xor i16* %174, i16 %175 monotonic + %177 = xor i16 %176, %175 + store i16 %177, i16* @us, align 2 + %178 = load i8* @uc, align 1 + %179 = zext i8 %178 to i32 + %180 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %181 = atomicrmw xor i32* %180, i32 %179 monotonic + %182 = xor i32 %181, %179 + store i32 %182, i32* @si, align 4 + %183 = load i8* @uc, align 1 + %184 = zext i8 %183 to i32 + %185 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %186 = atomicrmw xor i32* %185, i32 %184 monotonic + %187 = xor i32 %186, %184 + store i32 %187, i32* @ui, align 4 + %188 = load i8* @uc, align 1 + %189 = zext i8 %188 to i64 + %190 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %191 = atomicrmw xor i64* %190, i64 %189 monotonic + %192 = xor i64 %191, %189 + store i64 %192, i64* @sl, align 8 + %193 = load i8* @uc, align 1 + %194 = zext i8 %193 to i64 + %195 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %196 = atomicrmw xor i64* %195, i64 %194 monotonic + %197 = xor i64 %196, %194 + store i64 %197, i64* @ul, align 8 + %198 = load i8* @uc, align 1 + %199 = zext i8 %198 to i64 + %200 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %201 = atomicrmw xor i64* %200, i64 %199 monotonic + %202 = xor i64 %201, %199 + store i64 %202, i64* 
@sll, align 8 + %203 = load i8* @uc, align 1 + %204 = zext i8 %203 to i64 + %205 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %206 = atomicrmw xor i64* %205, i64 %204 monotonic + %207 = xor i64 %206, %204 + store i64 %207, i64* @ull, align 8 + %208 = load i8* @uc, align 1 + %209 = zext i8 %208 to i32 + %210 = trunc i32 %209 to i8 + %211 = atomicrmw and i8* @sc, i8 %210 monotonic + %212 = and i8 %211, %210 + store i8 %212, i8* @sc, align 1 + %213 = load i8* @uc, align 1 + %214 = zext i8 %213 to i32 + %215 = trunc i32 %214 to i8 + %216 = atomicrmw and i8* @uc, i8 %215 monotonic + %217 = and i8 %216, %215 + store i8 %217, i8* @uc, align 1 + %218 = load i8* @uc, align 1 + %219 = zext i8 %218 to i32 + %220 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %221 = trunc i32 %219 to i16 + %222 = atomicrmw and i16* %220, i16 %221 monotonic + %223 = and i16 %222, %221 + store i16 %223, i16* @ss, align 2 + %224 = load i8* @uc, align 1 + %225 = zext i8 %224 to i32 + %226 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %227 = trunc i32 %225 to i16 + %228 = atomicrmw and i16* %226, i16 %227 monotonic + %229 = and i16 %228, %227 + store i16 %229, i16* @us, align 2 + %230 = load i8* @uc, align 1 + %231 = zext i8 %230 to i32 + %232 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %233 = atomicrmw and i32* %232, i32 %231 monotonic + %234 = and i32 %233, %231 + store i32 %234, i32* @si, align 4 + %235 = load i8* @uc, align 1 + %236 = zext i8 %235 to i32 + %237 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %238 = atomicrmw and i32* %237, i32 %236 monotonic + %239 = and i32 %238, %236 + store i32 %239, i32* @ui, align 4 + %240 = load i8* @uc, align 1 + %241 = zext i8 %240 to i64 + %242 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %243 = atomicrmw and i64* %242, i64 %241 monotonic + %244 = and i64 %243, %241 + store i64 %244, i64* @sl, align 8 + %245 = load i8* @uc, align 1 + %246 = zext i8 %245 to i64 + %247 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %248 = atomicrmw and i64* %247, i64 %246 monotonic + %249 = and i64 %248, %246 + store i64 %249, i64* @ul, align 8 + %250 = load i8* @uc, align 1 + %251 = zext i8 %250 to i64 + %252 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %253 = atomicrmw and i64* %252, i64 %251 monotonic + %254 = and i64 %253, %251 + store i64 %254, i64* @sll, align 8 + %255 = load i8* @uc, align 1 + %256 = zext i8 %255 to i64 + %257 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %258 = atomicrmw and i64* %257, i64 %256 monotonic + %259 = and i64 %258, %256 + store i64 %259, i64* @ull, align 8 + %260 = load i8* @uc, align 1 + %261 = zext i8 %260 to i32 + %262 = trunc i32 %261 to i8 + %263 = atomicrmw nand i8* @sc, i8 %262 monotonic + %264 = xor i8 %263, -1 + %265 = and i8 %264, %262 + store i8 %265, i8* @sc, align 1 + %266 = load i8* @uc, align 1 + %267 = zext i8 %266 to i32 + %268 = trunc i32 %267 to i8 + %269 = atomicrmw nand i8* @uc, i8 %268 monotonic + %270 = xor i8 %269, -1 + %271 = and i8 %270, %268 + store i8 %271, i8* @uc, align 1 + %272 = load i8* @uc, align 1 + %273 = zext i8 %272 to i32 + %274 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %275 = trunc i32 %273 to i16 + %276 = atomicrmw nand i16* %274, i16 %275 monotonic + %277 = xor i16 %276, -1 + %278 = and i16 %277, %275 + store i16 %278, i16* @ss, align 2 + %279 = load i8* @uc, align 1 + %280 = zext i8 %279 to i32 + %281 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %282 = trunc i32 %280 to i16 + %283 = atomicrmw nand i16* %281, i16 %282 monotonic + %284 = xor i16 %283, -1 + %285 = and 
i16 %284, %282 + store i16 %285, i16* @us, align 2 + %286 = load i8* @uc, align 1 + %287 = zext i8 %286 to i32 + %288 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %289 = atomicrmw nand i32* %288, i32 %287 monotonic + %290 = xor i32 %289, -1 + %291 = and i32 %290, %287 + store i32 %291, i32* @si, align 4 + %292 = load i8* @uc, align 1 + %293 = zext i8 %292 to i32 + %294 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %295 = atomicrmw nand i32* %294, i32 %293 monotonic + %296 = xor i32 %295, -1 + %297 = and i32 %296, %293 + store i32 %297, i32* @ui, align 4 + %298 = load i8* @uc, align 1 + %299 = zext i8 %298 to i64 + %300 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %301 = atomicrmw nand i64* %300, i64 %299 monotonic + %302 = xor i64 %301, -1 + %303 = and i64 %302, %299 + store i64 %303, i64* @sl, align 8 + %304 = load i8* @uc, align 1 + %305 = zext i8 %304 to i64 + %306 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %307 = atomicrmw nand i64* %306, i64 %305 monotonic + %308 = xor i64 %307, -1 + %309 = and i64 %308, %305 + store i64 %309, i64* @ul, align 8 + %310 = load i8* @uc, align 1 + %311 = zext i8 %310 to i64 + %312 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %313 = atomicrmw nand i64* %312, i64 %311 monotonic + %314 = xor i64 %313, -1 + %315 = and i64 %314, %311 + store i64 %315, i64* @sll, align 8 + %316 = load i8* @uc, align 1 + %317 = zext i8 %316 to i64 + %318 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %319 = atomicrmw nand i64* %318, i64 %317 monotonic + %320 = xor i64 %319, -1 + %321 = and i64 %320, %317 + store i64 %321, i64* @ull, align 8 + br label %return + +return: ; preds = %entry + ret void } define void @test_compare_and_swap() nounwind { entry: - load i8* @sc, align 1 ; <i8>:0 [#uses=1] - zext i8 %0 to i32 ; <i32>:1 [#uses=1] - load i8* @uc, align 1 ; <i8>:2 [#uses=1] - zext i8 %2 to i32 ; <i32>:3 [#uses=1] - trunc i32 %3 to i8 ; <i8>:4 [#uses=1] - trunc i32 %1 to i8 ; <i8>:5 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %4, i8 %5 ) ; <i8>:6 [#uses=1] - store i8 %6, i8* @sc, align 1 - load i8* @sc, align 1 ; <i8>:7 [#uses=1] - zext i8 %7 to i32 ; <i32>:8 [#uses=1] - load i8* @uc, align 1 ; <i8>:9 [#uses=1] - zext i8 %9 to i32 ; <i32>:10 [#uses=1] - trunc i32 %10 to i8 ; <i8>:11 [#uses=1] - trunc i32 %8 to i8 ; <i8>:12 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %11, i8 %12 ) ; <i8>:13 [#uses=1] - store i8 %13, i8* @uc, align 1 - load i8* @sc, align 1 ; <i8>:14 [#uses=1] - sext i8 %14 to i16 ; <i16>:15 [#uses=1] - zext i16 %15 to i32 ; <i32>:16 [#uses=1] - load i8* @uc, align 1 ; <i8>:17 [#uses=1] - zext i8 %17 to i32 ; <i32>:18 [#uses=1] - bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:19 [#uses=1] - trunc i32 %18 to i16 ; <i16>:20 [#uses=1] - trunc i32 %16 to i16 ; <i16>:21 [#uses=1] - call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %19, i16 %20, i16 %21 ) ; <i16>:22 [#uses=1] - store i16 %22, i16* @ss, align 2 - load i8* @sc, align 1 ; <i8>:23 [#uses=1] - sext i8 %23 to i16 ; <i16>:24 [#uses=1] - zext i16 %24 to i32 ; <i32>:25 [#uses=1] - load i8* @uc, align 1 ; <i8>:26 [#uses=1] - zext i8 %26 to i32 ; <i32>:27 [#uses=1] - bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:28 [#uses=1] - trunc i32 %27 to i16 ; <i16>:29 [#uses=1] - trunc i32 %25 to i16 ; <i16>:30 [#uses=1] - call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %28, i16 %29, i16 %30 ) ; <i16>:31 [#uses=1] - store i16 %31, i16* @us, align 2 - load i8* @sc, align 1 ; <i8>:32 [#uses=1] - sext i8 %32 to i32 ; <i32>:33 [#uses=1] - load 
i8* @uc, align 1 ; <i8>:34 [#uses=1] - zext i8 %34 to i32 ; <i32>:35 [#uses=1] - bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:36 [#uses=1] - call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %36, i32 %35, i32 %33 ) ; <i32>:37 [#uses=1] - store i32 %37, i32* @si, align 4 - load i8* @sc, align 1 ; <i8>:38 [#uses=1] - sext i8 %38 to i32 ; <i32>:39 [#uses=1] - load i8* @uc, align 1 ; <i8>:40 [#uses=1] - zext i8 %40 to i32 ; <i32>:41 [#uses=1] - bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:42 [#uses=1] - call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %42, i32 %41, i32 %39 ) ; <i32>:43 [#uses=1] - store i32 %43, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:44 [#uses=1] - sext i8 %44 to i64 ; <i64>:45 [#uses=1] - load i8* @uc, align 1 ; <i8>:46 [#uses=1] - zext i8 %46 to i64 ; <i64>:47 [#uses=1] - bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:48 [#uses=1] - call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %48, i64 %47, i64 %45 ) ; <i64>:49 [#uses=1] - store i64 %49, i64* @sl, align 8 - load i8* @sc, align 1 ; <i8>:50 [#uses=1] - sext i8 %50 to i64 ; <i64>:51 [#uses=1] - load i8* @uc, align 1 ; <i8>:52 [#uses=1] - zext i8 %52 to i64 ; <i64>:53 [#uses=1] - bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:54 [#uses=1] - call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %54, i64 %53, i64 %51 ) ; <i64>:55 [#uses=1] - store i64 %55, i64* @ul, align 8 - load i8* @sc, align 1 ; <i8>:56 [#uses=1] - sext i8 %56 to i64 ; <i64>:57 [#uses=1] - load i8* @uc, align 1 ; <i8>:58 [#uses=1] - zext i8 %58 to i64 ; <i64>:59 [#uses=1] - bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:60 [#uses=1] - call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %60, i64 %59, i64 %57 ) ; <i64>:61 [#uses=1] - store i64 %61, i64* @sll, align 8 - load i8* @sc, align 1 ; <i8>:62 [#uses=1] - sext i8 %62 to i64 ; <i64>:63 [#uses=1] - load i8* @uc, align 1 ; <i8>:64 [#uses=1] - zext i8 %64 to i64 ; <i64>:65 [#uses=1] - bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:66 [#uses=1] - call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %66, i64 %65, i64 %63 ) ; <i64>:67 [#uses=1] - store i64 %67, i64* @ull, align 8 - load i8* @sc, align 1 ; <i8>:68 [#uses=1] - zext i8 %68 to i32 ; <i32>:69 [#uses=1] - load i8* @uc, align 1 ; <i8>:70 [#uses=1] - zext i8 %70 to i32 ; <i32>:71 [#uses=1] - trunc i32 %71 to i8 ; <i8>:72 [#uses=2] - trunc i32 %69 to i8 ; <i8>:73 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %72, i8 %73 ) ; <i8>:74 [#uses=1] - icmp eq i8 %74, %72 ; <i1>:75 [#uses=1] - zext i1 %75 to i8 ; <i8>:76 [#uses=1] - zext i8 %76 to i32 ; <i32>:77 [#uses=1] - store i32 %77, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:78 [#uses=1] - zext i8 %78 to i32 ; <i32>:79 [#uses=1] - load i8* @uc, align 1 ; <i8>:80 [#uses=1] - zext i8 %80 to i32 ; <i32>:81 [#uses=1] - trunc i32 %81 to i8 ; <i8>:82 [#uses=2] - trunc i32 %79 to i8 ; <i8>:83 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %82, i8 %83 ) ; <i8>:84 [#uses=1] - icmp eq i8 %84, %82 ; <i1>:85 [#uses=1] - zext i1 %85 to i8 ; <i8>:86 [#uses=1] - zext i8 %86 to i32 ; <i32>:87 [#uses=1] - store i32 %87, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:88 [#uses=1] - sext i8 %88 to i16 ; <i16>:89 [#uses=1] - zext i16 %89 to i32 ; <i32>:90 [#uses=1] - load i8* @uc, align 1 ; <i8>:91 [#uses=1] - zext i8 %91 to i32 ; <i32>:92 [#uses=1] - trunc i32 %92 to i8 ; <i8>:93 [#uses=2] - trunc i32 %90 to i8 ; <i8>:94 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 ) ; <i8>:95 [#uses=1] - 
icmp eq i8 %95, %93 ; <i1>:96 [#uses=1] - zext i1 %96 to i8 ; <i8>:97 [#uses=1] - zext i8 %97 to i32 ; <i32>:98 [#uses=1] - store i32 %98, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:99 [#uses=1] - sext i8 %99 to i16 ; <i16>:100 [#uses=1] - zext i16 %100 to i32 ; <i32>:101 [#uses=1] - load i8* @uc, align 1 ; <i8>:102 [#uses=1] - zext i8 %102 to i32 ; <i32>:103 [#uses=1] - trunc i32 %103 to i8 ; <i8>:104 [#uses=2] - trunc i32 %101 to i8 ; <i8>:105 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 ) ; <i8>:106 [#uses=1] - icmp eq i8 %106, %104 ; <i1>:107 [#uses=1] - zext i1 %107 to i8 ; <i8>:108 [#uses=1] - zext i8 %108 to i32 ; <i32>:109 [#uses=1] - store i32 %109, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:110 [#uses=1] - sext i8 %110 to i32 ; <i32>:111 [#uses=1] - load i8* @uc, align 1 ; <i8>:112 [#uses=1] - zext i8 %112 to i32 ; <i32>:113 [#uses=1] - trunc i32 %113 to i8 ; <i8>:114 [#uses=2] - trunc i32 %111 to i8 ; <i8>:115 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 ) ; <i8>:116 [#uses=1] - icmp eq i8 %116, %114 ; <i1>:117 [#uses=1] - zext i1 %117 to i8 ; <i8>:118 [#uses=1] - zext i8 %118 to i32 ; <i32>:119 [#uses=1] - store i32 %119, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:120 [#uses=1] - sext i8 %120 to i32 ; <i32>:121 [#uses=1] - load i8* @uc, align 1 ; <i8>:122 [#uses=1] - zext i8 %122 to i32 ; <i32>:123 [#uses=1] - trunc i32 %123 to i8 ; <i8>:124 [#uses=2] - trunc i32 %121 to i8 ; <i8>:125 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 ) ; <i8>:126 [#uses=1] - icmp eq i8 %126, %124 ; <i1>:127 [#uses=1] - zext i1 %127 to i8 ; <i8>:128 [#uses=1] - zext i8 %128 to i32 ; <i32>:129 [#uses=1] - store i32 %129, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:130 [#uses=1] - sext i8 %130 to i64 ; <i64>:131 [#uses=1] - load i8* @uc, align 1 ; <i8>:132 [#uses=1] - zext i8 %132 to i64 ; <i64>:133 [#uses=1] - trunc i64 %133 to i8 ; <i8>:134 [#uses=2] - trunc i64 %131 to i8 ; <i8>:135 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 ) ; <i8>:136 [#uses=1] - icmp eq i8 %136, %134 ; <i1>:137 [#uses=1] - zext i1 %137 to i8 ; <i8>:138 [#uses=1] - zext i8 %138 to i32 ; <i32>:139 [#uses=1] - store i32 %139, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:140 [#uses=1] - sext i8 %140 to i64 ; <i64>:141 [#uses=1] - load i8* @uc, align 1 ; <i8>:142 [#uses=1] - zext i8 %142 to i64 ; <i64>:143 [#uses=1] - trunc i64 %143 to i8 ; <i8>:144 [#uses=2] - trunc i64 %141 to i8 ; <i8>:145 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 ) ; <i8>:146 [#uses=1] - icmp eq i8 %146, %144 ; <i1>:147 [#uses=1] - zext i1 %147 to i8 ; <i8>:148 [#uses=1] - zext i8 %148 to i32 ; <i32>:149 [#uses=1] - store i32 %149, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:150 [#uses=1] - sext i8 %150 to i64 ; <i64>:151 [#uses=1] - load i8* @uc, align 1 ; <i8>:152 [#uses=1] - zext i8 %152 to i64 ; <i64>:153 [#uses=1] - trunc i64 %153 to i8 ; <i8>:154 [#uses=2] - trunc i64 %151 to i8 ; <i8>:155 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 ) ; <i8>:156 [#uses=1] - icmp eq i8 %156, %154 ; <i1>:157 [#uses=1] - zext i1 %157 to i8 ; <i8>:158 [#uses=1] - zext i8 %158 to i32 ; <i32>:159 [#uses=1] - store i32 %159, i32* @ui, align 4 - load i8* @sc, align 1 ; <i8>:160 [#uses=1] - sext i8 %160 to i64 ; <i64>:161 
[#uses=1] - load i8* @uc, align 1 ; <i8>:162 [#uses=1] - zext i8 %162 to i64 ; <i64>:163 [#uses=1] - trunc i64 %163 to i8 ; <i8>:164 [#uses=2] - trunc i64 %161 to i8 ; <i8>:165 [#uses=1] - call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 ) ; <i8>:166 [#uses=1] - icmp eq i8 %166, %164 ; <i1>:167 [#uses=1] - zext i1 %167 to i8 ; <i8>:168 [#uses=1] - zext i8 %168 to i32 ; <i32>:169 [#uses=1] - store i32 %169, i32* @ui, align 4 - br label %return - -return: ; preds = %entry - ret void + %0 = load i8* @sc, align 1 + %1 = zext i8 %0 to i32 + %2 = load i8* @uc, align 1 + %3 = zext i8 %2 to i32 + %4 = trunc i32 %3 to i8 + %5 = trunc i32 %1 to i8 + %6 = cmpxchg i8* @sc, i8 %4, i8 %5 monotonic + store i8 %6, i8* @sc, align 1 + %7 = load i8* @sc, align 1 + %8 = zext i8 %7 to i32 + %9 = load i8* @uc, align 1 + %10 = zext i8 %9 to i32 + %11 = trunc i32 %10 to i8 + %12 = trunc i32 %8 to i8 + %13 = cmpxchg i8* @uc, i8 %11, i8 %12 monotonic + store i8 %13, i8* @uc, align 1 + %14 = load i8* @sc, align 1 + %15 = sext i8 %14 to i16 + %16 = zext i16 %15 to i32 + %17 = load i8* @uc, align 1 + %18 = zext i8 %17 to i32 + %19 = bitcast i8* bitcast (i16* @ss to i8*) to i16* + %20 = trunc i32 %18 to i16 + %21 = trunc i32 %16 to i16 + %22 = cmpxchg i16* %19, i16 %20, i16 %21 monotonic + store i16 %22, i16* @ss, align 2 + %23 = load i8* @sc, align 1 + %24 = sext i8 %23 to i16 + %25 = zext i16 %24 to i32 + %26 = load i8* @uc, align 1 + %27 = zext i8 %26 to i32 + %28 = bitcast i8* bitcast (i16* @us to i8*) to i16* + %29 = trunc i32 %27 to i16 + %30 = trunc i32 %25 to i16 + %31 = cmpxchg i16* %28, i16 %29, i16 %30 monotonic + store i16 %31, i16* @us, align 2 + %32 = load i8* @sc, align 1 + %33 = sext i8 %32 to i32 + %34 = load i8* @uc, align 1 + %35 = zext i8 %34 to i32 + %36 = bitcast i8* bitcast (i32* @si to i8*) to i32* + %37 = cmpxchg i32* %36, i32 %35, i32 %33 monotonic + store i32 %37, i32* @si, align 4 + %38 = load i8* @sc, align 1 + %39 = sext i8 %38 to i32 + %40 = load i8* @uc, align 1 + %41 = zext i8 %40 to i32 + %42 = bitcast i8* bitcast (i32* @ui to i8*) to i32* + %43 = cmpxchg i32* %42, i32 %41, i32 %39 monotonic + store i32 %43, i32* @ui, align 4 + %44 = load i8* @sc, align 1 + %45 = sext i8 %44 to i64 + %46 = load i8* @uc, align 1 + %47 = zext i8 %46 to i64 + %48 = bitcast i8* bitcast (i64* @sl to i8*) to i64* + %49 = cmpxchg i64* %48, i64 %47, i64 %45 monotonic + store i64 %49, i64* @sl, align 8 + %50 = load i8* @sc, align 1 + %51 = sext i8 %50 to i64 + %52 = load i8* @uc, align 1 + %53 = zext i8 %52 to i64 + %54 = bitcast i8* bitcast (i64* @ul to i8*) to i64* + %55 = cmpxchg i64* %54, i64 %53, i64 %51 monotonic + store i64 %55, i64* @ul, align 8 + %56 = load i8* @sc, align 1 + %57 = sext i8 %56 to i64 + %58 = load i8* @uc, align 1 + %59 = zext i8 %58 to i64 + %60 = bitcast i8* bitcast (i64* @sll to i8*) to i64* + %61 = cmpxchg i64* %60, i64 %59, i64 %57 monotonic + store i64 %61, i64* @sll, align 8 + %62 = load i8* @sc, align 1 + %63 = sext i8 %62 to i64 + %64 = load i8* @uc, align 1 + %65 = zext i8 %64 to i64 + %66 = bitcast i8* bitcast (i64* @ull to i8*) to i64* + %67 = cmpxchg i64* %66, i64 %65, i64 %63 monotonic + store i64 %67, i64* @ull, align 8 + %68 = load i8* @sc, align 1 + %69 = zext i8 %68 to i32 + %70 = load i8* @uc, align 1 + %71 = zext i8 %70 to i32 + %72 = trunc i32 %71 to i8 + %73 = trunc i32 %69 to i8 + %74 = cmpxchg i8* @sc, i8 %72, i8 %73 monotonic + %75 = icmp eq i8 %74, %72 + %76 = zext i1 %75 to i8 + %77 = zext i8 %76 to i32 + store i32 %77, 
i32* @ui, align 4 + %78 = load i8* @sc, align 1 + %79 = zext i8 %78 to i32 + %80 = load i8* @uc, align 1 + %81 = zext i8 %80 to i32 + %82 = trunc i32 %81 to i8 + %83 = trunc i32 %79 to i8 + %84 = cmpxchg i8* @uc, i8 %82, i8 %83 monotonic + %85 = icmp eq i8 %84, %82 + %86 = zext i1 %85 to i8 + %87 = zext i8 %86 to i32 + store i32 %87, i32* @ui, align 4 + %88 = load i8* @sc, align 1 + %89 = sext i8 %88 to i16 + %90 = zext i16 %89 to i32 + %91 = load i8* @uc, align 1 + %92 = zext i8 %91 to i32 + %93 = trunc i32 %92 to i8 + %94 = trunc i32 %90 to i8 + %95 = cmpxchg i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 monotonic + %96 = icmp eq i8 %95, %93 + %97 = zext i1 %96 to i8 + %98 = zext i8 %97 to i32 + store i32 %98, i32* @ui, align 4 + %99 = load i8* @sc, align 1 + %100 = sext i8 %99 to i16 + %101 = zext i16 %100 to i32 + %102 = load i8* @uc, align 1 + %103 = zext i8 %102 to i32 + %104 = trunc i32 %103 to i8 + %105 = trunc i32 %101 to i8 + %106 = cmpxchg i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 monotonic + %107 = icmp eq i8 %106, %104 + %108 = zext i1 %107 to i8 + %109 = zext i8 %108 to i32 + store i32 %109, i32* @ui, align 4 + %110 = load i8* @sc, align 1 + %111 = sext i8 %110 to i32 + %112 = load i8* @uc, align 1 + %113 = zext i8 %112 to i32 + %114 = trunc i32 %113 to i8 + %115 = trunc i32 %111 to i8 + %116 = cmpxchg i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 monotonic + %117 = icmp eq i8 %116, %114 + %118 = zext i1 %117 to i8 + %119 = zext i8 %118 to i32 + store i32 %119, i32* @ui, align 4 + %120 = load i8* @sc, align 1 + %121 = sext i8 %120 to i32 + %122 = load i8* @uc, align 1 + %123 = zext i8 %122 to i32 + %124 = trunc i32 %123 to i8 + %125 = trunc i32 %121 to i8 + %126 = cmpxchg i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 monotonic + %127 = icmp eq i8 %126, %124 + %128 = zext i1 %127 to i8 + %129 = zext i8 %128 to i32 + store i32 %129, i32* @ui, align 4 + %130 = load i8* @sc, align 1 + %131 = sext i8 %130 to i64 + %132 = load i8* @uc, align 1 + %133 = zext i8 %132 to i64 + %134 = trunc i64 %133 to i8 + %135 = trunc i64 %131 to i8 + %136 = cmpxchg i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 monotonic + %137 = icmp eq i8 %136, %134 + %138 = zext i1 %137 to i8 + %139 = zext i8 %138 to i32 + store i32 %139, i32* @ui, align 4 + %140 = load i8* @sc, align 1 + %141 = sext i8 %140 to i64 + %142 = load i8* @uc, align 1 + %143 = zext i8 %142 to i64 + %144 = trunc i64 %143 to i8 + %145 = trunc i64 %141 to i8 + %146 = cmpxchg i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 monotonic + %147 = icmp eq i8 %146, %144 + %148 = zext i1 %147 to i8 + %149 = zext i8 %148 to i32 + store i32 %149, i32* @ui, align 4 + %150 = load i8* @sc, align 1 + %151 = sext i8 %150 to i64 + %152 = load i8* @uc, align 1 + %153 = zext i8 %152 to i64 + %154 = trunc i64 %153 to i8 + %155 = trunc i64 %151 to i8 + %156 = cmpxchg i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 monotonic + %157 = icmp eq i8 %156, %154 + %158 = zext i1 %157 to i8 + %159 = zext i8 %158 to i32 + store i32 %159, i32* @ui, align 4 + %160 = load i8* @sc, align 1 + %161 = sext i8 %160 to i64 + %162 = load i8* @uc, align 1 + %163 = zext i8 %162 to i64 + %164 = trunc i64 %163 to i8 + %165 = trunc i64 %161 to i8 + %166 = cmpxchg i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 monotonic + %167 = icmp eq i8 %166, %164 + %168 = zext i1 %167 to i8 + %169 = zext i8 %168 to i32 + store i32 %169, i32* @ui, align 4 + br label %return + +return: ; preds = %entry + ret void } -declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind - 
-declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
-
-declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64*, i64, i64) nounwind
-
 define void @test_lock() nounwind {
 entry:
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %10, i64 1 ) ; <i64>:11 [#uses=1]
- store i64 %11, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %12, i64 1 ) ; <i64>:13 [#uses=1]
- store i64 %13, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:14 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %14, i64 1 ) ; <i64>:15 [#uses=1]
- store i64 %15, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:16 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %16, i64 1 ) ; <i64>:17 [#uses=1]
- store i64 %17, i64* @ull, align 8
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
- volatile store i8 0, i8* @sc, align 1
- volatile store i8 0, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:18 [#uses=1]
- volatile store i16 0, i16* %18, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:19 [#uses=1]
- volatile store i16 0, i16* %19, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- volatile store i32 0, i32* %20, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:21 [#uses=1]
- volatile store i32 0, i32* %21, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:22 [#uses=1]
- volatile store i64 0, i64* %22, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:23 [#uses=1]
- volatile store i64 0, i64* %23, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:24 [#uses=1]
- volatile store i64 0, i64* %24, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:25 [#uses=1]
- volatile store i64 0, i64* %25, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = atomicrmw xchg i8* @sc, i8 1 monotonic
+ store i8 %0, i8* @sc, align 1
+ %1 = atomicrmw xchg i8* @uc, i8 1 monotonic
+ store i8 %1, i8* @uc, align 1
+ %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %3 = atomicrmw xchg i16* %2, i16 1 monotonic
+ store i16 %3, i16* @ss, align 2
+ %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %5 = atomicrmw xchg i16* %4, i16 1 monotonic
+ store i16 %5, i16* @us, align 2
+ %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %7 = atomicrmw xchg i32* %6, i32 1 monotonic
+ store i32 %7, i32* @si, align 4
+ %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %9 = atomicrmw xchg i32* %8, i32 1 monotonic
+ store i32 %9, i32* @ui, align 4
+ %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %11 = atomicrmw xchg i64* %10, i64 1 monotonic
+ store i64 %11, i64* @sl, align 8
+ %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %13 = atomicrmw xchg i64* %12, i64 1 monotonic
+ store i64 %13, i64* @ul, align 8
+ %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %15 = atomicrmw xchg i64* %14, i64 1 monotonic
+ store i64 %15, i64* @sll, align 8
+ %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %17 = atomicrmw xchg i64* %16, i64 1 monotonic
+ store i64 %17, i64* @ull, align 8
+ fence seq_cst
+ store volatile i8 0, i8* @sc, align 1
+ store volatile i8 0, i8* @uc, align 1
+ %18 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ store volatile i16 0, i16* %18, align 2
+ %19 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ store volatile i16 0, i16* %19, align 2
+ %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ store volatile i32 0, i32* %20, align 4
+ %21 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ store volatile i32 0, i32* %21, align 4
+ %22 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ store volatile i64 0, i64* %22, align 8
+ %23 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ store volatile i64 0, i64* %23, align 8
+ %24 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ store volatile i64 0, i64* %24, align 8
+ %25 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ store volatile i64 0, i64* %25, align 8
+ br label %return
+
+return: ; preds = %entry
+ ret void
 }
-
-declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.swap.i64.p0i64(i64*, i64) nounwind
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/test/CodeGen/X86/MachineSink-DbgValue.ll b/test/CodeGen/X86/MachineSink-DbgValue.ll
new file mode 100644
index 0000000..ea791a3
--- /dev/null
+++ b/test/CodeGen/X86/MachineSink-DbgValue.ll
@@ -0,0 +1,49 @@
+; RUN: llc < %s | FileCheck %s
+; Should sink matching DBG_VALUEs also.
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" +target triple = "x86_64-apple-macosx10.7.0" + +define i32 @foo(i32 %i, i32* nocapture %c) nounwind uwtable readonly ssp { + tail call void @llvm.dbg.value(metadata !{i32 %i}, i64 0, metadata !6), !dbg !12 + %ab = load i32* %c, align 1, !dbg !14 + tail call void @llvm.dbg.value(metadata !{i32* %c}, i64 0, metadata !7), !dbg !13 + tail call void @llvm.dbg.value(metadata !{i32 %ab}, i64 0, metadata !10), !dbg !14 + %cd = icmp eq i32 %i, 42, !dbg !15 + br i1 %cd, label %bb1, label %bb2, !dbg !15 + +bb1: ; preds = %0 +;CHECK: DEBUG_VALUE: a +;CHECK-NEXT: .loc 1 5 5 +;CHECK-NEXT: addl + %gh = add nsw i32 %ab, 2, !dbg !16 + br label %bb2, !dbg !16 + +bb2: + %.0 = phi i32 [ %gh, %bb1 ], [ 0, %0 ] + ret i32 %.0, !dbg !17 +} + +declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone + +!llvm.dbg.cu = !{!0} +!llvm.dbg.sp = !{!1} +!llvm.dbg.lv.foo = !{!6, !7, !10} + +!0 = metadata !{i32 589841, i32 0, i32 12, metadata !"a.c", metadata !"/private/tmp", metadata !"Apple clang version 3.0 (tags/Apple/clang-211.10.1) (based on LLVM 3.0svn)", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ] +!1 = metadata !{i32 589870, i32 0, metadata !2, metadata !"foo", metadata !"foo", metadata !"", metadata !2, i32 2, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32, i32*)* @foo, null, null} ; [ DW_TAG_subprogram ] +!2 = metadata !{i32 589865, metadata !"a.c", metadata !"/private/tmp", metadata !0} ; [ DW_TAG_file_type ] +!3 = metadata !{i32 589845, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ] +!4 = metadata !{metadata !5} +!5 = metadata !{i32 589860, metadata !0, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] +!6 = metadata !{i32 590081, metadata !1, metadata !"i", metadata !2, i32 16777218, metadata !5, i32 0} ; [ DW_TAG_arg_variable ] +!7 = metadata !{i32 590081, metadata !1, metadata !"c", metadata !2, i32 33554434, metadata !8, i32 0} ; [ DW_TAG_arg_variable ] +!8 = metadata !{i32 589839, metadata !0, metadata !"", null, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !9} ; [ DW_TAG_pointer_type ] +!9 = metadata !{i32 589860, metadata !0, metadata !"char", null, i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ] +!10 = metadata !{i32 590080, metadata !11, metadata !"a", metadata !2, i32 3, metadata !9, i32 0} ; [ DW_TAG_auto_variable ] +!11 = metadata !{i32 589835, metadata !1, i32 2, i32 25, metadata !2, i32 0} ; [ DW_TAG_lexical_block ] +!12 = metadata !{i32 2, i32 13, metadata !1, null} +!13 = metadata !{i32 2, i32 22, metadata !1, null} +!14 = metadata !{i32 3, i32 14, metadata !11, null} +!15 = metadata !{i32 4, i32 3, metadata !11, null} +!16 = metadata !{i32 5, i32 5, metadata !11, null} +!17 = metadata !{i32 7, i32 1, metadata !11, null} diff --git a/test/CodeGen/X86/MachineSink-eflags.ll b/test/CodeGen/X86/MachineSink-eflags.ll new file mode 100644 index 0000000..5b8c7b2 --- /dev/null +++ b/test/CodeGen/X86/MachineSink-eflags.ll @@ -0,0 +1,74 @@ +; RUN: llc < %s | FileCheck %s +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" +target triple = "x86_64-pc-linux" + + +%0 = type <{ i64, i64, %1, %1, [21 x %2] }> +%1 = type <{ 
i64, i64, i64 }> +%2 = type <{ i32, i32, i8 addrspace(2)* }> +%3 = type { i8*, i8*, i8*, i8*, i32 } +%4 = type <{ %5*, i8*, i32, i32, [4 x i64], [4 x i64], [4 x i64], [4 x i64], [4 x i64] }> +%5 = type <{ void (i32)*, i8*, i32 (i8*, ...)* }> + +define void @foo(i8* nocapture %_stubArgs) nounwind { +entry: + %i0 = alloca i8*, align 8 + %i2 = alloca i8*, align 8 + %b.i = alloca [16 x <2 x double>], align 16 + %conv = bitcast i8* %_stubArgs to i32* + %tmp1 = load i32* %conv, align 4 + %ptr8 = getelementptr i8* %_stubArgs, i64 16 + %i4 = bitcast i8* %ptr8 to <2 x double>* + %ptr20 = getelementptr i8* %_stubArgs, i64 48 + %i7 = bitcast i8* %ptr20 to <2 x double> addrspace(1)** + %tmp21 = load <2 x double> addrspace(1)** %i7, align 8 + %ptr28 = getelementptr i8* %_stubArgs, i64 64 + %i9 = bitcast i8* %ptr28 to i32* + %tmp29 = load i32* %i9, align 4 + %ptr32 = getelementptr i8* %_stubArgs, i64 68 + %i10 = bitcast i8* %ptr32 to i32* + %tmp33 = load i32* %i10, align 4 + %tmp17.i = mul i32 10, 20 + %tmp19.i = add i32 %tmp17.i, %tmp33 + %conv21.i = zext i32 %tmp19.i to i64 + %tmp6.i = and i32 42, -32 + %tmp42.i = add i32 %tmp6.i, 17 + %tmp44.i = insertelement <2 x i32> undef, i32 %tmp42.i, i32 1 + %tmp96676677.i = or i32 17, -4 + %ptr4438.i = getelementptr inbounds [16 x <2 x double>]* %b.i, i64 0, i64 0 + %arrayidx4506.i = getelementptr [16 x <2 x double>]* %b.i, i64 0, i64 4 + %tmp52.i = insertelement <2 x i32> %tmp44.i, i32 0, i32 0 + %tmp78.i = extractelement <2 x i32> %tmp44.i, i32 1 + %tmp97.i = add i32 %tmp78.i, %tmp96676677.i + %tmp99.i = insertelement <2 x i32> %tmp52.i, i32 %tmp97.i, i32 1 + %tmp154.i = extractelement <2 x i32> %tmp99.i, i32 1 + %tmp156.i = extractelement <2 x i32> %tmp52.i, i32 0 + %tmp158.i = urem i32 %tmp156.i, %tmp1 + %i38 = mul i32 %tmp154.i, %tmp29 + %i39 = add i32 %tmp158.i, %i38 + %conv160.i = zext i32 %i39 to i64 + %tmp22.sum652.i = add i64 %conv160.i, %conv21.i + %arrayidx161.i = getelementptr <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum652.i + %tmp162.i = load <2 x double> addrspace(1)* %arrayidx161.i, align 16 + %tmp222.i = add i32 %tmp154.i, 1 + %i43 = mul i32 %tmp222.i, %tmp29 + %i44 = add i32 %tmp158.i, %i43 + %conv228.i = zext i32 %i44 to i64 + %tmp22.sum656.i = add i64 %conv228.i, %conv21.i + %arrayidx229.i = getelementptr <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum656.i + %tmp230.i = load <2 x double> addrspace(1)* %arrayidx229.i, align 16 + %cmp432.i = icmp ult i32 %tmp156.i, %tmp1 + +; %shl.i should not be sunk below the compare. 
+; CHECK: cmpl +; CHECK-NOT: shlq + + %cond.i = select i1 %cmp432.i, <2 x double> %tmp162.i, <2 x double> zeroinitializer + store <2 x double> %cond.i, <2 x double>* %ptr4438.i, align 16 + %cond448.i = select i1 %cmp432.i, <2 x double> %tmp230.i, <2 x double> zeroinitializer + store <2 x double> %cond448.i, <2 x double>* %arrayidx4506.i, align 16 + ret void +} + + + diff --git a/test/CodeGen/X86/SIMD/dg.exp b/test/CodeGen/X86/SIMD/dg.exp deleted file mode 100644 index 629a147..0000000 --- a/test/CodeGen/X86/SIMD/dg.exp +++ /dev/null @@ -1,5 +0,0 @@ -load_lib llvm.exp - -if { [llvm_supports_target X86] } { - RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]] -} diff --git a/test/CodeGen/X86/SIMD/notvunpcklpd.ll b/test/CodeGen/X86/SIMD/notvunpcklpd.ll deleted file mode 100644 index 3afc2f2..0000000 --- a/test/CodeGen/X86/SIMD/notvunpcklpd.ll +++ /dev/null @@ -1,20 +0,0 @@ -; RUN: llc < %s -mattr=+avx | FileCheck %s - -target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" -target triple = "x86_64-unknown-linux-gnu" - -define void @try_([2 x <4 x double>]* noalias %incarray, [2 x <4 x double>]* noalias %incarrayb ) { -entry: - %incarray1 = alloca [2 x <4 x double>]*, align 8 - %incarrayb1 = alloca [2 x <4 x double>]*, align 8 - %carray = alloca [2 x <4 x double>], align 16 - %r = getelementptr [2 x <4 x double>]* %incarray, i32 0, i32 0 - %rb = getelementptr [2 x <4 x double>]* %incarrayb, i32 0, i32 0 - %r3 = load <4 x double>* %r, align 8 - %r4 = load <4 x double>* %rb, align 8 - %r11 = shufflevector <4 x double> %r3, <4 x double> %r4, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x double>> [#uses=1] -; CHECK-NOT: vunpcklpd - %r12 = getelementptr [2 x <4 x double>]* %carray, i32 0, i32 1 - store <4 x double> %r11, <4 x double>* %r12, align 4 - ret void -} diff --git a/test/CodeGen/X86/SIMD/notvunpcklps.ll b/test/CodeGen/X86/SIMD/notvunpcklps.ll deleted file mode 100644 index 19daa3e..0000000 --- a/test/CodeGen/X86/SIMD/notvunpcklps.ll +++ /dev/null @@ -1,20 +0,0 @@ -; RUN: llc < %s -mattr=+avx | FileCheck %s - -target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" -target triple = "x86_64-unknown-linux-gnu" - -define void @try_([2 x <8 x float>]* noalias %incarray, [2 x <8 x float>]* noalias %incarrayb ) { -enmtry: - %incarray1 = alloca [2 x <8 x float>]*, align 8 - %incarrayb1 = alloca [2 x <8 x float>]*, align 8 - %carray = alloca [2 x <8 x float>], align 16 - %r = getelementptr [2 x <8 x float>]* %incarray, i32 0, i32 0 - %rb = getelementptr [2 x <8 x float>]* %incarrayb, i32 0, i32 0 - %r3 = load <8 x float>* %r, align 8 - %r4 = load <8 x float>* %rb, align 8 - %r8 = shufflevector <8 x float> %r3, <8 x float> %r4, <8 x i32> < i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11 > ; <<8 x float>> [#uses=1] -; CHECK-NOT: vunpcklps - %r9 = getelementptr [2 x <8 x float>]* %carray, i32 0, i32 0 - store <8 x float> %r8, <8 x float>* %r9, align 4 - ret void -} diff --git a/test/CodeGen/X86/SIMD/vunpcklpd.ll b/test/CodeGen/X86/SIMD/vunpcklpd.ll deleted file mode 100644 index 60d23a4..0000000 --- a/test/CodeGen/X86/SIMD/vunpcklpd.ll +++ /dev/null @@ -1,20 +0,0 @@ -; RUN: llc < %s -mattr=+avx | FileCheck %s - -target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" -target triple = 
"x86_64-unknown-linux-gnu" - -define void @try_([2 x <4 x double>]* noalias %incarray, [2 x <4 x double>]* noalias %incarrayb ) { -entry: - %incarray1 = alloca [2 x <4 x double>]*, align 8 - %incarrayb1 = alloca [2 x <4 x double>]*, align 8 - %carray = alloca [2 x <4 x double>], align 16 - %r = getelementptr [2 x <4 x double>]* %incarray, i32 0, i32 0 - %rb = getelementptr [2 x <4 x double>]* %incarrayb, i32 0, i32 0 - %r3 = load <4 x double>* %r, align 8 - %r4 = load <4 x double>* %rb, align 8 - %r11 = shufflevector <4 x double> %r3, <4 x double> %r4, <4 x i32> < i32 0, i32 4, i32 2, i32 6 > ; <<4 x double>> [#uses=1] -; CHECK: vunpcklpd - %r12 = getelementptr [2 x <4 x double>]* %carray, i32 0, i32 1 - store <4 x double> %r11, <4 x double>* %r12, align 4 - ret void -} diff --git a/test/CodeGen/X86/SIMD/vunpcklps.ll b/test/CodeGen/X86/SIMD/vunpcklps.ll deleted file mode 100644 index a87b299..0000000 --- a/test/CodeGen/X86/SIMD/vunpcklps.ll +++ /dev/null @@ -1,20 +0,0 @@ -; RUN: llc < %s -mattr=+avx | FileCheck %s - -target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" -target triple = "x86_64-unknown-linux-gnu" - -define void @try_([2 x <8 x float>]* noalias %incarray, [2 x <8 x float>]* noalias %incarrayb ) { -entry: - %incarray1 = alloca [2 x <8 x float>]*, align 8 - %incarrayb1 = alloca [2 x <8 x float>]*, align 8 - %carray = alloca [2 x <8 x float>], align 16 - %r = getelementptr [2 x <8 x float>]* %incarray, i32 0, i32 0 - %rb = getelementptr [2 x <8 x float>]* %incarrayb, i32 0, i32 0 - %r3 = load <8 x float>* %r, align 8 - %r4 = load <8 x float>* %rb, align 8 - %r11 = shufflevector <8 x float> %r3, <8 x float> %r4, <8 x i32> < i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13 > ; <<8 x float>> [#uses=1] -; CHECK: vunpcklps - %r12 = getelementptr [2 x <8 x float>]* %carray, i32 0, i32 1 - store <8 x float> %r11, <8 x float>* %r12, align 4 - ret void -} diff --git a/test/CodeGen/X86/alignment-2.ll b/test/CodeGen/X86/alignment-2.ll new file mode 100644 index 0000000..cc709b5 --- /dev/null +++ b/test/CodeGen/X86/alignment-2.ll @@ -0,0 +1,28 @@ +; RUN: llc < %s -mtriple i386-apple-darwin10 | FileCheck %s +; <rdar://problem/10058036> + +%struct._psqlSettings = type { %struct.pg_conn*, i32, %struct.__sFILE*, i8, %struct.printQueryOpt, i8*, i8, i32, %struct.__sFILE*, i8, i32, i8*, i8*, i8*, i64, i8, %struct.__sFILE*, %struct._variable*, i8, i8, i8, i8, i8, i32, i32, i32, i32, i32, i8*, i8*, i8*, i32 } +%struct.pg_conn = type opaque +%struct.__sFILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 } +%struct.__sbuf = type { i8*, i32 } +%struct.__sFILEX = type opaque +%struct.printQueryOpt = type { %struct.printTableOpt, i8*, i8, i8*, i8**, i8, i8, i8* } +%struct.printTableOpt = type { i32, i8, i16, i16, i8, i8, i8, i32, %struct.printTextFormat*, i8*, i8*, i8, i8*, i32, i32, i32 } +%struct.printTextFormat = type { i8*, [4 x %struct.printTextLineFormat], i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8 } +%struct.printTextLineFormat = type { i8*, i8*, i8*, i8* } +%struct._variable = type { i8*, i8*, void (i8*)*, %struct._variable* } +%struct.pg_result = type opaque + +@pset = external global %struct._psqlSettings + +define signext i8 @do_lo_list() nounwind optsize ssp { +bb: +; CHECK: do_lo_list +; CHECK-NOT: movaps + 
%myopt = alloca %struct.printQueryOpt, align 4 + %tmp = bitcast %struct.printQueryOpt* %myopt to i8* + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* bitcast (%struct.printQueryOpt* getelementptr inbounds (%struct._psqlSettings* @pset, i32 0, i32 4) to i8*), i32 76, i32 4, i1 false) + ret i8 0 +} + +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind diff --git a/test/CodeGen/X86/alignment.ll b/test/CodeGen/X86/alignment.ll index 7e91115..5908c0c 100644 --- a/test/CodeGen/X86/alignment.ll +++ b/test/CodeGen/X86/alignment.ll @@ -40,4 +40,4 @@ ; CHECK: .comm GlobalBS,384,8 @GlobalCS = common global { [384 x i8] } zeroinitializer, align 2, section "foo" -; CHECK: .comm GlobalCS,384,2
\ No newline at end of file +; CHECK: .comm GlobalCS,384,2 diff --git a/test/CodeGen/X86/asm-label2.ll b/test/CodeGen/X86/asm-label2.ll index 0b5de34..8715aa9 100644 --- a/test/CodeGen/X86/asm-label2.ll +++ b/test/CodeGen/X86/asm-label2.ll @@ -16,7 +16,11 @@ invoke.cont: ; preds = %entry ret void lpad: ; preds = %entry + %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable } declare void @_zed() ssp align 2 + +declare i32 @__gxx_personality_v0(...) diff --git a/test/CodeGen/X86/atomic-load-store-wide.ll b/test/CodeGen/X86/atomic-load-store-wide.ll new file mode 100644 index 0000000..a9ebfef --- /dev/null +++ b/test/CodeGen/X86/atomic-load-store-wide.ll @@ -0,0 +1,19 @@ +; RUN: llc < %s -march=x86 | FileCheck %s + +; 64-bit load/store on x86-32 +; FIXME: The generated code can be substantially improved. + +define void @test1(i64* %ptr, i64 %val1) { +; CHECK: test1 +; CHECK: cmpxchg8b +; CHECK-NEXT: jne + store atomic i64 %val1, i64* %ptr seq_cst, align 8 + ret void +} + +define i64 @test2(i64* %ptr) { +; CHECK: test2 +; CHECK: cmpxchg8b + %val = load atomic i64* %ptr seq_cst, align 8 + ret i64 %val +} diff --git a/test/CodeGen/X86/atomic-load-store.ll b/test/CodeGen/X86/atomic-load-store.ll new file mode 100644 index 0000000..fee4585 --- /dev/null +++ b/test/CodeGen/X86/atomic-load-store.ll @@ -0,0 +1,23 @@ +; RUN: llc < %s -mtriple=x86_64-apple-macosx10.7.0 -verify-machineinstrs | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-apple-macosx10.7.0 -O0 | FileCheck %s + +define void @test1(i32* %ptr, i32 %val1) { +; CHECK: test1 +; CHECK: xchgl %esi, (%rdi) + store atomic i32 %val1, i32* %ptr seq_cst, align 4 + ret void +} + +define void @test2(i32* %ptr, i32 %val1) { +; CHECK: test2 +; CHECK: movl %esi, (%rdi) + store atomic i32 %val1, i32* %ptr release, align 4 + ret void +} + +define i32 @test3(i32* %ptr) { +; CHECK: test3 +; CHECK: movl (%rdi), %eax + %val = load atomic i32* %ptr seq_cst, align 4 + ret i32 %val +} diff --git a/test/CodeGen/X86/atomic-or.ll b/test/CodeGen/X86/atomic-or.ll index 164252d..3f02eaf 100644 --- a/test/CodeGen/X86/atomic-or.ll +++ b/test/CodeGen/X86/atomic-or.ll @@ -7,13 +7,11 @@ entry: %p.addr = alloca i64*, align 8 store i64* %p, i64** %p.addr, align 8 %tmp = load i64** %p.addr, align 8 - call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) ; CHECK: t1: ; CHECK: movl $2147483648, %eax ; CHECK: lock ; CHECK-NEXT: orq %r{{.*}}, (%r{{.*}}) - %0 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %tmp, i64 2147483648) - call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) + %0 = atomicrmw or i64* %tmp, i64 2147483648 seq_cst ret void } @@ -22,15 +20,9 @@ entry: %p.addr = alloca i64*, align 8 store i64* %p, i64** %p.addr, align 8 %tmp = load i64** %p.addr, align 8 - call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) ; CHECK: t2: -; CHECK-NOT: movl ; CHECK: lock ; CHECK-NEXT: orq $2147483644, (%r{{.*}}) - %0 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %tmp, i64 2147483644) - call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) + %0 = atomicrmw or i64* %tmp, i64 2147483644 seq_cst ret void } - -declare i64 @llvm.atomic.load.or.i64.p0i64(i64* nocapture, i64) nounwind -declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind diff --git a/test/CodeGen/X86/atomic_add.ll b/test/CodeGen/X86/atomic_add.ll index 26d25e2..1fce256 100644 --- a/test/CodeGen/X86/atomic_add.ll +++ b/test/CodeGen/X86/atomic_add.ll @@ -6,80 +6,74 @@ define void 
@sub1(i32* nocapture %p, i32 %v) nounwind ssp { entry: ; CHECK: sub1: ; CHECK: subl - %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 %v) ; <i32> [#uses=0] - ret void + %0 = atomicrmw sub i32* %p, i32 %v monotonic + ret void } define void @inc4(i64* nocapture %p) nounwind ssp { entry: ; CHECK: inc4: ; CHECK: incq - %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0] - ret void + %0 = atomicrmw add i64* %p, i64 1 monotonic + ret void } -declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind - define void @add8(i64* nocapture %p) nounwind ssp { entry: ; CHECK: add8: ; CHECK: addq $2 - %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 2) ; <i64> [#uses=0] - ret void + %0 = atomicrmw add i64* %p, i64 2 monotonic + ret void } define void @add4(i64* nocapture %p, i32 %v) nounwind ssp { entry: ; CHECK: add4: ; CHECK: addq - %0 = sext i32 %v to i64 ; <i64> [#uses=1] - %1 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 %0) ; <i64> [#uses=0] - ret void + %0 = sext i32 %v to i64 ; <i64> [#uses=1] + %1 = atomicrmw add i64* %p, i64 %0 monotonic + ret void } define void @inc3(i8* nocapture %p) nounwind ssp { entry: ; CHECK: inc3: ; CHECK: incb - %0 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 1) ; <i8> [#uses=0] - ret void + %0 = atomicrmw add i8* %p, i8 1 monotonic + ret void } -declare i8 @llvm.atomic.load.add.i8.p0i8(i8* nocapture, i8) nounwind - define void @add7(i8* nocapture %p) nounwind ssp { entry: ; CHECK: add7: ; CHECK: addb $2 - %0 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 2) ; <i8> [#uses=0] - ret void + %0 = atomicrmw add i8* %p, i8 2 monotonic + ret void } define void @add3(i8* nocapture %p, i32 %v) nounwind ssp { entry: ; CHECK: add3: ; CHECK: addb - %0 = trunc i32 %v to i8 ; <i8> [#uses=1] - %1 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 %0) ; <i8> [#uses=0] - ret void + %0 = trunc i32 %v to i8 ; <i8> [#uses=1] + %1 = atomicrmw add i8* %p, i8 %0 monotonic + ret void } define void @inc2(i16* nocapture %p) nounwind ssp { entry: ; CHECK: inc2: ; CHECK: incw - %0 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 1) ; <i16> [#uses=0] - ret void + %0 = atomicrmw add i16* %p, i16 1 monotonic + ret void } -declare i16 @llvm.atomic.load.add.i16.p0i16(i16* nocapture, i16) nounwind - define void @add6(i16* nocapture %p) nounwind ssp { entry: ; CHECK: add6: ; CHECK: addw $2 - %0 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 2) ; <i16> [#uses=0] - ret void + %0 = atomicrmw add i16* %p, i16 2 monotonic + ret void } define void @add2(i16* nocapture %p, i32 %v) nounwind ssp { @@ -87,52 +81,48 @@ entry: ; CHECK: add2: ; CHECK: addw %0 = trunc i32 %v to i16 ; <i16> [#uses=1] - %1 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 %0) ; <i16> [#uses=0] - ret void + %1 = atomicrmw add i16* %p, i16 %0 monotonic + ret void } define void @inc1(i32* nocapture %p) nounwind ssp { entry: ; CHECK: inc1: ; CHECK: incl - %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 1) ; <i32> [#uses=0] - ret void + %0 = atomicrmw add i32* %p, i32 1 monotonic + ret void } -declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind - define void @add5(i32* nocapture %p) nounwind ssp { entry: ; CHECK: add5: ; CHECK: addl $2 - %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 2) ; <i32> [#uses=0] - ret void + %0 = atomicrmw add i32* %p, i32 2 monotonic + ret void } define void @add1(i32* nocapture %p, i32 %v) nounwind ssp { 
entry: ; CHECK: add1: ; CHECK: addl - %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 %v) ; <i32> [#uses=0] - ret void + %0 = atomicrmw add i32* %p, i32 %v monotonic + ret void } define void @dec4(i64* nocapture %p) nounwind ssp { entry: ; CHECK: dec4: ; CHECK: decq - %0 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0] - ret void + %0 = atomicrmw sub i64* %p, i64 1 monotonic + ret void } -declare i64 @llvm.atomic.load.sub.i64.p0i64(i64* nocapture, i64) nounwind - define void @sub8(i64* nocapture %p) nounwind ssp { entry: ; CHECK: sub8: ; CHECK: subq $2 - %0 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 2) ; <i64> [#uses=0] - ret void + %0 = atomicrmw sub i64* %p, i64 2 monotonic + ret void } define void @sub4(i64* nocapture %p, i32 %v) nounwind ssp { @@ -140,26 +130,24 @@ entry: ; CHECK: sub4: ; CHECK: subq %0 = sext i32 %v to i64 ; <i64> [#uses=1] - %1 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 %0) ; <i64> [#uses=0] - ret void + %1 = atomicrmw sub i64* %p, i64 %0 monotonic + ret void } define void @dec3(i8* nocapture %p) nounwind ssp { entry: ; CHECK: dec3: ; CHECK: decb - %0 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 1) ; <i8> [#uses=0] - ret void + %0 = atomicrmw sub i8* %p, i8 1 monotonic + ret void } -declare i8 @llvm.atomic.load.sub.i8.p0i8(i8* nocapture, i8) nounwind - define void @sub7(i8* nocapture %p) nounwind ssp { entry: ; CHECK: sub7: ; CHECK: subb $2 - %0 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 2) ; <i8> [#uses=0] - ret void + %0 = atomicrmw sub i8* %p, i8 2 monotonic + ret void } define void @sub3(i8* nocapture %p, i32 %v) nounwind ssp { @@ -167,26 +155,24 @@ entry: ; CHECK: sub3: ; CHECK: subb %0 = trunc i32 %v to i8 ; <i8> [#uses=1] - %1 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 %0) ; <i8> [#uses=0] - ret void + %1 = atomicrmw sub i8* %p, i8 %0 monotonic + ret void } define void @dec2(i16* nocapture %p) nounwind ssp { entry: ; CHECK: dec2: ; CHECK: decw - %0 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 1) ; <i16> [#uses=0] - ret void + %0 = atomicrmw sub i16* %p, i16 1 monotonic + ret void } -declare i16 @llvm.atomic.load.sub.i16.p0i16(i16* nocapture, i16) nounwind - define void @sub6(i16* nocapture %p) nounwind ssp { entry: ; CHECK: sub6: ; CHECK: subw $2 - %0 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 2) ; <i16> [#uses=0] - ret void + %0 = atomicrmw sub i16* %p, i16 2 monotonic + ret void } define void @sub2(i16* nocapture %p, i32 %v) nounwind ssp { @@ -194,24 +180,22 @@ entry: ; CHECK: sub2: ; CHECK: negl %0 = trunc i32 %v to i16 ; <i16> [#uses=1] - %1 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 %0) ; <i16> [#uses=0] - ret void + %1 = atomicrmw sub i16* %p, i16 %0 monotonic + ret void } define void @dec1(i32* nocapture %p) nounwind ssp { entry: ; CHECK: dec1: ; CHECK: decl - %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 1) ; <i32> [#uses=0] - ret void + %0 = atomicrmw sub i32* %p, i32 1 monotonic + ret void } -declare i32 @llvm.atomic.load.sub.i32.p0i32(i32* nocapture, i32) nounwind - define void @sub5(i32* nocapture %p) nounwind ssp { entry: ; CHECK: sub5: ; CHECK: subl $2 - %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 2) ; <i32> [#uses=0] - ret void + %0 = atomicrmw sub i32* %p, i32 2 monotonic + ret void } diff --git a/test/CodeGen/X86/atomic_op.ll b/test/CodeGen/X86/atomic_op.ll index f3ade93..972dab2 100644 --- a/test/CodeGen/X86/atomic_op.ll +++ 
b/test/CodeGen/X86/atomic_op.ll @@ -24,87 +24,87 @@ entry: %tmp = load i32* %temp ; CHECK: lock ; CHECK: xaddl - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val1, i32 %tmp ) ; <i32>:0 [#uses=1] + %0 = atomicrmw add i32* %val1, i32 %tmp monotonic store i32 %0, i32* %old ; CHECK: lock ; CHECK: xaddl - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 30 ) ; <i32>:1 [#uses=1] + %1 = atomicrmw sub i32* %val2, i32 30 monotonic store i32 %1, i32* %old ; CHECK: lock ; CHECK: xaddl - call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:2 [#uses=1] + %2 = atomicrmw add i32* %val2, i32 1 monotonic store i32 %2, i32* %old ; CHECK: lock ; CHECK: xaddl - call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:3 [#uses=1] + %3 = atomicrmw sub i32* %val2, i32 1 monotonic store i32 %3, i32* %old ; CHECK: andl ; CHECK: lock ; CHECK: cmpxchgl - call i32 @llvm.atomic.load.and.i32.p0i32( i32* %andt, i32 4080 ) ; <i32>:4 [#uses=1] + %4 = atomicrmw and i32* %andt, i32 4080 monotonic store i32 %4, i32* %old ; CHECK: orl ; CHECK: lock ; CHECK: cmpxchgl - call i32 @llvm.atomic.load.or.i32.p0i32( i32* %ort, i32 4080 ) ; <i32>:5 [#uses=1] + %5 = atomicrmw or i32* %ort, i32 4080 monotonic store i32 %5, i32* %old ; CHECK: xorl ; CHECK: lock ; CHECK: cmpxchgl - call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %xort, i32 4080 ) ; <i32>:6 [#uses=1] + %6 = atomicrmw xor i32* %xort, i32 4080 monotonic store i32 %6, i32* %old ; CHECK: cmov ; CHECK: lock ; CHECK: cmpxchgl - call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 16 ) ; <i32>:7 [#uses=1] + %7 = atomicrmw min i32* %val2, i32 16 monotonic store i32 %7, i32* %old %neg = sub i32 0, 1 ; <i32> [#uses=1] ; CHECK: cmov ; CHECK: lock ; CHECK: cmpxchgl - call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 %neg ) ; <i32>:8 [#uses=1] + %8 = atomicrmw min i32* %val2, i32 %neg monotonic store i32 %8, i32* %old ; CHECK: cmov ; CHECK: lock ; CHECK: cmpxchgl - call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:9 [#uses=1] + %9 = atomicrmw max i32* %val2, i32 1 monotonic store i32 %9, i32* %old ; CHECK: cmov ; CHECK: lock ; CHECK: cmpxchgl - call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 0 ) ; <i32>:10 [#uses=1] + %10 = atomicrmw max i32* %val2, i32 0 monotonic store i32 %10, i32* %old ; CHECK: cmov ; CHECK: lock ; CHECK: cmpxchgl - call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %val2, i32 65535 ) ; <i32>:11 [#uses=1] + %11 = atomicrmw umax i32* %val2, i32 65535 monotonic store i32 %11, i32* %old ; CHECK: cmov ; CHECK: lock ; CHECK: cmpxchgl - call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %val2, i32 10 ) ; <i32>:12 [#uses=1] + %12 = atomicrmw umax i32* %val2, i32 10 monotonic store i32 %12, i32* %old ; CHECK: cmov ; CHECK: lock ; CHECK: cmpxchgl - call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:13 [#uses=1] + %13 = atomicrmw umin i32* %val2, i32 1 monotonic store i32 %13, i32* %old ; CHECK: cmov ; CHECK: lock ; CHECK: cmpxchgl - call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %val2, i32 10 ) ; <i32>:14 [#uses=1] + %14 = atomicrmw umin i32* %val2, i32 10 monotonic store i32 %14, i32* %old ; CHECK: xchgl %{{.*}}, {{.*}}(%esp) - call i32 @llvm.atomic.swap.i32.p0i32( i32* %val2, i32 1976 ) ; <i32>:15 [#uses=1] + %15 = atomicrmw xchg i32* %val2, i32 1976 monotonic store i32 %15, i32* %old %neg1 = sub i32 0, 10 ; <i32> [#uses=1] ; CHECK: lock ; CHECK: cmpxchgl - call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %val2, i32 %neg1, i32 1 ) ; <i32>:16 [#uses=1] + %16 = cmpxchg i32* 
%val2, i32 %neg1, i32 1 monotonic store i32 %16, i32* %old ; CHECK: lock ; CHECK: cmpxchgl - call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %val2, i32 1976, i32 1 ) ; <i32>:17 [#uses=1] + %17 = cmpxchg i32* %val2, i32 1976, i32 1 monotonic store i32 %17, i32* %old ret void } @@ -114,30 +114,6 @@ entry: ; CHECK: lock ; CHECK: cmpxchgl %{{.*}}, %gs:(%{{.*}}) - %0 = tail call i32 @llvm.atomic.cmp.swap.i32.p256i32(i32 addrspace(256)* %P, i32 0, i32 1) + %0 = cmpxchg i32 addrspace(256)* %P, i32 0, i32 1 monotonic ret void } - -declare i32 @llvm.atomic.cmp.swap.i32.p256i32(i32 addrspace(256)* nocapture, i32, i32) nounwind - -declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind - -declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind - -declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind - -declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind - -declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind - -declare i32 @llvm.atomic.load.min.i32.p0i32(i32*, i32) nounwind - -declare i32 @llvm.atomic.load.max.i32.p0i32(i32*, i32) nounwind - -declare i32 @llvm.atomic.load.umax.i32.p0i32(i32*, i32) nounwind - -declare i32 @llvm.atomic.load.umin.i32.p0i32(i32*, i32) nounwind - -declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind - -declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind diff --git a/test/CodeGen/X86/avx-256.ll b/test/CodeGen/X86/avx-256.ll deleted file mode 100644 index 20d31e7..0000000 --- a/test/CodeGen/X86/avx-256.ll +++ /dev/null @@ -1,15 +0,0 @@ -; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86 -mcpu=corei7 -mattr=avx | FileCheck %s - -@x = common global <8 x float> zeroinitializer, align 32 -@y = common global <4 x double> zeroinitializer, align 32 - -define void @zero() nounwind ssp { -entry: - ; CHECK: vxorps - ; CHECK: vmovaps - ; CHECK: vmovaps - store <8 x float> zeroinitializer, <8 x float>* @x, align 32 - store <4 x double> zeroinitializer, <4 x double>* @y, align 32 - ret void -} - diff --git a/test/CodeGen/X86/avx-256-arith.ll b/test/CodeGen/X86/avx-arith.ll index 5c512db..59988ca 100644 --- a/test/CodeGen/X86/avx-256-arith.ll +++ b/test/CodeGen/X86/avx-arith.ll @@ -114,3 +114,148 @@ entry: ret <8 x float> %div.i } +; CHECK: vsqrtss +define float @sqrtA(float %a) nounwind uwtable readnone ssp { +entry: + %conv1 = tail call float @sqrtf(float %a) nounwind readnone + ret float %conv1 +} + +declare double @sqrt(double) readnone + +; CHECK: vsqrtsd +define double @sqrtB(double %a) nounwind uwtable readnone ssp { +entry: + %call = tail call double @sqrt(double %a) nounwind readnone + ret double %call +} + +declare float @sqrtf(float) readnone + + +; CHECK: vextractf128 $1 +; CHECK-NEXT: vextractf128 $1 +; CHECK-NEXT: vpaddq %xmm +; CHECK-NEXT: vpaddq %xmm +; CHECK-NEXT: vinsertf128 $1 +define <4 x i64> @vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone { + %x = add <4 x i64> %i, %j + ret <4 x i64> %x +} + +; CHECK: vextractf128 $1 +; CHECK-NEXT: vextractf128 $1 +; CHECK-NEXT: vpaddd %xmm +; CHECK-NEXT: vpaddd %xmm +; CHECK-NEXT: vinsertf128 $1 +define <8 x i32> @vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind readnone { + %x = add <8 x i32> %i, %j + ret <8 x i32> %x +} + +; CHECK: vextractf128 $1 +; CHECK-NEXT: vextractf128 $1 +; CHECK-NEXT: vpaddw %xmm +; CHECK-NEXT: vpaddw %xmm +; CHECK-NEXT: vinsertf128 $1 +define <16 x i16> @vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone { + %x = add <16 x i16> %i, %j + ret <16 x i16> %x +} + +; CHECK: vextractf128 $1 +; CHECK-NEXT: vextractf128 $1 +; 
CHECK-NEXT: vpaddb %xmm +; CHECK-NEXT: vpaddb %xmm +; CHECK-NEXT: vinsertf128 $1 +define <32 x i8> @vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone { + %x = add <32 x i8> %i, %j + ret <32 x i8> %x +} + +; CHECK: vextractf128 $1 +; CHECK-NEXT: vextractf128 $1 +; CHECK-NEXT: vpsubq %xmm +; CHECK-NEXT: vpsubq %xmm +; CHECK-NEXT: vinsertf128 $1 +define <4 x i64> @vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone { + %x = sub <4 x i64> %i, %j + ret <4 x i64> %x +} + +; CHECK: vextractf128 $1 +; CHECK-NEXT: vextractf128 $1 +; CHECK-NEXT: vpsubd %xmm +; CHECK-NEXT: vpsubd %xmm +; CHECK-NEXT: vinsertf128 $1 +define <8 x i32> @vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone { + %x = sub <8 x i32> %i, %j + ret <8 x i32> %x +} + +; CHECK: vextractf128 $1 +; CHECK-NEXT: vextractf128 $1 +; CHECK-NEXT: vpsubw %xmm +; CHECK-NEXT: vpsubw %xmm +; CHECK-NEXT: vinsertf128 $1 +define <16 x i16> @vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone { + %x = sub <16 x i16> %i, %j + ret <16 x i16> %x +} + +; CHECK: vextractf128 $1 +; CHECK-NEXT: vextractf128 $1 +; CHECK-NEXT: vpsubb %xmm +; CHECK-NEXT: vpsubb %xmm +; CHECK-NEXT: vinsertf128 $1 +define <32 x i8> @vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone { + %x = sub <32 x i8> %i, %j + ret <32 x i8> %x +} + +; CHECK: vextractf128 $1 +; CHECK-NEXT: vextractf128 $1 +; CHECK-NEXT: vpmulld %xmm +; CHECK-NEXT: vpmulld %xmm +; CHECK-NEXT: vinsertf128 $1 +define <8 x i32> @vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone { + %x = mul <8 x i32> %i, %j + ret <8 x i32> %x +} + +; CHECK: vextractf128 $1 +; CHECK-NEXT: vextractf128 $1 +; CHECK-NEXT: vpmullw %xmm +; CHECK-NEXT: vpmullw %xmm +; CHECK-NEXT: vinsertf128 $1 +define <16 x i16> @vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone { + %x = mul <16 x i16> %i, %j + ret <16 x i16> %x +} + +; CHECK: vextractf128 $1 +; CHECK-NEXT: vextractf128 $1 +; CHECK-NEXT: vpmuludq %xmm +; CHECK-NEXT: vpsrlq $32, %xmm +; CHECK-NEXT: vpmuludq %xmm +; CHECK-NEXT: vpsllq $32, %xmm +; CHECK-NEXT: vpaddq %xmm +; CHECK-NEXT: vpmuludq %xmm +; CHECK-NEXT: vpsrlq $32, %xmm +; CHECK-NEXT: vpmuludq %xmm +; CHECK-NEXT: vpsllq $32, %xmm +; CHECK-NEXT: vpsrlq $32, %xmm +; CHECK-NEXT: vpmuludq %xmm +; CHECK-NEXT: vpsllq $32, %xmm +; CHECK-NEXT: vpaddq %xmm +; CHECK-NEXT: vpaddq %xmm +; CHECK-NEXT: vpsrlq $32, %xmm +; CHECK-NEXT: vpmuludq %xmm +; CHECK-NEXT: vpsllq $32, %xmm +; CHECK-NEXT: vpaddq %xmm +; CHECK-NEXT: vinsertf128 $1 +define <4 x i64> @mul-v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone { + %x = mul <4 x i64> %i, %j + ret <4 x i64> %x +} + diff --git a/test/CodeGen/X86/avx-basic.ll b/test/CodeGen/X86/avx-basic.ll new file mode 100644 index 0000000..0a46b08 --- /dev/null +++ b/test/CodeGen/X86/avx-basic.ll @@ -0,0 +1,107 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +@x = common global <8 x float> zeroinitializer, align 32 +@y = common global <4 x double> zeroinitializer, align 32 +@z = common global <4 x float> zeroinitializer, align 16 + +define void @zero128() nounwind ssp { +entry: + ; CHECK: vpxor + ; CHECK: vmovaps + store <4 x float> zeroinitializer, <4 x float>* @z, align 16 + ret void +} + +define void @zero256() nounwind ssp { +entry: + ; CHECK: vxorps + ; CHECK: vmovaps + ; CHECK: vmovaps + store <8 x float> zeroinitializer, <8 x float>* @x, align 32 + store <4 x double> zeroinitializer, <4 x double>* @y, align 32 + ret void +} + +; CHECK: vpcmpeqd +; CHECK: vinsertf128 $1 +define void @ones([0 x float]* nocapture %RET, [0 x float]* 
nocapture %aFOO) nounwind { +allocas: + %ptr2vec615 = bitcast [0 x float]* %RET to <8 x float>* + store <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float +0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float +0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, <8 x +float>* %ptr2vec615, align 32 + ret void +} + +; CHECK: vpcmpeqd +; CHECK: vinsertf128 $1 +define void @ones2([0 x i32]* nocapture %RET, [0 x i32]* nocapture %aFOO) nounwind { +allocas: + %ptr2vec615 = bitcast [0 x i32]* %RET to <8 x i32>* + store <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32>* %ptr2vec615, align 32 + ret void +} + +;;; Just make sure this doesn't crash +; CHECK: _ISelCrash +define <4 x i64> @ISelCrash(<4 x i64> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 4> + ret <4 x i64> %shuffle +} + +;;; +;;; Check that some 256-bit vectors are xformed into 128 ops +; CHECK: _A +; CHECK: vshufpd $1 +; CHECK-NEXT: vextractf128 $1 +; CHECK-NEXT: vshufpd $1 +; CHECK-NEXT: vinsertf128 $1 +define <4 x i64> @A(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 0, i32 7, i32 6> + ret <4 x i64> %shuffle +} + +; CHECK: _B +; CHECK: vshufpd $1, %ymm +define <4 x i64> @B(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 undef, i32 undef, i32 6> + ret <4 x i64> %shuffle +} + +; CHECK: movlhps +; CHECK-NEXT: vextractf128 $1 +; CHECK-NEXT: movlhps +; CHECK-NEXT: vinsertf128 $1 +define <4 x i64> @C(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 undef, i32 0, i32 undef, i32 6> + ret <4 x i64> %shuffle +} + +; CHECK: vpshufd $-96 +; CHECK: vpshufd $-6 +; CHECK: vinsertf128 $1 +define <8 x i32> @D(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 10, i32 10, i32 11, i32 11> + ret <8 x i32> %shuffle +} + +;;; Don't crash on movd +; CHECK: _VMOVZQI2PQI +; CHECK: vmovd (% +define <8 x i32> @VMOVZQI2PQI([0 x float]* nocapture %aFOO) nounwind { +allocas: + %ptrcast.i33.i = bitcast [0 x float]* %aFOO to i32* + %val.i34.i = load i32* %ptrcast.i33.i, align 4 + %ptroffset.i22.i992 = getelementptr [0 x float]* %aFOO, i64 0, i64 1 + %ptrcast.i23.i = bitcast float* %ptroffset.i22.i992 to i32* + %val.i24.i = load i32* %ptrcast.i23.i, align 4 + %updatedret.i30.i = insertelement <8 x i32> undef, i32 %val.i34.i, i32 1 + ret <8 x i32> %updatedret.i30.i +} + diff --git a/test/CodeGen/X86/avx-bitcast.ll b/test/CodeGen/X86/avx-bitcast.ll new file mode 100644 index 0000000..ecc71be --- /dev/null +++ b/test/CodeGen/X86/avx-bitcast.ll @@ -0,0 +1,10 @@ +; RUN: llc < %s -O0 -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +; CHECK: vmovsd (% +; CHECK-NEXT: vmovd %xmm +define i64 @bitcasti64tof64() { + %a = load double* undef + %b = bitcast double %a to i64 + ret i64 %b +} + diff --git a/test/CodeGen/X86/avx-blend.ll b/test/CodeGen/X86/avx-blend.ll new file mode 100644 index 0000000..7729491 --- /dev/null +++ b/test/CodeGen/X86/avx-blend.ll @@ -0,0 +1,104 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -promote-elements -mattr=+avx | 
FileCheck %s + +; AVX128 tests: + +;CHECK: vsel_float +;CHECK: vblendvps +;CHECK: ret +define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) { + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %v1, <4 x float> %v2 + ret <4 x float> %vsel +} + + +;CHECK: vsel_i32 +;CHECK: vblendvps +;CHECK: ret +define <4 x i32> @vsel_i32(<4 x i32> %v1, <4 x i32> %v2) { + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %v1, <4 x i32> %v2 + ret <4 x i32> %vsel +} + + +;CHECK: vsel_double +;CHECK: vblendvpd +;CHECK: ret +define <2 x double> @vsel_double(<2 x double> %v1, <2 x double> %v2) { + %vsel = select <2 x i1> <i1 true, i1 false>, <2 x double> %v1, <2 x double> %v2 + ret <2 x double> %vsel +} + + +;CHECK: vsel_i64 +;CHECK: vblendvpd +;CHECK: ret +define <2 x i64> @vsel_i64(<2 x i64> %v1, <2 x i64> %v2) { + %vsel = select <2 x i1> <i1 true, i1 false>, <2 x i64> %v1, <2 x i64> %v2 + ret <2 x i64> %vsel +} + + +;CHECK: vsel_i8 +;CHECK: vpblendvb +;CHECK: ret +define <16 x i8> @vsel_i8(<16 x i8> %v1, <16 x i8> %v2) { + %vsel = select <16 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <16 x i8> %v1, <16 x i8> %v2 + ret <16 x i8> %vsel +} + + +; AVX256 tests: + + +;CHECK: vsel_float +;CHECK: vblendvps +;CHECK: ret +define <8 x float> @vsel_float8(<8 x float> %v1, <8 x float> %v2) { + %vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x float> %v1, <8 x float> %v2 + ret <8 x float> %vsel +} + +;CHECK: vsel_i32 +;CHECK: vblendvps +;CHECK: ret +define <8 x i32> @vsel_i328(<8 x i32> %v1, <8 x i32> %v2) { + %vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x i32> %v1, <8 x i32> %v2 + ret <8 x i32> %vsel +} + +;CHECK: vsel_double +;CHECK: vblendvpd +;CHECK: ret +define <8 x double> @vsel_double8(<8 x double> %v1, <8 x double> %v2) { + %vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x double> %v1, <8 x double> %v2 + ret <8 x double> %vsel +} + +;CHECK: vsel_i64 +;CHECK: vblendvpd +;CHECK: ret +define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) { + %vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x i64> %v1, <8 x i64> %v2 + ret <8 x i64> %vsel +} + +;; TEST blend + compares +; CHECK: A +define <2 x double> @A(<2 x double> %x, <2 x double> %y) { + ; CHECK: vcmplepd + ; CHECK: vblendvpd + %max_is_x = fcmp oge <2 x double> %x, %y + %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y + ret <2 x double> %max +} + +; CHECK: B +define <2 x double> @B(<2 x double> %x, <2 x double> %y) { + ; CHECK: vcmpnlepd + ; CHECK: vblendvpd + %min_is_x = fcmp ult <2 x double> %x, %y + %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y + ret <2 x double> %min +} + + diff --git a/test/CodeGen/X86/avx-cast.ll b/test/CodeGen/X86/avx-cast.ll new file mode 100644 index 0000000..d6d2415 --- /dev/null +++ b/test/CodeGen/X86/avx-cast.ll @@ -0,0 +1,47 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +; CHECK: vxorps +; CHECK-NEXT: vinsertf128 $0 +define <8 x float> @castA(<4 x float> %m) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <4 x float> %m, <4 x float> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, 
i32 4> + ret <8 x float> %shuffle.i +} + +; CHECK: vxorps +; CHECK-NEXT: vinsertf128 $0 +define <4 x double> @castB(<2 x double> %m) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <2 x double> %m, <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 2> + ret <4 x double> %shuffle.i +} + +; CHECK: vpxor +; CHECK-NEXT: vinsertf128 $0 +define <4 x i64> @castC(<2 x i64> %m) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <2 x i64> %m, <2 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 2> + ret <4 x i64> %shuffle.i +} + +; CHECK-NOT: vextractf128 $0 +define <4 x float> @castD(<8 x float> %m) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <8 x float> %m, <8 x float> %m, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + ret <4 x float> %shuffle.i +} + +; CHECK-NOT: vextractf128 $0 +define <2 x i64> @castE(<4 x i64> %m) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <4 x i64> %m, <4 x i64> %m, <2 x i32> <i32 0, i32 1> + ret <2 x i64> %shuffle.i +} + +; CHECK-NOT: vextractf128 $0 +define <2 x double> @castF(<4 x double> %m) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <4 x double> %m, <4 x double> %m, <2 x i32> <i32 0, i32 1> + ret <2 x double> %shuffle.i +} + diff --git a/test/CodeGen/X86/avx-cmp.ll b/test/CodeGen/X86/avx-cmp.ll new file mode 100644 index 0000000..a050d6a --- /dev/null +++ b/test/CodeGen/X86/avx-cmp.ll @@ -0,0 +1,150 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +; CHECK: vcmpltps %ymm +; CHECK-NOT: vucomiss +define <8 x i32> @cmp00(<8 x float> %a, <8 x float> %b) nounwind readnone { + %bincmp = fcmp olt <8 x float> %a, %b + %s = sext <8 x i1> %bincmp to <8 x i32> + ret <8 x i32> %s +} + +; CHECK: vcmpltpd %ymm +; CHECK-NOT: vucomisd +define <4 x i64> @cmp01(<4 x double> %a, <4 x double> %b) nounwind readnone { + %bincmp = fcmp olt <4 x double> %a, %b + %s = sext <4 x i1> %bincmp to <4 x i64> + ret <4 x i64> %s +} + +declare void @scale() nounwind uwtable + +; CHECK: vucomisd +define void @render() nounwind uwtable { +entry: + br i1 undef, label %for.cond5, label %for.end52 + +for.cond5: + %or.cond = and i1 undef, false + br i1 %or.cond, label %for.body33, label %for.cond5 + +for.cond30: + br i1 false, label %for.body33, label %for.cond5 + +for.body33: + %tobool = fcmp une double undef, 0.000000e+00 + br i1 %tobool, label %if.then, label %for.cond30 + +if.then: + call void @scale() + br label %for.cond30 + +for.end52: + ret void +} + +; CHECK: vextractf128 $1 +; CHECK: vextractf128 $1 +; CHECK-NEXT: vpcmpgtd %xmm +; CHECK-NEXT: vpcmpgtd %xmm +; CHECK-NEXT: vinsertf128 $1 +define <8 x i32> @int256-cmp(<8 x i32> %i, <8 x i32> %j) nounwind readnone { + %bincmp = icmp slt <8 x i32> %i, %j + %x = sext <8 x i1> %bincmp to <8 x i32> + ret <8 x i32> %x +} + +; CHECK: vextractf128 $1 +; CHECK: vextractf128 $1 +; CHECK-NEXT: vpcmpgtq %xmm +; CHECK-NEXT: vpcmpgtq %xmm +; CHECK-NEXT: vinsertf128 $1 +define <4 x i64> @v4i64-cmp(<4 x i64> %i, <4 x i64> %j) nounwind readnone { + %bincmp = icmp slt <4 x i64> %i, %j + %x = sext <4 x i1> %bincmp to <4 x i64> + ret <4 x i64> %x +} + +; CHECK: vextractf128 $1 +; CHECK: vextractf128 $1 +; CHECK-NEXT: vpcmpgtw %xmm +; CHECK-NEXT: vpcmpgtw %xmm +; CHECK-NEXT: vinsertf128 $1 +define <16 x i16> @v16i16-cmp(<16 x i16> %i, <16 x i16> %j) nounwind readnone { + %bincmp = icmp slt <16 x i16> %i, %j + %x = sext <16 x i1> %bincmp to <16 x i16> + ret <16 x i16> %x +} + +; CHECK: 
vextractf128 $1 +; CHECK: vextractf128 $1 +; CHECK-NEXT: vpcmpgtb %xmm +; CHECK-NEXT: vpcmpgtb %xmm +; CHECK-NEXT: vinsertf128 $1 +define <32 x i8> @v32i8-cmp(<32 x i8> %i, <32 x i8> %j) nounwind readnone { + %bincmp = icmp slt <32 x i8> %i, %j + %x = sext <32 x i1> %bincmp to <32 x i8> + ret <32 x i8> %x +} + +; CHECK: vextractf128 $1 +; CHECK: vextractf128 $1 +; CHECK-NEXT: vpcmpeqd %xmm +; CHECK-NEXT: vpcmpeqd %xmm +; CHECK-NEXT: vinsertf128 $1 +define <8 x i32> @int256-cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind readnone { + %bincmp = icmp eq <8 x i32> %i, %j + %x = sext <8 x i1> %bincmp to <8 x i32> + ret <8 x i32> %x +} + +; CHECK: vextractf128 $1 +; CHECK: vextractf128 $1 +; CHECK-NEXT: vpcmpeqq %xmm +; CHECK-NEXT: vpcmpeqq %xmm +; CHECK-NEXT: vinsertf128 $1 +define <4 x i64> @v4i64-cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind readnone { + %bincmp = icmp eq <4 x i64> %i, %j + %x = sext <4 x i1> %bincmp to <4 x i64> + ret <4 x i64> %x +} + +; CHECK: vextractf128 $1 +; CHECK: vextractf128 $1 +; CHECK-NEXT: vpcmpeqw %xmm +; CHECK-NEXT: vpcmpeqw %xmm +; CHECK-NEXT: vinsertf128 $1 +define <16 x i16> @v16i16-cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind readnone { + %bincmp = icmp eq <16 x i16> %i, %j + %x = sext <16 x i1> %bincmp to <16 x i16> + ret <16 x i16> %x +} + +; CHECK: vextractf128 $1 +; CHECK: vextractf128 $1 +; CHECK-NEXT: vpcmpeqb %xmm +; CHECK-NEXT: vpcmpeqb %xmm +; CHECK-NEXT: vinsertf128 $1 +define <32 x i8> @v32i8-cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind readnone { + %bincmp = icmp eq <32 x i8> %i, %j + %x = sext <32 x i1> %bincmp to <32 x i8> + ret <32 x i8> %x +} + +;; Scalar comparison + +; CHECK: scalarcmpA +; CHECK: vcmpeqsd +define i32 @scalarcmpA() uwtable ssp { + %cmp29 = fcmp oeq double undef, 0.000000e+00 + %res = zext i1 %cmp29 to i32 + ret i32 %res +} + +; CHECK: scalarcmpB +; CHECK: vcmpeqss +define i32 @scalarcmpB() uwtable ssp { + %cmp29 = fcmp oeq float undef, 0.000000e+00 + %res = zext i1 %cmp29 to i32 + ret i32 %res +} + diff --git a/test/CodeGen/X86/avx-128.ll b/test/CodeGen/X86/avx-cvt.ll index fe1472f..6c0bd58 100644 --- a/test/CodeGen/X86/avx-128.ll +++ b/test/CodeGen/X86/avx-cvt.ll @@ -1,24 +1,41 @@ ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s -@z = common global <4 x float> zeroinitializer, align 16 +; CHECK: vcvtdq2ps %ymm +define <8 x float> @sitofp00(<8 x i32> %a) nounwind { + %b = sitofp <8 x i32> %a to <8 x float> + ret <8 x float> %b +} -define void @zero() nounwind ssp { -entry: - ; CHECK: vxorps - ; CHECK: vmovaps - store <4 x float> zeroinitializer, <4 x float>* @z, align 16 - ret void +; CHECK: vcvttps2dq %ymm +define <8 x i32> @fptosi00(<8 x float> %a) nounwind { + %b = fptosi <8 x float> %a to <8 x i32> + ret <8 x i32> %b } -define void @fpext() nounwind uwtable { -entry: - %f = alloca float, align 4 - %d = alloca double, align 8 - %tmp = load float* %f, align 4 - ; CHECK: vcvtss2sd - %conv = fpext float %tmp to double - store double %conv, double* %d, align 8 - ret void +; CHECK: vcvtdq2pd %xmm +define <4 x double> @sitofp01(<4 x i32> %a) { + %b = sitofp <4 x i32> %a to <4 x double> + ret <4 x double> %b +} + +; CHECK: vcvtpd2dqy %ymm +define <4 x i32> @fptosi01(<4 x double> %a) { + %b = fptosi <4 x double> %a to <4 x i32> + ret <4 x i32> %b +} + +; CHECK: vcvtpd2psy %ymm +; CHECK-NEXT: vcvtpd2psy %ymm +; CHECK-NEXT: vinsertf128 $1 +define <8 x float> @fptrunc00(<8 x double> %b) nounwind { + %a = fptrunc <8 x double> %b to <8 x float> + ret <8 x float> %a +} + +; CHECK: vcvtps2pd %xmm +define 
<4 x double> @fpext00(<4 x float> %b) nounwind { + %a = fpext <4 x float> %b to <4 x double> + ret <4 x double> %a } ; CHECK: vcvtsi2sdq (% @@ -53,20 +70,14 @@ entry: ret float %conv } -; CHECK: vsqrtss -define float @sqrtA(float %a) nounwind uwtable readnone ssp { -entry: - %conv1 = tail call float @sqrtf(float %a) nounwind readnone - ret float %conv1 -} - -declare double @sqrt(double) readnone - -; CHECK: vsqrtsd -define double @sqrtB(double %a) nounwind uwtable readnone ssp { +; CHECK: vcvtss2sd +define void @fpext() nounwind uwtable { entry: - %call = tail call double @sqrt(double %a) nounwind readnone - ret double %call + %f = alloca float, align 4 + %d = alloca double, align 8 + %tmp = load float* %f, align 4 + %conv = fpext float %tmp to double + store double %conv, double* %d, align 8 + ret void } -declare float @sqrtf(float) readnone diff --git a/test/CodeGen/X86/avx-load-store.ll b/test/CodeGen/X86/avx-load-store.ll index 5196089..07a63ef 100644 --- a/test/CodeGen/X86/avx-load-store.ll +++ b/test/CodeGen/X86/avx-load-store.ll @@ -1,9 +1,10 @@ ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s +; RUN: llc -O0 < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s -check-prefix=CHECK_O0 ; CHECK: vmovaps ; CHECK: vmovaps -; CHECK: vmovapd -; CHECK: vmovapd +; CHECK: vmovaps +; CHECK: vmovaps ; CHECK: vmovaps ; CHECK: vmovaps define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind uwtable ssp { @@ -22,3 +23,83 @@ entry: declare void @dummy(<4 x double>, <8 x float>, <4 x i64>) +;; +;; The two tests below check that we must fold load + scalar_to_vector +;; + ins_subvec+ zext into only a single vmovss or vmovsd + +; CHECK: vmovss (% +define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind { + %val = load float* %ptr + %i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0 + ret <8 x float> %i0 +} + +; CHECK: vmovsd (% +define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind { + %val = load double* %ptr + %i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0 + ret <4 x double> %i0 +} + +; CHECK: vmovaps %ymm +define void @storev16i16(<16 x i16> %a) nounwind { + store <16 x i16> %a, <16 x i16>* undef, align 32 + unreachable +} + +; CHECK: vmovups %ymm +define void @storev16i16_01(<16 x i16> %a) nounwind { + store <16 x i16> %a, <16 x i16>* undef, align 4 + unreachable +} + +; CHECK: vmovaps %ymm +define void @storev32i8(<32 x i8> %a) nounwind { + store <32 x i8> %a, <32 x i8>* undef, align 32 + unreachable +} + +; CHECK: vmovups %ymm +define void @storev32i8_01(<32 x i8> %a) nounwind { + store <32 x i8> %a, <32 x i8>* undef, align 4 + unreachable +} + +; It is faster to make two saves, if the data is already in XMM registers. For +; example, after making an integer operation. 
+; CHECK: _double_save +; CHECK-NOT: vinsertf128 $1 +; CHECK-NOT: vinsertf128 $0 +; CHECK: vmovaps %xmm +; CHECK: vmovaps %xmm +define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp { +entry: + %Z = shufflevector <4 x i32>%A, <4 x i32>%B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + store <8 x i32> %Z, <8 x i32>* %P, align 16 + ret void +} + +declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x float>, <8 x float>) nounwind + +; CHECK_O0: _f_f +; CHECK-O0: vmovss LCPI +; CHECK-O0: vxorps %xmm +; CHECK-O0: vmovss %xmm +define void @f_f() nounwind { +allocas: + br i1 undef, label %cif_mask_all, label %cif_mask_mixed + +cif_mask_all: ; preds = %allocas + unreachable + +cif_mask_mixed: ; preds = %allocas + br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check + +cif_mixed_test_all: ; preds = %cif_mask_mixed + call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x float> <float 0xFFFFFFFFE0000000, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, <8 x float> undef) nounwind + unreachable + +cif_mixed_test_any_check: ; preds = %cif_mask_mixed + unreachable +} + diff --git a/test/CodeGen/X86/avx-256-logic.ll b/test/CodeGen/X86/avx-logic.ll index d9e5d08..518c09c 100644 --- a/test/CodeGen/X86/avx-256-logic.ll +++ b/test/CodeGen/X86/avx-logic.ll @@ -159,3 +159,21 @@ entry: %2 = bitcast <8 x i32> %and.i to <8 x float> ret <8 x float> %2 } + +;;; Test that basic 2 x i64 logic use the integer version on AVX + +; CHECK: vpandn %xmm +define <2 x i64> @vpandn(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp { +entry: + %y = xor <2 x i64> %a, <i64 -1, i64 -1> + %x = and <2 x i64> %a, %y + ret <2 x i64> %x +} + +; CHECK: vpand %xmm +define <2 x i64> @vpand(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp { +entry: + %x = and <2 x i64> %a, %b + ret <2 x i64> %x +} + diff --git a/test/CodeGen/X86/avx-minmax.ll b/test/CodeGen/X86/avx-minmax.ll new file mode 100644 index 0000000..f36ba7b --- /dev/null +++ b/test/CodeGen/X86/avx-minmax.ll @@ -0,0 +1,65 @@ +; RUN: llc < %s -march=x86-64 -mattr=+avx -asm-verbose=false -join-physregs -enable-unsafe-fp-math -enable-no-nans-fp-math -promote-elements | FileCheck -check-prefix=UNSAFE %s + +; UNSAFE: maxpd: +; UNSAFE: vmaxpd {{.+}}, %xmm +define <2 x double> @maxpd(<2 x double> %x, <2 x double> %y) { + %max_is_x = fcmp oge <2 x double> %x, %y + %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y + ret <2 x double> %max +} + +; UNSAFE: minpd: +; UNSAFE: vminpd {{.+}}, %xmm +define <2 x double> @minpd(<2 x double> %x, <2 x double> %y) { + %min_is_x = fcmp ole <2 x double> %x, %y + %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y + ret <2 x double> %min +} + +; UNSAFE: maxps: +; UNSAFE: vmaxps {{.+}}, %xmm +define <4 x float> @maxps(<4 x float> %x, <4 x float> %y) { + %max_is_x = fcmp oge <4 x float> %x, %y + %max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y + ret <4 x float> %max +} + +; UNSAFE: minps: +; UNSAFE: vminps {{.+}}, %xmm +define <4 x float> @minps(<4 x float> %x, <4 x float> %y) { + %min_is_x = fcmp ole <4 x float> %x, %y + %min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y + ret <4 x float> %min +} + +; UNSAFE: vmaxpd: +; UNSAFE: vmaxpd %ymm +define <4 x double> @vmaxpd(<4 x double> %x, <4 x double> %y) { + %max_is_x = fcmp oge <4 x double> %x, %y + %max = select <4 x i1> %max_is_x, <4 x double> %x, <4 x double> %y + 
ret <4 x double> %max +} + +; UNSAFE: vminpd: +; UNSAFE: vminpd %ymm +define <4 x double> @vminpd(<4 x double> %x, <4 x double> %y) { + %min_is_x = fcmp ole <4 x double> %x, %y + %min = select <4 x i1> %min_is_x, <4 x double> %x, <4 x double> %y + ret <4 x double> %min +} + +; UNSAFE: vmaxps: +; UNSAFE: vmaxps %ymm +define <8 x float> @vmaxps(<8 x float> %x, <8 x float> %y) { + %max_is_x = fcmp oge <8 x float> %x, %y + %max = select <8 x i1> %max_is_x, <8 x float> %x, <8 x float> %y + ret <8 x float> %max +} + +; UNSAFE: vminps: +; UNSAFE: vminps %ymm +define <8 x float> @vminps(<8 x float> %x, <8 x float> %y) { + %min_is_x = fcmp ole <8 x float> %x, %y + %min = select <8 x i1> %min_is_x, <8 x float> %x, <8 x float> %y + ret <8 x float> %min +} diff --git a/test/CodeGen/X86/avx-movdup.ll b/test/CodeGen/X86/avx-movdup.ll new file mode 100644 index 0000000..42d84de --- /dev/null +++ b/test/CodeGen/X86/avx-movdup.ll @@ -0,0 +1,34 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +; CHECK: vmovsldup +define <8 x float> @movdupA(<8 x float> %src) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <8 x float> %src, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6> + ret <8 x float> %shuffle.i +} + +; CHECK: vmovshdup +define <8 x float> @movdupB(<8 x float> %src) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <8 x float> %src, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7> + ret <8 x float> %shuffle.i +} + +; CHECK: vmovsldup +define <4 x i64> @movdupC(<4 x i64> %src) nounwind uwtable readnone ssp { +entry: + %0 = bitcast <4 x i64> %src to <8 x float> + %shuffle.i = shufflevector <8 x float> %0, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6> + %1 = bitcast <8 x float> %shuffle.i to <4 x i64> + ret <4 x i64> %1 +} + +; CHECK: vmovshdup +define <4 x i64> @movdupD(<4 x i64> %src) nounwind uwtable readnone ssp { +entry: + %0 = bitcast <4 x i64> %src to <8 x float> + %shuffle.i = shufflevector <8 x float> %0, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7> + %1 = bitcast <8 x float> %shuffle.i to <4 x i64> + ret <4 x i64> %1 +} + diff --git a/test/CodeGen/X86/avx-select.ll b/test/CodeGen/X86/avx-select.ll new file mode 100644 index 0000000..58a75ef --- /dev/null +++ b/test/CodeGen/X86/avx-select.ll @@ -0,0 +1,22 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +; CHECK: _select00 +; CHECK: vmovaps +; CHECK-NEXT: LBB +define <8 x i32> @select00(i32 %a, <8 x i32> %b) nounwind { + %cmpres = icmp eq i32 %a, 255 + %selres = select i1 %cmpres, <8 x i32> zeroinitializer, <8 x i32> %b + %res = xor <8 x i32> %b, %selres + ret <8 x i32> %res +} + +; CHECK: _select01 +; CHECK: vmovaps +; CHECK-NEXT: LBB +define <4 x i64> @select01(i32 %a, <4 x i64> %b) nounwind { + %cmpres = icmp eq i32 %a, 255 + %selres = select i1 %cmpres, <4 x i64> zeroinitializer, <4 x i64> %b + %res = xor <4 x i64> %b, %selres + ret <4 x i64> %res +} + diff --git a/test/CodeGen/X86/avx-shift.ll b/test/CodeGen/X86/avx-shift.ll new file mode 100644 index 0000000..3ea39a2 --- /dev/null +++ b/test/CodeGen/X86/avx-shift.ll @@ -0,0 +1,75 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +;;; Shift left +; CHECK: vpslld +; CHECK: vpslld +define <8 x i32> @vshift00(<8 x i32> %a) nounwind readnone { + %s = shl <8 x i32> %a, 
<i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 +2> + ret <8 x i32> %s +} + +; CHECK: vpsllw +; CHECK: vpsllw +define <16 x i16> @vshift01(<16 x i16> %a) nounwind readnone { + %s = shl <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2> + ret <16 x i16> %s +} + +; CHECK: vpsllq +; CHECK: vpsllq +define <4 x i64> @vshift02(<4 x i64> %a) nounwind readnone { + %s = shl <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2> + ret <4 x i64> %s +} + +;;; Logical Shift right +; CHECK: vpsrld +; CHECK: vpsrld +define <8 x i32> @vshift03(<8 x i32> %a) nounwind readnone { + %s = lshr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 +2> + ret <8 x i32> %s +} + +; CHECK: vpsrlw +; CHECK: vpsrlw +define <16 x i16> @vshift04(<16 x i16> %a) nounwind readnone { + %s = lshr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2> + ret <16 x i16> %s +} + +; CHECK: vpsrlq +; CHECK: vpsrlq +define <4 x i64> @vshift05(<4 x i64> %a) nounwind readnone { + %s = lshr <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2> + ret <4 x i64> %s +} + +;;; Arithmetic Shift right +; CHECK: vpsrad +; CHECK: vpsrad +define <8 x i32> @vshift06(<8 x i32> %a) nounwind readnone { + %s = ashr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 +2> + ret <8 x i32> %s +} + +; CHECK: vpsraw +; CHECK: vpsraw +define <16 x i16> @vshift07(<16 x i16> %a) nounwind readnone { + %s = ashr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2> + ret <16 x i16> %s +} + +;;; Support variable shifts +; CHECK: _vshift08 +; CHECK: vextractf128 $1 +; CHECK: vpslld $23 +; CHECK: vextractf128 $1 +; CHECK: vpslld $23 +define <8 x i32> @vshift08(<8 x i32> %a) nounwind { + %bitop = shl <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %a + ret <8 x i32> %bitop +} + diff --git a/test/CodeGen/X86/avx-shuffle.ll b/test/CodeGen/X86/avx-shuffle.ll new file mode 100644 index 0000000..0db334d --- /dev/null +++ b/test/CodeGen/X86/avx-shuffle.ll @@ -0,0 +1,10 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +; PR11102 +define <4 x float> @test1(<4 x float> %a) nounwind { + %b = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 2, i32 5, i32 undef, i32 undef> + ret <4 x float> %b +; CHECK: test1: +; CHECK: vshufps +; CHECK: vpshufd +} diff --git a/test/CodeGen/X86/avx-splat.ll b/test/CodeGen/X86/avx-splat.ll new file mode 100644 index 0000000..af20b90 --- /dev/null +++ b/test/CodeGen/X86/avx-splat.ll @@ -0,0 +1,103 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + + +; CHECK: vpunpcklbw %xmm +; CHECK-NEXT: vpunpckhbw %xmm +; CHECK-NEXT: vinsertf128 $1 +; CHECK-NEXT: vpermilps $85 +define <32 x i8> @funcA(<32 x i8> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5> + ret <32 x i8> %shuffle +} + +; CHECK: vpunpckhwd %xmm +; CHECK-NEXT: vinsertf128 $1 +; CHECK-NEXT: vpermilps $85 +define <16 x i16> @funcB(<16 x i16> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <16 x i16> %a, <16 x i16> undef, 
<16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5> + ret <16 x i16> %shuffle +} + +; CHECK: vmovd +; CHECK-NEXT: vmovlhps %xmm +; CHECK-NEXT: vinsertf128 $1 +define <4 x i64> @funcC(i64 %q) nounwind uwtable readnone ssp { +entry: + %vecinit.i = insertelement <4 x i64> undef, i64 %q, i32 0 + %vecinit2.i = insertelement <4 x i64> %vecinit.i, i64 %q, i32 1 + %vecinit4.i = insertelement <4 x i64> %vecinit2.i, i64 %q, i32 2 + %vecinit6.i = insertelement <4 x i64> %vecinit4.i, i64 %q, i32 3 + ret <4 x i64> %vecinit6.i +} + +; CHECK: vshufpd $0 +; CHECK-NEXT: vinsertf128 $1 +define <4 x double> @funcD(double %q) nounwind uwtable readnone ssp { +entry: + %vecinit.i = insertelement <4 x double> undef, double %q, i32 0 + %vecinit2.i = insertelement <4 x double> %vecinit.i, double %q, i32 1 + %vecinit4.i = insertelement <4 x double> %vecinit2.i, double %q, i32 2 + %vecinit6.i = insertelement <4 x double> %vecinit4.i, double %q, i32 3 + ret <4 x double> %vecinit6.i +} + +; Test this simple opt: +; shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0> +; To: +; shuffle (vload ptr)), undef, <1, 1, 1, 1> +; CHECK: vmovdqa +; CHECK-NEXT: vinsertf128 $1 +; CHECK-NEXT: vpermilps $-1 +define <8 x float> @funcE() nounwind { +allocas: + %udx495 = alloca [18 x [18 x float]], align 32 + br label %for_test505.preheader + +for_test505.preheader: ; preds = %for_test505.preheader, %allocas + br i1 undef, label %for_exit499, label %for_test505.preheader + +for_exit499: ; preds = %for_test505.preheader + br i1 undef, label %__load_and_broadcast_32.exit1249, label %load.i1247 + +load.i1247: ; preds = %for_exit499 + %ptr1227 = getelementptr [18 x [18 x float]]* %udx495, i64 0, i64 1, i64 1 + %ptr.i1237 = bitcast float* %ptr1227 to i32* + %val.i1238 = load i32* %ptr.i1237, align 4 + %ret6.i1245 = insertelement <8 x i32> undef, i32 %val.i1238, i32 6 + %ret7.i1246 = insertelement <8 x i32> %ret6.i1245, i32 %val.i1238, i32 7 + %phitmp = bitcast <8 x i32> %ret7.i1246 to <8 x float> + br label %__load_and_broadcast_32.exit1249 + +__load_and_broadcast_32.exit1249: ; preds = %load.i1247, %for_exit499 + %load_broadcast12281250 = phi <8 x float> [ %phitmp, %load.i1247 ], [ undef, %for_exit499 ] + ret <8 x float> %load_broadcast12281250 +} + +; CHECK: vinsertf128 $1 +; CHECK-NEXT: vpermilps $0 +define <8 x float> @funcF(i32 %val) nounwind { + %ret6 = insertelement <8 x i32> undef, i32 %val, i32 6 + %ret7 = insertelement <8 x i32> %ret6, i32 %val, i32 7 + %tmp = bitcast <8 x i32> %ret7 to <8 x float> + ret <8 x float> %tmp +} + +; CHECK: vinsertf128 $1 +; CHECK-NEXT: vpermilps $0 +define <8 x float> @funcG(<8 x float> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0> + ret <8 x float> %shuffle +} + +; CHECK: vextractf128 $1 +; CHECK-NEXT: vinsertf128 $1 +; CHECK-NEXT: vpermilps $85 +define <8 x float> @funcH(<8 x float> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5> + ret <8 x float> %shuffle +} + diff --git a/test/CodeGen/X86/avx-unpack.ll b/test/CodeGen/X86/avx-unpack.ll new file mode 100644 index 0000000..d420101 --- /dev/null +++ b/test/CodeGen/X86/avx-unpack.ll @@ -0,0 +1,89 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +; CHECK: vunpckhps +define <8 x 
float> @unpackhips(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15> + ret <8 x float> %shuffle.i +} + +; CHECK: vunpckhpd +define <4 x double> @unpackhipd(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 1, i32 5, i32 3, i32 7> + ret <4 x double> %shuffle.i +} + +; CHECK: vunpcklps +define <8 x float> @unpacklops(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13> + ret <8 x float> %shuffle.i +} + +; CHECK: vunpcklpd +define <4 x double> @unpacklopd(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 0, i32 4, i32 2, i32 6> + ret <4 x double> %shuffle.i +} + +; CHECK-NOT: vunpcklps %ymm +define <8 x float> @unpacklops-not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11> + ret <8 x float> %shuffle.i +} + +; CHECK-NOT: vunpcklpd %ymm +define <4 x double> @unpacklopd-not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 0, i32 4, i32 1, i32 5> + ret <4 x double> %shuffle.i +} + +; CHECK-NOT: vunpckhps %ymm +define <8 x float> @unpackhips-not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13> + ret <8 x float> %shuffle.i +} + +; CHECK-NOT: vunpckhpd %ymm +define <4 x double> @unpackhipd-not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 2, i32 6, i32 3, i32 7> + ret <4 x double> %shuffle.i +} + +;;;; +;;;; Unpack versions using the fp unit for int unpacking +;;;; + +; CHECK: vunpckhps +define <8 x i32> @unpackhips1(<8 x i32> %src1, <8 x i32> %src2) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <8 x i32> %src1, <8 x i32> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15> + ret <8 x i32> %shuffle.i +} + +; CHECK: vunpckhpd +define <4 x i64> @unpackhipd1(<4 x i64> %src1, <4 x i64> %src2) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <4 x i64> %src1, <4 x i64> %src2, <4 x i32> <i32 1, i32 5, i32 3, i32 7> + ret <4 x i64> %shuffle.i +} + +; CHECK: vunpcklps +define <8 x i32> @unpacklops1(<8 x i32> %src1, <8 x i32> %src2) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <8 x i32> %src1, <8 x i32> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13> + ret <8 x i32> %shuffle.i +} + +; CHECK: vunpcklpd +define <4 x i64> @unpacklopd1(<4 x i64> %src1, <4 x i64> %src2) nounwind uwtable readnone ssp { +entry: + %shuffle.i = shufflevector <4 x i64> %src1, <4 x i64> %src2, <4 x i32> <i32 0, i32 4, i32 2, i32 6> + ret <4 x i64> %shuffle.i +} diff --git a/test/CodeGen/X86/avx-vbroadcast.ll 
b/test/CodeGen/X86/avx-vbroadcast.ll new file mode 100644 index 0000000..89b4188 --- /dev/null +++ b/test/CodeGen/X86/avx-vbroadcast.ll @@ -0,0 +1,94 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s +; XFAIL: * + +; xfail this file for now because of PR8156, when it gets solved merge this with avx-splat.ll + +; CHECK: vbroadcastsd (% +define <4 x i64> @A(i64* %ptr) nounwind uwtable readnone ssp { +entry: + %q = load i64* %ptr, align 8 + %vecinit.i = insertelement <4 x i64> undef, i64 %q, i32 0 + %vecinit2.i = insertelement <4 x i64> %vecinit.i, i64 %q, i32 1 + %vecinit4.i = insertelement <4 x i64> %vecinit2.i, i64 %q, i32 2 + %vecinit6.i = insertelement <4 x i64> %vecinit4.i, i64 %q, i32 3 + ret <4 x i64> %vecinit6.i +} + +; CHECK: vbroadcastss (% +define <8 x i32> @B(i32* %ptr) nounwind uwtable readnone ssp { +entry: + %q = load i32* %ptr, align 4 + %vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0 + %vecinit2.i = insertelement <8 x i32> %vecinit.i, i32 %q, i32 1 + %vecinit4.i = insertelement <8 x i32> %vecinit2.i, i32 %q, i32 2 + %vecinit6.i = insertelement <8 x i32> %vecinit4.i, i32 %q, i32 3 + ret <8 x i32> %vecinit6.i +} + +; CHECK: vbroadcastsd (% +define <4 x double> @C(double* %ptr) nounwind uwtable readnone ssp { +entry: + %q = load double* %ptr, align 8 + %vecinit.i = insertelement <4 x double> undef, double %q, i32 0 + %vecinit2.i = insertelement <4 x double> %vecinit.i, double %q, i32 1 + %vecinit4.i = insertelement <4 x double> %vecinit2.i, double %q, i32 2 + %vecinit6.i = insertelement <4 x double> %vecinit4.i, double %q, i32 3 + ret <4 x double> %vecinit6.i +} + +; CHECK: vbroadcastss (% +define <8 x float> @D(float* %ptr) nounwind uwtable readnone ssp { +entry: + %q = load float* %ptr, align 4 + %vecinit.i = insertelement <8 x float> undef, float %q, i32 0 + %vecinit2.i = insertelement <8 x float> %vecinit.i, float %q, i32 1 + %vecinit4.i = insertelement <8 x float> %vecinit2.i, float %q, i32 2 + %vecinit6.i = insertelement <8 x float> %vecinit4.i, float %q, i32 3 + ret <8 x float> %vecinit6.i +} + +;;;; 128-bit versions + +; CHECK: vbroadcastss (% +define <4 x float> @E(float* %ptr) nounwind uwtable readnone ssp { +entry: + %q = load float* %ptr, align 4 + %vecinit.i = insertelement <4 x float> undef, float %q, i32 0 + %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1 + %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2 + %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3 + ret <4 x float> %vecinit6.i +} + +; CHECK: vbroadcastss (% +define <4 x i32> @F(i32* %ptr) nounwind uwtable readnone ssp { +entry: + %q = load i32* %ptr, align 4 + %vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0 + %vecinit2.i = insertelement <4 x i32> %vecinit.i, i32 %q, i32 1 + %vecinit4.i = insertelement <4 x i32> %vecinit2.i, i32 %q, i32 2 + %vecinit6.i = insertelement <4 x i32> %vecinit4.i, i32 %q, i32 3 + ret <4 x i32> %vecinit6.i +} + +; Unsupported vbroadcasts + +; CHECK: _G +; CHECK-NOT: vbroadcastsd (% +; CHECK: ret +define <2 x i64> @G(i64* %ptr) nounwind uwtable readnone ssp { +entry: + %q = load i64* %ptr, align 8 + %vecinit.i = insertelement <2 x i64> undef, i64 %q, i32 0 + %vecinit2.i = insertelement <2 x i64> %vecinit.i, i64 %q, i32 1 + ret <2 x i64> %vecinit2.i +} + +; CHECK: _H +; CHECK-NOT: vbroadcastss +; CHECK: ret +define <4 x i32> @H(<4 x i32> %a) { + %x = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef> + ret <4 x i32> 
%x +} + diff --git a/test/CodeGen/X86/avx-vextractf128.ll b/test/CodeGen/X86/avx-vextractf128.ll new file mode 100644 index 0000000..dccf901b --- /dev/null +++ b/test/CodeGen/X86/avx-vextractf128.ll @@ -0,0 +1,18 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +; CHECK-NOT: vunpck +; CHECK: vextractf128 $1 +define <8 x float> @A(<8 x float> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 8, i32 8, i32 8> + ret <8 x float> %shuffle +} + +; CHECK-NOT: vunpck +; CHECK: vextractf128 $1 +define <4 x double> @B(<4 x double> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 4> + ret <4 x double> %shuffle +} + diff --git a/test/CodeGen/X86/avx-vinsertf128.ll b/test/CodeGen/X86/avx-vinsertf128.ll new file mode 100644 index 0000000..cda1331 --- /dev/null +++ b/test/CodeGen/X86/avx-vinsertf128.ll @@ -0,0 +1,58 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck -check-prefix=CHECK-SSE %s + +; CHECK-NOT: vunpck +; CHECK: vinsertf128 $1 +define <8 x float> @A(<8 x float> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 8, i32 8, i32 8, i32 8, i32 0, i32 1, i32 2, i32 3> + ret <8 x float> %shuffle +} + +; CHECK-NOT: vunpck +; CHECK: vinsertf128 $1 +define <4 x double> @B(<4 x double> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 4, i32 4, i32 0, i32 1> + ret <4 x double> %shuffle +} + +declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone + +declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone + +; Just check that no crash happens +; CHECK-SSE: _insert_crash +define void @insert_crash() nounwind { +allocas: + %v1.i.i451 = shufflevector <4 x double> zeroinitializer, <4 x double> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef> + %ret_0a.i.i.i452 = shufflevector <4 x double> %v1.i.i451, <4 x double> undef, <2 x i32> <i32 0, i32 1> + %vret_0.i.i.i454 = tail call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %ret_0a.i.i.i452, <2 x double> undef) nounwind + %ret_val.i.i.i463 = tail call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %vret_0.i.i.i454, <2 x double> undef) nounwind + %ret.i1.i.i464 = extractelement <2 x double> %ret_val.i.i.i463, i32 0 + %double2float = fptrunc double %ret.i1.i.i464 to float + %smearinsert50 = insertelement <4 x float> undef, float %double2float, i32 3 + %blendAsInt.i503 = bitcast <4 x float> %smearinsert50 to <4 x i32> + store <4 x i32> %blendAsInt.i503, <4 x i32>* undef, align 4 + ret void +} + +;; DAG Combine must remove useless vinsertf128 instructions + +; CHECK: DAGCombineA +; CHECK-NOT: vinsertf128 $1 +define <4 x i32> @DAGCombineA(<4 x i32> %v1) nounwind readonly { + %1 = shufflevector <4 x i32> %v1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + ret <4 x i32> %2 +} + +; CHECK: DAGCombineB +; CHECK: vpaddd %xmm +; CHECK-NOT: vinsertf128 $1 +; CHECK: vpaddd %xmm +define <8 x i32> @DAGCombineB(<8 x i32> %v1, <8 x i32> %v2) nounwind readonly { + %1 = 
add <8 x i32> %v1, %v2 + %2 = add <8 x i32> %1, %v1 + ret <8 x i32> %2 +} diff --git a/test/CodeGen/X86/avx-vmovddup.ll b/test/CodeGen/X86/avx-vmovddup.ll new file mode 100644 index 0000000..1c56fe2 --- /dev/null +++ b/test/CodeGen/X86/avx-vmovddup.ll @@ -0,0 +1,14 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +; CHECK: vmovddup %ymm +define <4 x i64> @A(<4 x i64> %a) { + %c = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2> + ret <4 x i64> %c +} + +; CHECK: vmovddup (% +define <4 x i64> @B(<4 x i64>* %ptr) { + %a = load <4 x i64>* %ptr + %c = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2> + ret <4 x i64> %c +} diff --git a/test/CodeGen/X86/avx-vperm2f128.ll b/test/CodeGen/X86/avx-vperm2f128.ll new file mode 100644 index 0000000..3550a90 --- /dev/null +++ b/test/CodeGen/X86/avx-vperm2f128.ll @@ -0,0 +1,62 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +; CHECK: vperm2f128 $1 +define <8 x float> @A(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3> + ret <8 x float> %shuffle +} + +; CHECK: vperm2f128 $48 +define <8 x float> @B(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15> + ret <8 x float> %shuffle +} + +; CHECK: vperm2f128 $0 +define <8 x float> @C(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3> + ret <8 x float> %shuffle +} + +; CHECK: vperm2f128 $17 +define <8 x float> @D(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> + ret <8 x float> %shuffle +} + +; CHECK: vperm2f128 $17 +define <32 x i8> @E(<32 x i8> %a, <32 x i8> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> + ret <32 x i8> %shuffle +} + +; CHECK: vperm2f128 $33 +define <4 x i64> @E2(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 6, i32 7, i32 0, i32 1> + ret <4 x i64> %shuffle +} + +;;;; Cases with undef indicies mixed in the mask + +; CHECK: vperm2f128 $33 +define <8 x float> @F(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 9, i32 undef, i32 11> + ret <8 x float> %shuffle +} + +;;;; Cases we must not select vperm2f128 + +; CHECK: _G +; CHECK-NOT: vperm2f128 +define <8 x float> @G(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 12, i32 undef, i32 15> + ret <8 x float> %shuffle +} 
diff --git a/test/CodeGen/X86/avx-vpermil.ll b/test/CodeGen/X86/avx-vpermil.ll new file mode 100644 index 0000000..49b2f54 --- /dev/null +++ b/test/CodeGen/X86/avx-vpermil.ll @@ -0,0 +1,45 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +; CHECK: vpermilps +define <8 x float> @funcA(<8 x float> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 1, i32 5, i32 6, i32 7, i32 5> + ret <8 x float> %shuffle +} + +; CHECK: vpermilpd +define <4 x double> @funcB(<4 x double> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 3> + ret <4 x double> %shuffle +} + +; CHECK: vpermilps +define <8 x i32> @funcC(<8 x i32> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 1, i32 5, i32 6, i32 7, i32 5> + ret <8 x i32> %shuffle +} + +; CHECK: vpermilpd +define <4 x i64> @funcD(<4 x i64> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 3> + ret <4 x i64> %shuffle +} + +; vpermil should match masks like this: <u,3,1,2,4,u,5,6>. Check that the +; target specific mask was correctly generated. +; CHECK: vpermilps $-100 +define <8 x float> @funcE(<8 x float> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 8, i32 3, i32 1, i32 2, i32 4, i32 8, i32 5, i32 6> + ret <8 x float> %shuffle +} + +; CHECK-NOT: vpermilps +define <8 x float> @funcF(<8 x float> %a) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> zeroinitializer, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9> + ret <8 x float> %shuffle +} diff --git a/test/CodeGen/X86/avx-vshufp.ll b/test/CodeGen/X86/avx-vshufp.ll new file mode 100644 index 0000000..f06548d --- /dev/null +++ b/test/CodeGen/X86/avx-vshufp.ll @@ -0,0 +1,29 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +; CHECK: vshufps $-53, %ymm +define <8 x float> @A(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 3, i32 2, i32 8, i32 11, i32 7, i32 6, i32 12, i32 15> + ret <8 x float> %shuffle +} + +; CHECK: vshufpd $10, %ymm +define <4 x double> @B(<4 x double> %a, <4 x double> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7> + ret <4 x double> %shuffle +} + +; CHECK: vshufps $-53, %ymm +define <8 x float> @C(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 3, i32 undef, i32 undef, i32 11, i32 undef, i32 6, i32 12, i32 undef> + ret <8 x float> %shuffle +} + +; CHECK: vshufpd $2, %ymm +define <4 x double> @D(<4 x double> %a, <4 x double> %b) nounwind uwtable readnone ssp { +entry: + %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 undef> + ret <4 x double> %shuffle +} diff --git a/test/CodeGen/X86/avx-vzeroupper.ll b/test/CodeGen/X86/avx-vzeroupper.ll new file mode 100644 index 0000000..eaf236c --- /dev/null +++ b/test/CodeGen/X86/avx-vzeroupper.ll @@ -0,0 +1,26 @@ +; RUN: llc < %s 
-x86-use-vzeroupper -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s + +define <4 x float> @do_sse_local(<4 x float> %a) nounwind uwtable readnone ssp { +entry: + %add.i = fadd <4 x float> %a, %a + ret <4 x float> %add.i +} + +; CHECK: _test00 +define <4 x float> @test00(<4 x float> %a, <4 x float> %b) nounwind uwtable ssp { +entry: + %add.i = fadd <4 x float> %a, %b + ; CHECK: vzeroupper + ; CHECK-NEXT: callq _do_sse + %call3 = tail call <4 x float> @do_sse(<4 x float> %add.i) nounwind + %sub.i = fsub <4 x float> %call3, %add.i + ; CHECK-NOT: vzeroupper + ; CHECK: callq _do_sse_local + %call8 = tail call <4 x float> @do_sse_local(<4 x float> %sub.i) + ; CHECK: vzeroupper + ; CHECK-NEXT: jmp _do_sse + %call10 = tail call <4 x float> @do_sse(<4 x float> %call8) nounwind + ret <4 x float> %call10 +} + +declare <4 x float> @do_sse(<4 x float>) diff --git a/test/CodeGen/X86/barrier-sse.ll b/test/CodeGen/X86/barrier-sse.ll index 6190c36..bbfeea6 100644 --- a/test/CodeGen/X86/barrier-sse.ll +++ b/test/CodeGen/X86/barrier-sse.ll @@ -3,19 +3,9 @@ ; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep mfence ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep MEMBARRIER - -declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1) - define void @test() { - call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 false, i1 false) - call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 false, i1 false) - call void @llvm.memory.barrier( i1 true, i1 false, i1 false, i1 true, i1 false) - - call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 false, i1 false) - call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 true, i1 false) - call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 true, i1 false) - - call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true , i1 false) - call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 false , i1 false) - ret void + fence acquire + fence release + fence acq_rel + ret void } diff --git a/test/CodeGen/X86/barrier.ll b/test/CodeGen/X86/barrier.ll index fad6ef6..4769b39 100644 --- a/test/CodeGen/X86/barrier.ll +++ b/test/CodeGen/X86/barrier.ll @@ -1,7 +1,6 @@ ; RUN: llc < %s -march=x86 -mattr=-sse2 | grep lock -declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1) define void @test() { - call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 false, i1 false) + fence seq_cst ret void -}
\ No newline at end of file +} diff --git a/test/CodeGen/X86/bmi.ll b/test/CodeGen/X86/bmi.ll new file mode 100644 index 0000000..4b40d90 --- /dev/null +++ b/test/CodeGen/X86/bmi.ll @@ -0,0 +1,180 @@ +; RUN: llc < %s -march=x86-64 -mattr=+bmi,+bmi2 | FileCheck %s + +define i32 @t1(i32 %x) nounwind { + %tmp = tail call i32 @llvm.cttz.i32( i32 %x ) + ret i32 %tmp +; CHECK: t1: +; CHECK: tzcntl +} + +declare i32 @llvm.cttz.i32(i32) nounwind readnone + +define i16 @t2(i16 %x) nounwind { + %tmp = tail call i16 @llvm.cttz.i16( i16 %x ) + ret i16 %tmp +; CHECK: t2: +; CHECK: tzcntw +} + +declare i16 @llvm.cttz.i16(i16) nounwind readnone + +define i64 @t3(i64 %x) nounwind { + %tmp = tail call i64 @llvm.cttz.i64( i64 %x ) + ret i64 %tmp +; CHECK: t3: +; CHECK: tzcntq +} + +declare i64 @llvm.cttz.i64(i64) nounwind readnone + +define i8 @t4(i8 %x) nounwind { + %tmp = tail call i8 @llvm.cttz.i8( i8 %x ) + ret i8 %tmp +; CHECK: t4: +; CHECK: tzcntw +} + +declare i8 @llvm.cttz.i8(i8) nounwind readnone + +define i32 @andn32(i32 %x, i32 %y) nounwind readnone { + %tmp1 = xor i32 %x, -1 + %tmp2 = and i32 %y, %tmp1 + ret i32 %tmp2 +; CHECK: andn32: +; CHECK: andnl +} + +define i64 @andn64(i64 %x, i64 %y) nounwind readnone { + %tmp1 = xor i64 %x, -1 + %tmp2 = and i64 %tmp1, %y + ret i64 %tmp2 +; CHECK: andn64: +; CHECK: andnq +} + +define i32 @bextr32(i32 %x, i32 %y) nounwind readnone { + %tmp = tail call i32 @llvm.x86.bmi.bextr.32(i32 %x, i32 %y) + ret i32 %tmp +; CHECK: bextr32: +; CHECK: bextrl +} + +declare i32 @llvm.x86.bmi.bextr.32(i32, i32) nounwind readnone + +define i64 @bextr64(i64 %x, i64 %y) nounwind readnone { + %tmp = tail call i64 @llvm.x86.bmi.bextr.64(i64 %x, i64 %y) + ret i64 %tmp +; CHECK: bextr64: +; CHECK: bextrq +} + +declare i64 @llvm.x86.bmi.bextr.64(i64, i64) nounwind readnone + +define i32 @bzhi32(i32 %x, i32 %y) nounwind readnone { + %tmp = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %x, i32 %y) + ret i32 %tmp +; CHECK: bzhi32: +; CHECK: bzhil +} + +declare i32 @llvm.x86.bmi.bzhi.32(i32, i32) nounwind readnone + +define i64 @bzhi64(i64 %x, i64 %y) nounwind readnone { + %tmp = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %x, i64 %y) + ret i64 %tmp +; CHECK: bzhi64: +; CHECK: bzhiq +} + +declare i64 @llvm.x86.bmi.bzhi.64(i64, i64) nounwind readnone + +define i32 @blsi32(i32 %x) nounwind readnone { + %tmp = tail call i32 @llvm.x86.bmi.blsi.32(i32 %x) + ret i32 %tmp +; CHECK: blsi32: +; CHECK: blsil +} + +declare i32 @llvm.x86.bmi.blsi.32(i32) nounwind readnone + +define i64 @blsi64(i64 %x) nounwind readnone { + %tmp = tail call i64 @llvm.x86.bmi.blsi.64(i64 %x) + ret i64 %tmp +; CHECK: blsi64: +; CHECK: blsiq +} + +declare i64 @llvm.x86.bmi.blsi.64(i64) nounwind readnone + +define i32 @blsmsk32(i32 %x) nounwind readnone { + %tmp = tail call i32 @llvm.x86.bmi.blsmsk.32(i32 %x) + ret i32 %tmp +; CHECK: blsmsk32: +; CHECK: blsmskl +} + +declare i32 @llvm.x86.bmi.blsmsk.32(i32) nounwind readnone + +define i64 @blsmsk64(i64 %x) nounwind readnone { + %tmp = tail call i64 @llvm.x86.bmi.blsmsk.64(i64 %x) + ret i64 %tmp +; CHECK: blsmsk64: +; CHECK: blsmskq +} + +declare i64 @llvm.x86.bmi.blsmsk.64(i64) nounwind readnone + +define i32 @blsr32(i32 %x) nounwind readnone { + %tmp = tail call i32 @llvm.x86.bmi.blsr.32(i32 %x) + ret i32 %tmp +; CHECK: blsr32: +; CHECK: blsrl +} + +declare i32 @llvm.x86.bmi.blsr.32(i32) nounwind readnone + +define i64 @blsr64(i64 %x) nounwind readnone { + %tmp = tail call i64 @llvm.x86.bmi.blsr.64(i64 %x) + ret i64 %tmp +; CHECK: blsr64: +; CHECK: blsrq +} + +declare i64 
@llvm.x86.bmi.blsr.64(i64) nounwind readnone + +define i32 @pdep32(i32 %x, i32 %y) nounwind readnone { + %tmp = tail call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 %y) + ret i32 %tmp +; CHECK: pdep32: +; CHECK: pdepl +} + +declare i32 @llvm.x86.bmi.pdep.32(i32, i32) nounwind readnone + +define i64 @pdep64(i64 %x, i64 %y) nounwind readnone { + %tmp = tail call i64 @llvm.x86.bmi.pdep.64(i64 %x, i64 %y) + ret i64 %tmp +; CHECK: pdep64: +; CHECK: pdepq +} + +declare i64 @llvm.x86.bmi.pdep.64(i64, i64) nounwind readnone + +define i32 @pext32(i32 %x, i32 %y) nounwind readnone { + %tmp = tail call i32 @llvm.x86.bmi.pext.32(i32 %x, i32 %y) + ret i32 %tmp +; CHECK: pext32: +; CHECK: pextl +} + +declare i32 @llvm.x86.bmi.pext.32(i32, i32) nounwind readnone + +define i64 @pext64(i64 %x, i64 %y) nounwind readnone { + %tmp = tail call i64 @llvm.x86.bmi.pext.64(i64 %x, i64 %y) + ret i64 %tmp +; CHECK: pext64: +; CHECK: pextq +} + +declare i64 @llvm.x86.bmi.pext.64(i64, i64) nounwind readnone + diff --git a/test/CodeGen/X86/bswap.ll b/test/CodeGen/X86/bswap.ll index a7540aa..d2d6f90 100644 --- a/test/CodeGen/X86/bswap.ll +++ b/test/CodeGen/X86/bswap.ll @@ -1,6 +1,6 @@ ; bswap should be constant folded when it is passed a constant argument -; RUN: llc < %s -march=x86 | FileCheck %s +; RUN: llc < %s -march=x86 -mcpu=i686 | FileCheck %s declare i16 @llvm.bswap.i16(i16) diff --git a/test/CodeGen/X86/change-compare-stride-0.ll b/test/CodeGen/X86/change-compare-stride-0.ll index 3a383ee..439f7b0 100644 --- a/test/CodeGen/X86/change-compare-stride-0.ll +++ b/test/CodeGen/X86/change-compare-stride-0.ll @@ -1,4 +1,7 @@ -; RUN: llc < %s -march=x86 | FileCheck %s +; RUN: llc < %s -march=x86 -enable-lsr-nested | FileCheck %s +; +; Nested LSR is required to optimize this case. +; We do not expect to see this form of IR without -enable-iv-rewrite. define void @borf(i8* nocapture %in, i8* nocapture %out) nounwind { ; CHECK: borf: diff --git a/test/CodeGen/X86/change-compare-stride-1.ll b/test/CodeGen/X86/change-compare-stride-1.ll index eee3b79..8b53ae2 100644 --- a/test/CodeGen/X86/change-compare-stride-1.ll +++ b/test/CodeGen/X86/change-compare-stride-1.ll @@ -1,4 +1,7 @@ -; RUN: llc < %s -march=x86-64 | FileCheck %s +; RUN: llc < %s -march=x86-64 -enable-lsr-nested | FileCheck %s +; +; Nested LSR is required to optimize this case. +; We do not expect to see this form of IR without -enable-iv-rewrite. 
define void @borf(i8* nocapture %in, i8* nocapture %out) nounwind { ; CHECK: borf: diff --git a/test/CodeGen/X86/cmov.ll b/test/CodeGen/X86/cmov.ll index 39d9d1e..7a8d6e6 100644 --- a/test/CodeGen/X86/cmov.ll +++ b/test/CodeGen/X86/cmov.ll @@ -90,8 +90,8 @@ bb.i.i.i: ; preds = %entry ; CHECK: test4: ; CHECK: g_100 ; CHECK: testb -; CHECK: testb %al, %al -; CHECK-NEXT: setne %al +; CHECK-NOT: xor +; CHECK: setne ; CHECK-NEXT: testb func_4.exit.i: ; preds = %bb.i.i.i, %entry diff --git a/test/CodeGen/X86/cmpxchg16b.ll b/test/CodeGen/X86/cmpxchg16b.ll new file mode 100644 index 0000000..ba1c4ef --- /dev/null +++ b/test/CodeGen/X86/cmpxchg16b.ll @@ -0,0 +1,13 @@ +; RUN: llc < %s -march=x86-64 -mcpu=core2 | FileCheck %s + +; Basic 128-bit cmpxchg +define void @t1(i128* nocapture %p) nounwind ssp { +entry: +; CHECK movl $1, %ebx +; CHECK: lock +; CHECK-NEXT: cmpxchg16b + %r = cmpxchg i128* %p, i128 0, i128 1 seq_cst + ret void +} + +; FIXME: Handle 128-bit atomicrmw/load atomic/store atomic diff --git a/test/CodeGen/X86/coalescer-dce.ll b/test/CodeGen/X86/coalescer-dce.ll new file mode 100644 index 0000000..7f72e3d --- /dev/null +++ b/test/CodeGen/X86/coalescer-dce.ll @@ -0,0 +1,80 @@ +; RUN: llc < %s -disable-fp-elim -disable-machine-dce -verify-coalescing +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" +target triple = "x86_64-apple-macosx10.7.0" + +; This test case has a sub-register join followed by a remat: +; +; 256L %vreg2<def> = COPY %vreg7:sub_32bit<kill>; GR32:%vreg2 GR64:%vreg7 +; Considering merging %vreg2 with %vreg7:sub_32bit +; Cross-class to GR64. +; RHS = %vreg2 = [256d,272d:0) 0@256d +; LHS = %vreg7 = [208d,256d:0)[304L,480L:0) 0@208d +; updated: 272L %vreg0<def> = COPY %vreg7:sub_32bit<kill>; GR32:%vreg0 GR64:%vreg7 +; Joined. Result = %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d +; +; 272L %vreg10:sub_32bit<def> = COPY %vreg7:sub_32bit<kill>, %vreg10<imp-def>; GR64:%vreg10,%vreg7 +; Considering merging %vreg7 with %vreg10 +; RHS = %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d +; LHS = %vreg10 = [16d,64L:2)[64L,160L:1)[192L,240L:1)[272d,304L:3)[304L,352d:1)[352d,400d:0)[400d,400S:4) 0@352d 1@64L-phidef 2@16d-phikill 3@272d-phikill 4@400d +; Remat: %vreg10<def> = MOV64r0 %vreg10<imp-def>, %EFLAGS<imp-def,dead>, %vreg10<imp-def>; GR64:%vreg10 +; Shrink: %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d +; live-in at 240L +; live-in at 416L +; live-in at 320L +; live-in at 304L +; Shrunk: %vreg7 = [208d,256d:0)[304L,480L:0) 0@208d +; +; The COPY at 256L is rewritten as a partial def, and that would artificially +; extend the live range of %vreg7 to end at 256d. When the joined copy is +; removed, -verify-coalescing complains about the dangling kill. 
+; +; <rdar://problem/9967101> + +define void @f1() nounwind uwtable ssp { +bb: + br label %bb1 + +bb1: + %tmp = phi i32 [ 0, %bb ], [ %tmp21, %bb20 ] + br label %bb2 + +bb2: + br i1 undef, label %bb5, label %bb8 + +bb4: + br i1 undef, label %bb2, label %bb20 + +bb5: + br i1 undef, label %bb4, label %bb20 + +bb8: + %tmp9 = phi i32 [ %tmp24, %bb23 ], [ 0, %bb2 ] + br i1 false, label %bb41, label %bb10 + +bb10: + %tmp11 = sub nsw i32 %tmp9, %tmp + br i1 false, label %bb2, label %bb26 + +bb20: + %tmp21 = phi i32 [ undef, %bb4 ], [ undef, %bb5 ], [ %tmp9, %bb27 ], [ undef, %bb32 ] + %tmp22 = phi i32 [ undef, %bb4 ], [ undef, %bb5 ], [ %tmp11, %bb27 ], [ undef, %bb32 ] + br label %bb1 + +bb23: + %tmp24 = add nsw i32 %tmp9, 1 + br label %bb8 + +bb26: + br i1 undef, label %bb27, label %bb32 + +bb27: + %tmp28 = zext i32 %tmp11 to i64 + %tmp30 = icmp eq i64 undef, %tmp28 + br i1 %tmp30, label %bb20, label %bb27 + +bb32: + br i1 undef, label %bb20, label %bb23 + +bb41: + ret void +} diff --git a/test/CodeGen/X86/coalescer-remat.ll b/test/CodeGen/X86/coalescer-remat.ll index 4db520f..eb7b7a8 100644 --- a/test/CodeGen/X86/coalescer-remat.ll +++ b/test/CodeGen/X86/coalescer-remat.ll @@ -1,15 +1,13 @@ ; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep xor | count 3 -@val = internal global i64 0 ; <i64*> [#uses=1] -@"\01LC" = internal constant [7 x i8] c"0x%lx\0A\00" ; <[7 x i8]*> [#uses=1] +@val = internal global i64 0 +@"\01LC" = internal constant [7 x i8] c"0x%lx\0A\00" define i32 @main() nounwind { entry: - %0 = tail call i64 @llvm.atomic.cmp.swap.i64.p0i64(i64* @val, i64 0, i64 1) ; <i64> [#uses=1] - %1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([7 x i8]* @"\01LC", i32 0, i64 0), i64 %0) nounwind ; <i32> [#uses=0] - ret i32 0 + %0 = cmpxchg i64* @val, i64 0, i64 1 monotonic + %1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([7 x i8]* @"\01LC", i32 0, i64 0), i64 %0) nounwind + ret i32 0 } -declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64*, i64, i64) nounwind - declare i32 @printf(i8*, ...) nounwind diff --git a/test/CodeGen/X86/code_placement_eh.ll b/test/CodeGen/X86/code_placement_eh.ll index 172d591..2da3f9f 100644 --- a/test/CodeGen/X86/code_placement_eh.ll +++ b/test/CodeGen/X86/code_placement_eh.ll @@ -22,11 +22,13 @@ bb18.i5.i: ; preds = %.noexc6.i.i, %bb51. to label %.noexc6.i.i unwind label %lpad.i.i ; <float> [#uses=0] lpad.i.i: ; preds = %bb18.i5.i, %.noexc6.i.i - %eh_ptr.i.i = call i8* @llvm.eh.exception() ; <i8*> [#uses=1] + %lpadval.i.i = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0 + catch i8* null unreachable lpad59.i: ; preds = %bb15 - %eh_ptr60.i = call i8* @llvm.eh.exception() ; <i8*> [#uses=1] + %lpadval60.i.i = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0 + catch i8* null unreachable bb15: ; preds = %.noexc3, %invcont5 @@ -34,9 +36,7 @@ bb15: ; preds = %.noexc3, %invcont5 to label %.noexc3 unwind label %lpad59.i } -declare i8* @llvm.eh.exception() nounwind readonly - -declare i32 @llvm.eh.selector(i8*, i8*, ...) nounwind +declare i32 @__gxx_personality_v0(...) declare float @sinf(float) readonly diff --git a/test/CodeGen/X86/crash-nosse.ll b/test/CodeGen/X86/crash-nosse.ll new file mode 100644 index 0000000..1cec25b --- /dev/null +++ b/test/CodeGen/X86/crash-nosse.ll @@ -0,0 +1,27 @@ +; RUN: llc < %s -mattr=-sse2,-sse41 -verify-machineinstrs +target triple = "x86_64-unknown-linux-gnu" + +; PR10503 +; This test case produces INSERT_SUBREG 0, <undef> instructions that +; ProcessImplicitDefs doesn't eliminate. 
+define void @autogen_136178_500() { +BB: + %Shuff6 = shufflevector <32 x i32> undef, <32 x i32> undef, <32 x i32> <i32 27, i32 29, i32 31, i32 undef, i32 undef, i32 37, i32 39, i32 41, i32 undef, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 undef, i32 61, i32 63, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 undef, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25> + %S17 = select i1 true, <8 x float>* null, <8 x float>* null + br label %CF + +CF: ; preds = %CF, %BB + %L19 = load <8 x float>* %S17 + %BC = bitcast <32 x i32> %Shuff6 to <32 x float> + %S28 = fcmp ord double 0x3ED1A1F787BB2185, 0x3EE59DE55A8DF890 + br i1 %S28, label %CF, label %CF39 + +CF39: ; preds = %CF39, %CF + store <8 x float> %L19, <8 x float>* %S17 + %I35 = insertelement <32 x float> %BC, float 0x3EC2489F60000000, i32 9 + %S38 = fcmp ule double 0x3EE59DE55A8DF890, 0x3EC4AB0CBB986A1A + br i1 %S38, label %CF39, label %CF40 + +CF40: ; preds = %CF39 + ret void +} diff --git a/test/CodeGen/X86/crash.ll b/test/CodeGen/X86/crash.ll index b5b1ad4..1531457 100644 --- a/test/CodeGen/X86/crash.ll +++ b/test/CodeGen/X86/crash.ll @@ -316,3 +316,78 @@ declare void @_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_c declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind + +; PR10463 +; Spilling a virtual register with <undef> uses. +define void @autogen_239_1000() { +BB: + %Shuff = shufflevector <8 x double> undef, <8 x double> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 undef, i32 undef> + br label %CF + +CF: + %B16 = frem <8 x double> zeroinitializer, %Shuff + %E19 = extractelement <8 x double> %Shuff, i32 5 + br i1 undef, label %CF, label %CF75 + +CF75: + br i1 undef, label %CF75, label %CF76 + +CF76: + store double %E19, double* undef + br i1 undef, label %CF76, label %CF77 + +CF77: + %B55 = fmul <8 x double> %B16, undef + br label %CF77 +} + +; PR10527 +define void @pr10527() nounwind uwtable { +entry: + br label %"4" + +"3": + %0 = load <2 x i32>* null, align 8 + %1 = xor <2 x i32> zeroinitializer, %0 + %2 = and <2 x i32> %1, %6 + %3 = or <2 x i32> undef, %2 + %4 = and <2 x i32> %3, undef + store <2 x i32> %4, <2 x i32>* undef + %5 = load <2 x i32>* undef, align 1 + br label %"4" + +"4": + %6 = phi <2 x i32> [ %5, %"3" ], [ zeroinitializer, %entry ] + %7 = icmp ult i32 undef, undef + br i1 %7, label %"3", label %"5" + +"5": + ret void +} + +; PR11078 +; +; A virtual register used by the "foo" inline asm memory operand gets +; constrained to GR32_ABCD during coalescing. This makes the inline asm +; impossible to allocate without splitting the live range and reinflating the +; register class around the inline asm. +; +; The constraint originally comes from the TEST8ri optimization of (icmp (and %t0, 1), 0). 
+ +@__force_order = external hidden global i32, align 4 +define void @pr11078(i32* %pgd) nounwind { +entry: + %t0 = load i32* %pgd, align 4 + %and2 = and i32 %t0, 1 + %tobool = icmp eq i32 %and2, 0 + br i1 %tobool, label %if.then, label %if.end + +if.then: + %t1 = tail call i32 asm sideeffect "bar", "=r,=*m,~{dirflag},~{fpsr},~{flags}"(i32* @__force_order) nounwind + br label %if.end + +if.end: + %t6 = inttoptr i32 %t0 to i64* + %t11 = tail call i64 asm sideeffect "foo", "=*m,=A,{bx},{cx},1,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %t6, i32 0, i32 0, i64 0) nounwind + ret void +} diff --git a/test/CodeGen/X86/dbg-at-specficiation.ll b/test/CodeGen/X86/dbg-at-specficiation.ll new file mode 100644 index 0000000..aa5e6ef --- /dev/null +++ b/test/CodeGen/X86/dbg-at-specficiation.ll @@ -0,0 +1,20 @@ +; RUN: llc < %s | FileCheck %s +; Radar 10147769 +; Do not unnecessarily use AT_specification DIE. +; CHECK-NOT: AT_specification + +@a = common global [10 x i32] zeroinitializer, align 16 + +!llvm.dbg.cu = !{!0} + +!0 = metadata !{i32 720913, i32 0, i32 12, metadata !"x.c", metadata !"/private/tmp", metadata !"clang version 3.0 (trunk 140253)", i1 true, i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3} ; [ DW_TAG_compile_unit ] +!1 = metadata !{metadata !2} +!2 = metadata !{i32 0} +!3 = metadata !{metadata !4} +!4 = metadata !{metadata !5} +!5 = metadata !{i32 720948, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !6, i32 1, metadata !7, i32 0, i32 1, [10 x i32]* @a} ; [ DW_TAG_variable ] +!6 = metadata !{i32 720937, metadata !"x.c", metadata !"/private/tmp", null} ; [ DW_TAG_file_type ] +!7 = metadata !{i32 720897, null, metadata !"", null, i32 0, i64 320, i64 32, i32 0, i32 0, metadata !8, metadata !9, i32 0, i32 0} ; [ DW_TAG_array_type ] +!8 = metadata !{i32 720932, null, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] +!9 = metadata !{metadata !10} +!10 = metadata !{i32 720929, i64 0, i64 9} ; [ DW_TAG_subrange_type ] diff --git a/test/CodeGen/X86/dbg-file-name.ll b/test/CodeGen/X86/dbg-file-name.ll index 3a849aa..138ee26 100644 --- a/test/CodeGen/X86/dbg-file-name.ll +++ b/test/CodeGen/X86/dbg-file-name.ll @@ -1,7 +1,7 @@ ; RUN: llc -mtriple x86_64-apple-darwin10.0.0 < %s | FileCheck %s ; Radar 8884898 -; CHECK: file 1 "/Users/manav/one/two{{/|\\\\}}simple.c" +; CHECK: file 1 "/Users/manav/one/two" "simple.c" declare i32 @printf(i8*, ...) 
nounwind diff --git a/test/CodeGen/X86/dbg-inline.ll b/test/CodeGen/X86/dbg-inline.ll new file mode 100644 index 0000000..523c62e --- /dev/null +++ b/test/CodeGen/X86/dbg-inline.ll @@ -0,0 +1,140 @@ +; RUN: llc < %s | FileCheck %s +; Radar 7881628, 9747970 +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" +target triple = "x86_64-apple-macosx10.7.0" + +%class.APFloat = type { i32 } + +define i32 @_ZNK7APFloat9partCountEv(%class.APFloat* nocapture %this) nounwind uwtable readonly optsize ssp align 2 { +entry: + tail call void @llvm.dbg.value(metadata !{%class.APFloat* %this}, i64 0, metadata !28), !dbg !41 + %prec = getelementptr inbounds %class.APFloat* %this, i64 0, i32 0, !dbg !42 + %tmp = load i32* %prec, align 4, !dbg !42, !tbaa !44 + tail call void @llvm.dbg.value(metadata !{i32 %tmp}, i64 0, metadata !47), !dbg !48 + %add.i = add i32 %tmp, 42, !dbg !49 + ret i32 %add.i, !dbg !42 +} + +define zeroext i1 @_ZNK7APFloat14bitwiseIsEqualERKS_(%class.APFloat* %this, %class.APFloat* %rhs) uwtable optsize ssp align 2 { +entry: + tail call void @llvm.dbg.value(metadata !{%class.APFloat* %this}, i64 0, metadata !29), !dbg !51 + tail call void @llvm.dbg.value(metadata !{%class.APFloat* %rhs}, i64 0, metadata !30), !dbg !52 + tail call void @llvm.dbg.value(metadata !{%class.APFloat* %this}, i64 0, metadata !53), !dbg !55 + %prec.i = getelementptr inbounds %class.APFloat* %this, i64 0, i32 0, !dbg !56 +;CHECK: DW_TAG_inlined_subroutine +;CHECK: DW_AT_abstract_origin +;CHECK: DW_AT_ranges + %tmp.i = load i32* %prec.i, align 4, !dbg !56, !tbaa !44 + tail call void @llvm.dbg.value(metadata !{i32 %tmp.i}, i64 0, metadata !57), !dbg !58 + %add.i.i = add i32 %tmp.i, 42, !dbg !59 + tail call void @llvm.dbg.value(metadata !{i32 %add.i.i}, i64 0, metadata !31), !dbg !54 + %call2 = tail call i64* @_ZNK7APFloat16significandPartsEv(%class.APFloat* %this) optsize, !dbg !60 + tail call void @llvm.dbg.value(metadata !{i64* %call2}, i64 0, metadata !34), !dbg !60 + %call3 = tail call i64* @_ZNK7APFloat16significandPartsEv(%class.APFloat* %rhs) optsize, !dbg !61 + tail call void @llvm.dbg.value(metadata !{i64* %call3}, i64 0, metadata !37), !dbg !61 + %tmp = zext i32 %add.i.i to i64 + br label %for.cond, !dbg !62 + +for.cond: ; preds = %for.inc, %entry + %indvar = phi i64 [ %indvar.next, %for.inc ], [ 0, %entry ] + %tmp13 = sub i64 %tmp, %indvar, !dbg !62 + %i.0 = trunc i64 %tmp13 to i32, !dbg !62 + %cmp = icmp sgt i32 %i.0, 0, !dbg !62 + br i1 %cmp, label %for.body, label %return, !dbg !62 + +for.body: ; preds = %for.cond + %p.0 = getelementptr i64* %call2, i64 %indvar, !dbg !63 + %tmp6 = load i64* %p.0, align 8, !dbg !63, !tbaa !66 + %tmp8 = load i64* %call3, align 8, !dbg !63, !tbaa !66 + %cmp9 = icmp eq i64 %tmp6, %tmp8, !dbg !63 + br i1 %cmp9, label %for.inc, label %return, !dbg !63 + +for.inc: ; preds = %for.body + %indvar.next = add i64 %indvar, 1, !dbg !67 + br label %for.cond, !dbg !67 + +return: ; preds = %for.cond, %for.body + %retval.0 = phi i1 [ false, %for.body ], [ true, %for.cond ] + ret i1 %retval.0, !dbg !68 +} + +declare i64* @_ZNK7APFloat16significandPartsEv(%class.APFloat*) optsize + +declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone + +!llvm.dbg.cu = !{!0} +!llvm.dbg.sp = !{!1, !7, !12, !23, !24, !25} +!llvm.dbg.lv._ZNK7APFloat9partCountEv = !{!28} +!llvm.dbg.lv._ZNK7APFloat14bitwiseIsEqualERKS_ = !{!29, !30, !31, !34, !37} 
+!llvm.dbg.lv._ZL16partCountForBitsj = !{!38} +!llvm.dbg.gv = !{!39} + +!0 = metadata !{i32 655377, i32 0, i32 4, metadata !"/Volumes/Athwagate/R9747970/apf.cc", metadata !"/private/tmp", metadata !"clang version 3.0 (trunk 136149)", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ] +!1 = metadata !{i32 655406, i32 0, metadata !2, metadata !"bitwiseIsEqual", metadata !"bitwiseIsEqual", metadata !"_ZNK7APFloat14bitwiseIsEqualERKS_", metadata !3, i32 8, metadata !19, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null} ; [ DW_TAG_subprogram ] +!2 = metadata !{i32 655362, metadata !0, metadata !"APFloat", metadata !3, i32 6, i64 32, i64 32, i32 0, i32 0, null, metadata !4, i32 0, null, null} ; [ DW_TAG_class_type ] +!3 = metadata !{i32 655401, metadata !"/Volumes/Athwagate/R9747970/apf.cc", metadata !"/private/tmp", metadata !0} ; [ DW_TAG_file_type ] +!4 = metadata !{metadata !5, metadata !1, metadata !7, metadata !12} +!5 = metadata !{i32 655373, metadata !2, metadata !"prec", metadata !3, i32 13, i64 32, i64 32, i64 0, i32 0, metadata !6} ; [ DW_TAG_member ] +!6 = metadata !{i32 655396, metadata !0, metadata !"unsigned int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ] +!7 = metadata !{i32 655406, i32 0, metadata !2, metadata !"partCount", metadata !"partCount", metadata !"_ZNK7APFloat9partCountEv", metadata !3, i32 9, metadata !8, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null} ; [ DW_TAG_subprogram ] +!8 = metadata !{i32 655381, metadata !3, metadata !"", metadata !3, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !9, i32 0, i32 0} ; [ DW_TAG_subroutine_type ] +!9 = metadata !{metadata !6, metadata !10} +!10 = metadata !{i32 655375, metadata !0, metadata !"", i32 0, i32 0, i64 64, i64 64, i64 0, i32 64, metadata !11} ; [ DW_TAG_pointer_type ] +!11 = metadata !{i32 655398, metadata !0, metadata !"", null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !2} ; [ DW_TAG_const_type ] +!12 = metadata !{i32 655406, i32 0, metadata !2, metadata !"significandParts", metadata !"significandParts", metadata !"_ZNK7APFloat16significandPartsEv", metadata !3, i32 11, metadata !13, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null} ; [ DW_TAG_subprogram ] +!13 = metadata !{i32 655381, metadata !3, metadata !"", metadata !3, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !14, i32 0, i32 0} ; [ DW_TAG_subroutine_type ] +!14 = metadata !{metadata !15, metadata !10} +!15 = metadata !{i32 655375, metadata !0, metadata !"", null, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !16} ; [ DW_TAG_pointer_type ] +!16 = metadata !{i32 655382, metadata !0, metadata !"integerPart", metadata !3, i32 2, i64 0, i64 0, i64 0, i32 0, metadata !17} ; [ DW_TAG_typedef ] +!17 = metadata !{i32 655382, metadata !0, metadata !"uint64_t", metadata !3, i32 1, i64 0, i64 0, i64 0, i32 0, metadata !18} ; [ DW_TAG_typedef ] +!18 = metadata !{i32 655396, metadata !0, metadata !"long long unsigned int", null, i32 0, i64 64, i64 64, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ] +!19 = metadata !{i32 655381, metadata !3, metadata !"", metadata !3, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !20, i32 0, i32 0} ; [ DW_TAG_subroutine_type ] +!20 = metadata !{metadata !21, metadata !10, metadata !22} +!21 = metadata !{i32 655396, metadata !0, metadata !"bool", null, i32 0, i64 8, i64 8, i64 0, i32 0, i32 2} ; [ DW_TAG_base_type ] +!22 = metadata !{i32 655376, metadata !0, null, null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !11} ; 
[ DW_TAG_reference_type ] +!23 = metadata !{i32 655406, i32 0, metadata !0, metadata !"partCount", metadata !"partCount", metadata !"_ZNK7APFloat9partCountEv", metadata !3, i32 23, metadata !8, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (%class.APFloat*)* @_ZNK7APFloat9partCountEv, null, metadata !7} ; [ DW_TAG_subprogram ] +!24 = metadata !{i32 655406, i32 0, metadata !0, metadata !"bitwiseIsEqual", metadata !"bitwiseIsEqual", metadata !"_ZNK7APFloat14bitwiseIsEqualERKS_", metadata !3, i32 28, metadata !19, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i1 (%class.APFloat*, %class.APFloat*)* @_ZNK7APFloat14bitwiseIsEqualERKS_, null, metadata !1} ; [ DW_TAG_subprogram ] +!25 = metadata !{i32 655406, i32 0, metadata !3, metadata !"partCountForBits", metadata !"partCountForBits", metadata !"", metadata !3, i32 17, metadata !26, i1 true, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, null, null, null} ; [ DW_TAG_subprogram ] +!26 = metadata !{i32 655381, metadata !3, metadata !"", metadata !3, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !27, i32 0, i32 0} ; [ DW_TAG_subroutine_type ] +!27 = metadata !{metadata !6} +!28 = metadata !{i32 655617, metadata !23, metadata !"this", metadata !3, i32 16777238, metadata !10, i32 64, i32 0} ; [ DW_TAG_arg_variable ] +!29 = metadata !{i32 655617, metadata !24, metadata !"this", metadata !3, i32 16777244, metadata !10, i32 64, i32 0} ; [ DW_TAG_arg_variable ] +!30 = metadata !{i32 655617, metadata !24, metadata !"rhs", metadata !3, i32 33554460, metadata !22, i32 0, i32 0} ; [ DW_TAG_arg_variable ] +!31 = metadata !{i32 655616, metadata !32, metadata !"i", metadata !3, i32 29, metadata !33, i32 0, i32 0} ; [ DW_TAG_auto_variable ] +!32 = metadata !{i32 655371, metadata !24, i32 28, i32 56, metadata !3, i32 1} ; [ DW_TAG_lexical_block ] +!33 = metadata !{i32 655396, metadata !0, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] +!34 = metadata !{i32 655616, metadata !32, metadata !"p", metadata !3, i32 30, metadata !35, i32 0, i32 0} ; [ DW_TAG_auto_variable ] +!35 = metadata !{i32 655375, metadata !0, metadata !"", null, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !36} ; [ DW_TAG_pointer_type ] +!36 = metadata !{i32 655398, metadata !0, metadata !"", null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !16} ; [ DW_TAG_const_type ] +!37 = metadata !{i32 655616, metadata !32, metadata !"q", metadata !3, i32 31, metadata !35, i32 0, i32 0} ; [ DW_TAG_auto_variable ] +!38 = metadata !{i32 655617, metadata !25, metadata !"bits", metadata !3, i32 16777232, metadata !6, i32 0, i32 0} ; [ DW_TAG_arg_variable ] +!39 = metadata !{i32 655412, i32 0, metadata !3, metadata !"integerPartWidth", metadata !"integerPartWidth", metadata !"integerPartWidth", metadata !3, i32 3, metadata !40, i32 1, i32 1, i32 42} ; [ DW_TAG_variable ] +!40 = metadata !{i32 655398, metadata !0, metadata !"", null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !6} ; [ DW_TAG_const_type ] +!41 = metadata !{i32 22, i32 23, metadata !23, null} +!42 = metadata !{i32 24, i32 10, metadata !43, null} +!43 = metadata !{i32 655371, metadata !23, i32 23, i32 1, metadata !3, i32 0} ; [ DW_TAG_lexical_block ] +!44 = metadata !{metadata !"int", metadata !45} +!45 = metadata !{metadata !"omnipotent char", metadata !46} +!46 = metadata !{metadata !"Simple C/C++ TBAA", null} +!47 = metadata !{i32 655617, metadata !25, metadata !"bits", metadata !3, i32 16777232, metadata !6, i32 0, metadata !42} ; [ DW_TAG_arg_variable ] +!48 = 
metadata !{i32 16, i32 58, metadata !25, metadata !42} +!49 = metadata !{i32 18, i32 3, metadata !50, metadata !42} +!50 = metadata !{i32 655371, metadata !25, i32 17, i32 1, metadata !3, i32 4} ; [ DW_TAG_lexical_block ] +!51 = metadata !{i32 28, i32 15, metadata !24, null} +!52 = metadata !{i32 28, i32 45, metadata !24, null} +!53 = metadata !{i32 655617, metadata !23, metadata !"this", metadata !3, i32 16777238, metadata !10, i32 64, metadata !54} ; [ DW_TAG_arg_variable ] +!54 = metadata !{i32 29, i32 10, metadata !32, null} +!55 = metadata !{i32 22, i32 23, metadata !23, metadata !54} +!56 = metadata !{i32 24, i32 10, metadata !43, metadata !54} +!57 = metadata !{i32 655617, metadata !25, metadata !"bits", metadata !3, i32 16777232, metadata !6, i32 0, metadata !56} ; [ DW_TAG_arg_variable ] +!58 = metadata !{i32 16, i32 58, metadata !25, metadata !56} +!59 = metadata !{i32 18, i32 3, metadata !50, metadata !56} +!60 = metadata !{i32 30, i32 24, metadata !32, null} +!61 = metadata !{i32 31, i32 24, metadata !32, null} +!62 = metadata !{i32 32, i32 3, metadata !32, null} +!63 = metadata !{i32 33, i32 5, metadata !64, null} +!64 = metadata !{i32 655371, metadata !65, i32 32, i32 25, metadata !3, i32 3} ; [ DW_TAG_lexical_block ] +!65 = metadata !{i32 655371, metadata !32, i32 32, i32 3, metadata !3, i32 2} ; [ DW_TAG_lexical_block ] +!66 = metadata !{metadata !"long long", metadata !45} +!67 = metadata !{i32 32, i32 15, metadata !65, null} +!68 = metadata !{i32 37, i32 1, metadata !32, null} diff --git a/test/CodeGen/X86/dbg-large-unsigned-const.ll b/test/CodeGen/X86/dbg-large-unsigned-const.ll new file mode 100644 index 0000000..fc295c6 --- /dev/null +++ b/test/CodeGen/X86/dbg-large-unsigned-const.ll @@ -0,0 +1,61 @@ +; RUN: llc -filetype=obj %s -o /dev/null +; Hanle large unsigned constant values. 
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32" +target triple = "i386-apple-macosx10.7.0" + +define zeroext i1 @_Z3iseRKxS0_(i64* nocapture %LHS, i64* nocapture %RHS) nounwind readonly optsize ssp { +entry: + tail call void @llvm.dbg.value(metadata !{i64* %LHS}, i64 0, metadata !7), !dbg !13 + tail call void @llvm.dbg.value(metadata !{i64* %RHS}, i64 0, metadata !11), !dbg !14 + %tmp1 = load i64* %LHS, align 4, !dbg !15, !tbaa !17 + %tmp3 = load i64* %RHS, align 4, !dbg !15, !tbaa !17 + %cmp = icmp eq i64 %tmp1, %tmp3, !dbg !15 + ret i1 %cmp, !dbg !15 +} + +define zeroext i1 @_Z2fnx(i64 %a) nounwind readnone optsize ssp { +entry: + tail call void @llvm.dbg.value(metadata !{i64 %a}, i64 0, metadata !12), !dbg !20 + tail call void @llvm.dbg.value(metadata !{i64 %a}, i64 0, metadata !12), !dbg !20 + tail call void @llvm.dbg.value(metadata !{i64 %a}, i64 0, metadata !21), !dbg !24 + tail call void @llvm.dbg.value(metadata !25, i64 0, metadata !26), !dbg !27 + %cmp.i = icmp eq i64 %a, 9223372036854775807, !dbg !28 + ret i1 %cmp.i, !dbg !22 +} + +declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone + +!llvm.dbg.cu = !{!0} +!llvm.dbg.sp = !{!1, !6} +!llvm.dbg.lv._Z3iseRKxS0_ = !{!7, !11} +!llvm.dbg.lv._Z2fnx = !{!12} + +!0 = metadata !{i32 655377, i32 0, i32 4, metadata !"lli.cc", metadata !"/private/tmp", metadata !"clang version 3.0 (trunk 135593)", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ] +!1 = metadata !{i32 655406, i32 0, metadata !2, metadata !"ise", metadata !"ise", metadata !"_Z3iseRKxS0_", metadata !2, i32 2, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i1 (i64*, i64*)* @_Z3iseRKxS0_, null, null} ; [ DW_TAG_subprogram ] +!2 = metadata !{i32 655401, metadata !"lli.cc", metadata !"/private/tmp", metadata !0} ; [ DW_TAG_file_type ] +!3 = metadata !{i32 655381, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ] +!4 = metadata !{metadata !5} +!5 = metadata !{i32 655396, metadata !0, metadata !"bool", null, i32 0, i64 8, i64 8, i64 0, i32 0, i32 2} ; [ DW_TAG_base_type ] +!6 = metadata !{i32 655406, i32 0, metadata !2, metadata !"fn", metadata !"fn", metadata !"_Z2fnx", metadata !2, i32 6, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i1 (i64)* @_Z2fnx, null, null} ; [ DW_TAG_subprogram ] +!7 = metadata !{i32 655617, metadata !1, metadata !"LHS", metadata !2, i32 16777218, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] +!8 = metadata !{i32 655376, metadata !0, null, null, i32 0, i64 32, i64 32, i64 0, i32 0, metadata !9} ; [ DW_TAG_reference_type ] +!9 = metadata !{i32 655398, metadata !0, metadata !"", null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !10} ; [ DW_TAG_const_type ] +!10 = metadata !{i32 655396, metadata !0, metadata !"long long int", null, i32 0, i64 64, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] +!11 = metadata !{i32 655617, metadata !1, metadata !"RHS", metadata !2, i32 33554434, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] +!12 = metadata !{i32 655617, metadata !6, metadata !"a", metadata !2, i32 16777222, metadata !10, i32 0, i32 0} ; [ DW_TAG_arg_variable ] +!13 = metadata !{i32 2, i32 27, metadata !1, null} +!14 = metadata !{i32 2, i32 49, metadata !1, null} +!15 = metadata !{i32 3, i32 3, metadata !16, null} +!16 = metadata !{i32 655371, metadata !1, i32 2, i32 54, 
metadata !2, i32 0} ; [ DW_TAG_lexical_block ] +!17 = metadata !{metadata !"long long", metadata !18} +!18 = metadata !{metadata !"omnipotent char", metadata !19} +!19 = metadata !{metadata !"Simple C/C++ TBAA", null} +!20 = metadata !{i32 6, i32 19, metadata !6, null} +!21 = metadata !{i32 655617, metadata !1, metadata !"LHS", metadata !2, i32 16777218, metadata !8, i32 0, metadata !22} ; [ DW_TAG_arg_variable ] +!22 = metadata !{i32 7, i32 10, metadata !23, null} +!23 = metadata !{i32 655371, metadata !6, i32 6, i32 22, metadata !2, i32 1} ; [ DW_TAG_lexical_block ] +!24 = metadata !{i32 2, i32 27, metadata !1, metadata !22} +!25 = metadata !{i64 9223372036854775807} +!26 = metadata !{i32 655617, metadata !1, metadata !"RHS", metadata !2, i32 33554434, metadata !8, i32 0, metadata !22} ; [ DW_TAG_arg_variable ] +!27 = metadata !{i32 2, i32 49, metadata !1, metadata !22} +!28 = metadata !{i32 3, i32 3, metadata !16, metadata !22} diff --git a/test/CodeGen/X86/dbg-value-isel.ll b/test/CodeGen/X86/dbg-value-isel.ll index d1a9e57..f1101e6 100644 --- a/test/CodeGen/X86/dbg-value-isel.ll +++ b/test/CodeGen/X86/dbg-value-isel.ll @@ -29,8 +29,8 @@ entry: get_local_id.exit: ; preds = %4 %6 = phi i32 [ %5, %4 ] call void @llvm.dbg.value(metadata !{i32 %6}, i64 0, metadata !10), !dbg !12 - %7 = call <4 x i32> @__amdil_get_global_id_int() nounwind - %8 = extractelement <4 x i32> %7, i32 0 + %7 = call <4 x i32> @__amdil_get_global_id_int() nounwind, !dbg !12 + %8 = extractelement <4 x i32> %7, i32 0, !dbg !12 br label %9 ; <label>:9 ; preds = %get_local_id.exit diff --git a/test/CodeGen/X86/extractelement-load.ll b/test/CodeGen/X86/extractelement-load.ll index ee57d9b..06d739c 100644 --- a/test/CodeGen/X86/extractelement-load.ll +++ b/test/CodeGen/X86/extractelement-load.ll @@ -1,9 +1,25 @@ -; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=yonah | not grep movd -; RUN: llc < %s -march=x86-64 -mattr=+sse2 -mcpu=core2 | not grep movd +; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=yonah | FileCheck %s +; RUN: llc < %s -march=x86-64 -mattr=+sse2 -mcpu=core2 | FileCheck %s define i32 @t(<2 x i64>* %val) nounwind { +; CHECK: t: +; CHECK-NOT: movd +; CHECK: movl 8( +; CHECK-NEXT: ret %tmp2 = load <2 x i64>* %val, align 16 ; <<2 x i64>> [#uses=1] %tmp3 = bitcast <2 x i64> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1] %tmp4 = extractelement <4 x i32> %tmp3, i32 2 ; <i32> [#uses=1] ret i32 %tmp4 } + +; Case where extractelement of load ends up as undef. +; (Making sure this doesn't crash.) 
+define i32 @t2(<8 x i32>* %xp) { +; CHECK: t2: +; CHECK: ret + %x = load <8 x i32>* %xp + %Shuff68 = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32 +undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3> + %y = extractelement <8 x i32> %Shuff68, i32 0 + ret i32 %y +} diff --git a/test/CodeGen/X86/fast-isel-atomic.ll b/test/CodeGen/X86/fast-isel-atomic.ll index 74c5868..5f761dd 100644 --- a/test/CodeGen/X86/fast-isel-atomic.ll +++ b/test/CodeGen/X86/fast-isel-atomic.ll @@ -5,13 +5,11 @@ @sc = external global i8 @uc = external global i8 -declare i8 @llvm.atomic.load.and.i8.p0i8(i8* nocapture, i8) nounwind - define void @test_fetch_and_op() nounwind { entry: - %tmp40 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1] + %tmp40 = atomicrmw and i8* @sc, i8 11 monotonic store i8 %tmp40, i8* @sc - %tmp41 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1] + %tmp41 = atomicrmw and i8* @uc, i8 11 monotonic store i8 %tmp41, i8* @uc ret void } diff --git a/test/CodeGen/X86/fast-isel-cmp-branch.ll b/test/CodeGen/X86/fast-isel-cmp-branch.ll index 12312e8..6e408f8 100644 --- a/test/CodeGen/X86/fast-isel-cmp-branch.ll +++ b/test/CodeGen/X86/fast-isel-cmp-branch.ll @@ -26,5 +26,9 @@ true: return: ret void unw: + %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable } + +declare i32 @__gxx_personality_v0(...) diff --git a/test/CodeGen/X86/fast-isel-gep.ll b/test/CodeGen/X86/fast-isel-gep.ll index 1a2e34e..91d1f5d 100644 --- a/test/CodeGen/X86/fast-isel-gep.ll +++ b/test/CodeGen/X86/fast-isel-gep.ll @@ -104,6 +104,36 @@ invoke.cont16: ; preds = %if.then14 unreachable lpad: ; preds = %if.end19, %if.then14, %if.end, %entry + %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable } declare i8* @_ZNK18G__FastAllocString4dataEv() nounwind + + +; PR10605 / rdar://9930964 - Don't fold loads incorrectly. The load should +; happen before the store. +define i32 @test7({i32,i32,i32}* %tmp1, i32 %tmp71, i32 %tmp63) nounwind { +; X64: test7: +; X64: movl 8({{%rdi|%rcx}}), %eax +; X64: movl $4, 8({{%rdi|%rcx}}) + + + %tmp29 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2 + %tmp30 = load i32* %tmp29, align 4 + + %p2 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2 + store i32 4, i32* %p2 + + %tmp72 = or i32 %tmp71, %tmp30 + %tmp73 = icmp ne i32 %tmp63, 32 + br i1 %tmp73, label %T, label %F + +T: + ret i32 %tmp72 + +F: + ret i32 4 +} + +declare i32 @__gxx_personality_v0(...) 
diff --git a/test/CodeGen/X86/fast-isel-tls.ll b/test/CodeGen/X86/fast-isel-tls.ll index a5e6642..0963c52 100644 --- a/test/CodeGen/X86/fast-isel-tls.ll +++ b/test/CodeGen/X86/fast-isel-tls.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=x86 -relocation-model=pic -mtriple=i686-unknown-linux-gnu -fast-isel | grep __tls_get_addr +; RUN: llc < %s -march=x86 -relocation-model=pic -mtriple=i686-unknown-linux-gnu -fast-isel | FileCheck %s ; PR3654 @v = thread_local global i32 0 @@ -8,3 +8,19 @@ entry: %s = add i32 %t, 1 ret i32 %s } + +; CHECK: f: +; CHECK: leal v@TLSGD +; CHECK: __tls_get_addr + +@alias = alias internal i32* @v +define i32 @f_alias() nounwind { +entry: + %t = load i32* @v + %s = add i32 %t, 1 + ret i32 %s +} + +; CHECK: f_alias: +; CHECK: leal v@TLSGD +; CHECK: __tls_get_addr diff --git a/test/CodeGen/X86/fast-isel-x86-64.ll b/test/CodeGen/X86/fast-isel-x86-64.ll index c4afc10..6a5a102 100644 --- a/test/CodeGen/X86/fast-isel-x86-64.ll +++ b/test/CodeGen/X86/fast-isel-x86-64.ll @@ -259,4 +259,27 @@ define void @test21(double* %p1) { ; CHECK: test21: ; CHECK-NOT: pxor ; CHECK: movsd LCPI -}
\ No newline at end of file +} + +; Check that immediate arguments to a function +; do not cause massive spilling and are used +; as immediates just before the call. +define void @test22() nounwind { +entry: + call void @foo22(i32 0) + call void @foo22(i32 1) + call void @foo22(i32 2) + call void @foo22(i32 3) + ret void +; CHECK: test22: +; CHECK: movl $0, %edi +; CHECK: callq _foo22 +; CHECK: movl $1, %edi +; CHECK: callq _foo22 +; CHECK: movl $2, %edi +; CHECK: callq _foo22 +; CHECK: movl $3, %edi +; CHECK: callq _foo22 +} + +declare void @foo22(i32) diff --git a/test/CodeGen/X86/fp-stack-O0-crash.ll b/test/CodeGen/X86/fp-stack-O0-crash.ll index 9b629c0..ae83a02 100644 --- a/test/CodeGen/X86/fp-stack-O0-crash.ll +++ b/test/CodeGen/X86/fp-stack-O0-crash.ll @@ -1,4 +1,4 @@ -; RUN: llc %s -O0 -fast-isel -regalloc=fast -o - +; RUN: llc %s -O0 -fast-isel -regalloc=fast -mcpu=i386 -o - ; PR4767 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" @@ -28,3 +28,22 @@ if.then: ; preds = %cond.false, %cond.t if.end: ; preds = %if.then, %cond.false, %cond.true ret void } + +; PR10575 +; This produces a FP0 = IMPLICIT_DEF instruction. +define void @__m_rankmerge_MOD_dindexmerge_() nounwind { +entry: + br label %"20" + +"20": ; preds = %"23", %entry + %0 = phi double [ undef, %entry ], [ %0, %"23" ] + %1 = phi double [ 0.000000e+00, %entry ], [ %2, %"23" ] + br i1 undef, label %"21", label %"23" + +"21": ; preds = %"20" + ret void + +"23": ; preds = %"20" + %2 = select i1 undef, double %0, double %1 + br label %"20" +} diff --git a/test/CodeGen/X86/haddsub.ll b/test/CodeGen/X86/haddsub.ll new file mode 100644 index 0000000..91758ea --- /dev/null +++ b/test/CodeGen/X86/haddsub.ll @@ -0,0 +1,194 @@ +; RUN: llc < %s -march=x86-64 -mattr=+sse3,-avx | FileCheck %s -check-prefix=SSE3 +; RUN: llc < %s -march=x86-64 -mattr=-sse3,+avx | FileCheck %s -check-prefix=AVX + +; SSE3: haddpd1: +; SSE3-NOT: vhaddpd +; SSE3: haddpd +; AVX: haddpd1: +; AVX: vhaddpd +define <2 x double> @haddpd1(<2 x double> %x, <2 x double> %y) { + %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 0, i32 2> + %b = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 1, i32 3> + %r = fadd <2 x double> %a, %b + ret <2 x double> %r +} + +; SSE3: haddpd2: +; SSE3-NOT: vhaddpd +; SSE3: haddpd +; AVX: haddpd2: +; AVX: vhaddpd +define <2 x double> @haddpd2(<2 x double> %x, <2 x double> %y) { + %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 1, i32 2> + %b = shufflevector <2 x double> %y, <2 x double> %x, <2 x i32> <i32 2, i32 1> + %r = fadd <2 x double> %a, %b + ret <2 x double> %r +} + +; SSE3: haddpd3: +; SSE3-NOT: vhaddpd +; SSE3: haddpd +; AVX: haddpd3: +; AVX: vhaddpd +define <2 x double> @haddpd3(<2 x double> %x) { + %a = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 0, i32 undef> + %b = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 1, i32 undef> + %r = fadd <2 x double> %a, %b + ret <2 x double> %r +} + +; SSE3: haddps1: +; SSE3-NOT: vhaddps +; SSE3: haddps +; AVX: haddps1: +; AVX: vhaddps +define <4 x float> @haddps1(<4 x float> %x, <4 x float> %y) { + %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6> + %b = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 1, i32 3, i32 5, i32 7> + %r = fadd <4 x float> %a, %b + ret <4 x float> %r +} + +; SSE3: haddps2: +; SSE3-NOT: vhaddps +; SSE3: haddps +; AVX: haddps2: +; 
AVX: vhaddps +define <4 x float> @haddps2(<4 x float> %x, <4 x float> %y) { + %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 1, i32 2, i32 5, i32 6> + %b = shufflevector <4 x float> %y, <4 x float> %x, <4 x i32> <i32 4, i32 7, i32 0, i32 3> + %r = fadd <4 x float> %a, %b + ret <4 x float> %r +} + +; SSE3: haddps3: +; SSE3-NOT: vhaddps +; SSE3: haddps +; AVX: haddps3: +; AVX: vhaddps +define <4 x float> @haddps3(<4 x float> %x) { + %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6> + %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 3, i32 5, i32 7> + %r = fadd <4 x float> %a, %b + ret <4 x float> %r +} + +; SSE3: haddps4: +; SSE3-NOT: vhaddps +; SSE3: haddps +; AVX: haddps4: +; AVX: vhaddps +define <4 x float> @haddps4(<4 x float> %x) { + %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef> + %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 undef, i32 undef> + %r = fadd <4 x float> %a, %b + ret <4 x float> %r +} + +; SSE3: haddps5: +; SSE3-NOT: vhaddps +; SSE3: haddps +; AVX: haddps5: +; AVX: vhaddps +define <4 x float> @haddps5(<4 x float> %x) { + %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 3, i32 undef, i32 undef> + %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 undef, i32 undef> + %r = fadd <4 x float> %a, %b + ret <4 x float> %r +} + +; SSE3: haddps6: +; SSE3-NOT: vhaddps +; SSE3: haddps +; AVX: haddps6: +; AVX: vhaddps +define <4 x float> @haddps6(<4 x float> %x) { + %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef> + %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef> + %r = fadd <4 x float> %a, %b + ret <4 x float> %r +} + +; SSE3: haddps7: +; SSE3-NOT: vhaddps +; SSE3: haddps +; AVX: haddps7: +; AVX: vhaddps +define <4 x float> @haddps7(<4 x float> %x) { + %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 3, i32 undef, i32 undef> + %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 undef, i32 undef> + %r = fadd <4 x float> %a, %b + ret <4 x float> %r +} + +; SSE3: hsubpd1: +; SSE3-NOT: vhsubpd +; SSE3: hsubpd +; AVX: hsubpd1: +; AVX: vhsubpd +define <2 x double> @hsubpd1(<2 x double> %x, <2 x double> %y) { + %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 0, i32 2> + %b = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 1, i32 3> + %r = fsub <2 x double> %a, %b + ret <2 x double> %r +} + +; SSE3: hsubpd2: +; SSE3-NOT: vhsubpd +; SSE3: hsubpd +; AVX: hsubpd2: +; AVX: vhsubpd +define <2 x double> @hsubpd2(<2 x double> %x) { + %a = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 0, i32 undef> + %b = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 1, i32 undef> + %r = fsub <2 x double> %a, %b + ret <2 x double> %r +} + +; SSE3: hsubps1: +; SSE3-NOT: vhsubps +; SSE3: hsubps +; AVX: hsubps1: +; AVX: vhsubps +define <4 x float> @hsubps1(<4 x float> %x, <4 x float> %y) { + %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6> + %b = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 1, i32 3, i32 5, i32 7> + %r = fsub <4 x float> %a, %b + ret <4 x float> %r +} + +; SSE3: hsubps2: +; SSE3-NOT: vhsubps +; SSE3: hsubps +; AVX: hsubps2: +; AVX: vhsubps +define <4 x 
float> @hsubps2(<4 x float> %x) { + %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6> + %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 3, i32 5, i32 7> + %r = fsub <4 x float> %a, %b + ret <4 x float> %r +} + +; SSE3: hsubps3: +; SSE3-NOT: vhsubps +; SSE3: hsubps +; AVX: hsubps3: +; AVX: vhsubps +define <4 x float> @hsubps3(<4 x float> %x) { + %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef> + %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 undef, i32 undef> + %r = fsub <4 x float> %a, %b + ret <4 x float> %r +} + +; SSE3: hsubps4: +; SSE3-NOT: vhsubps +; SSE3: hsubps +; AVX: hsubps4: +; AVX: vhsubps +define <4 x float> @hsubps4(<4 x float> %x) { + %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef> + %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef> + %r = fsub <4 x float> %a, %b + ret <4 x float> %r +} diff --git a/test/CodeGen/X86/hidden-vis.ll b/test/CodeGen/X86/hidden-vis.ll index a948bdf..fcb74fc 100644 --- a/test/CodeGen/X86/hidden-vis.ll +++ b/test/CodeGen/X86/hidden-vis.ll @@ -1,8 +1,11 @@ ; RUN: llc < %s -mtriple=i686-pc-linux-gnu | FileCheck %s -check-prefix=LINUX ; RUN: llc < %s -mtriple=i686-apple-darwin8 | FileCheck %s -check-prefix=DARWIN +; RUN: llc < %s -mtriple=x86_64-w64-mingw32 | FileCheck %s -check-prefix=WINDOWS + @a = hidden global i32 0 -@b = external global i32 +@b = external hidden global i32 +@c = global i32* @b define weak hidden void @t1() nounwind { ; LINUX: .hidden t1 @@ -10,15 +13,19 @@ define weak hidden void @t1() nounwind { ; DARWIN: .private_extern _t1 ; DARWIN: t1: + +; WINDOWS: t1: +; WINDOWS-NOT: hidden ret void } define weak void @t2() nounwind { -; LINUX: t2: -; LINUX: .hidden a - -; DARWIN: t2: -; DARWIN: .private_extern _a +; DARWIN: .weak_definition _t2 ret void } +; LINUX: .hidden a +; LINUX: .hidden b + +; DARWIN: .private_extern _a +; DARWIN-NOT: private_extern diff --git a/test/CodeGen/X86/inline-asm-fpstack.ll b/test/CodeGen/X86/inline-asm-fpstack.ll index 8e48bbe..c9a1c1c 100644 --- a/test/CodeGen/X86/inline-asm-fpstack.ll +++ b/test/CodeGen/X86/inline-asm-fpstack.ll @@ -329,3 +329,14 @@ entry: %asmresult = extractvalue %complex %0, 0 ret float %asmresult } + +; Pass the same value in two fixed stack slots. +; CHECK: PR10602 +; CHECK: flds LCPI +; CHECK: fld %st(0) +; CHECK: fcomi %st(1), %st(0) +define i32 @PR10602() nounwind ssp { +entry: + %0 = tail call i32 asm "fcomi $2, $1; pushf; pop $0", "=r,{st},{st(1)},~{dirflag},~{fpsr},~{flags}"(double 2.000000e+00, double 2.000000e+00) nounwind + ret i32 %0 +} diff --git a/test/CodeGen/X86/iv-users-in-other-loops.ll b/test/CodeGen/X86/iv-users-in-other-loops.ll index 8385a29..8f79fb8 100644 --- a/test/CodeGen/X86/iv-users-in-other-loops.ll +++ b/test/CodeGen/X86/iv-users-in-other-loops.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=x86-64 -o %t +; RUN: llc < %s -march=x86-64 -enable-lsr-nested -o %t ; RUN: not grep inc %t ; RUN: grep dec %t | count 2 ; RUN: grep addq %t | count 12 @@ -11,6 +11,10 @@ ; to insert new induction variables. Previously it would create a ; flood of new induction variables. ; Also, the loop reversal should kick in once. 
+; +; In this example, performing LSR on the entire loop nest, +; as opposed to only the inner loop can further reduce induction variables, +; and their related instructions and registers. target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" target triple = "x86_64-unknown-linux-gnu" diff --git a/test/CodeGen/X86/lfence.ll b/test/CodeGen/X86/lfence.ll index 7a96ca3..1903a1e 100644 --- a/test/CodeGen/X86/lfence.ll +++ b/test/CodeGen/X86/lfence.ll @@ -1,8 +1,8 @@ ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep lfence -declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1) +declare void @llvm.x86.sse2.lfence() nounwind define void @test() { - call void @llvm.memory.barrier( i1 true, i1 false, i1 false, i1 false, i1 true) - ret void + call void @llvm.x86.sse2.lfence() + ret void } diff --git a/test/CodeGen/X86/licm-dominance.ll b/test/CodeGen/X86/licm-dominance.ll new file mode 100644 index 0000000..8a0958d --- /dev/null +++ b/test/CodeGen/X86/licm-dominance.ll @@ -0,0 +1,36 @@ +; RUN: llc -asm-verbose=false < %s | FileCheck %s + +; MachineLICM should check dominance before hoisting instructions. +; CHECK: jne LBB0_3 +; CHECK-NEXT: xorb %al, %al +; CHECK-NEXT: testb %al, %al + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" +target triple = "x86_64-apple-macosx10.7.2" + +define void @CMSColorWorldCreateParametricData() nounwind uwtable optsize ssp { +entry: + br label %for.body.i + +for.body.i: + br i1 undef, label %for.inc.i, label %if.then26.i + +if.then26.i: + br i1 undef, label %if.else.i.i, label %lor.lhs.false.i.i + +if.else.i.i: + br i1 undef, label %lor.lhs.false.i.i, label %if.then116.i.i + +lor.lhs.false.i.i: + br i1 undef, label %for.inc.i, label %if.then116.i.i + +if.then116.i.i: + unreachable + +for.inc.i: + %cmp17.i = icmp ult i64 undef, undef + br i1 %cmp17.i, label %for.body.i, label %if.end28.i + +if.end28.i: + ret void +} diff --git a/test/CodeGen/X86/licm-nested.ll b/test/CodeGen/X86/licm-nested.ll index b0105ac..c3f991d 100644 --- a/test/CodeGen/X86/licm-nested.ll +++ b/test/CodeGen/X86/licm-nested.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=x86_64-apple-darwin -march=x86-64 < %s -o /dev/null -stats -info-output-file - | grep machine-licm | grep 3 +; RUN: llc -mtriple=x86_64-apple-darwin -march=x86-64 < %s -o /dev/null -stats -info-output-file - | grep "hoisted out of loops" | grep 3 ; MachineLICM should be able to hoist the symbolic addresses out of ; the inner loops. 
diff --git a/test/CodeGen/X86/lock-inst-encoding.ll b/test/CodeGen/X86/lock-inst-encoding.ll index 2d10fbc..9765fae 100644 --- a/test/CodeGen/X86/lock-inst-encoding.ll +++ b/test/CodeGen/X86/lock-inst-encoding.ll @@ -3,19 +3,42 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" target triple = "x86_64-apple-darwin10.0.0" -; CHECK: f0: -; CHECK: addq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0, +; CHECK: f1: +; CHECK: addq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x01,0x37] ; CHECK: ret -define void @f0(i64* %a0) nounwind { - %t0 = and i64 1, 1 - call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) nounwind - %1 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %a0, i64 %t0) nounwind - call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) nounwind +define void @f1(i64* %a, i64 %b) nounwind { + %1 = atomicrmw add i64* %a, i64 %b monotonic ret void } -declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind +; CHECK: f2: +; CHECK: subq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x29,0x37] +; CHECK: ret +define void @f2(i64* %a, i64 %b) nounwind { + %1 = atomicrmw sub i64* %a, i64 %b monotonic + ret void +} -declare i32 @llvm.atomic.load.and.i32.p0i32(i32* nocapture, i32) nounwind +; CHECK: f3: +; CHECK: andq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x21,0x37] +; CHECK: ret +define void @f3(i64* %a, i64 %b) nounwind { + %1 = atomicrmw and i64* %a, i64 %b monotonic + ret void +} -declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind +; CHECK: f4: +; CHECK: orq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x09,0x37] +; CHECK: ret +define void @f4(i64* %a, i64 %b) nounwind { + %1 = atomicrmw or i64* %a, i64 %b monotonic + ret void +} + +; CHECK: f5: +; CHECK: xorq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x31,0x37] +; CHECK: ret +define void @f5(i64* %a, i64 %b) nounwind { + %1 = atomicrmw xor i64* %a, i64 %b monotonic + ret void +} diff --git a/test/CodeGen/X86/loop-strength-reduce3.ll b/test/CodeGen/X86/loop-strength-reduce3.ll index c45a374..d6c265f 100644 --- a/test/CodeGen/X86/loop-strength-reduce3.ll +++ b/test/CodeGen/X86/loop-strength-reduce3.ll @@ -1,5 +1,5 @@ -; RUN: llc < %s -march=x86 | grep cmp | grep 240 -; RUN: llc < %s -march=x86 | grep inc | count 1 +; RUN: llc < %s -march=x86 -enable-lsr-nested | grep cmp | grep 240 +; RUN: llc < %s -march=x86 -enable-lsr-nested | grep inc | count 1 define i32 @foo(i32 %A, i32 %B, i32 %C, i32 %D) nounwind { entry: diff --git a/test/CodeGen/X86/lzcnt.ll b/test/CodeGen/X86/lzcnt.ll new file mode 100644 index 0000000..e5a55ab --- /dev/null +++ b/test/CodeGen/X86/lzcnt.ll @@ -0,0 +1,38 @@ +; RUN: llc < %s -march=x86-64 -mattr=+lzcnt | FileCheck %s + +define i32 @t1(i32 %x) nounwind { + %tmp = tail call i32 @llvm.ctlz.i32( i32 %x ) + ret i32 %tmp +; CHECK: t1: +; CHECK: lzcntl +} + +declare i32 @llvm.ctlz.i32(i32) nounwind readnone + +define i16 @t2(i16 %x) nounwind { + %tmp = tail call i16 @llvm.ctlz.i16( i16 %x ) + ret i16 %tmp +; CHECK: t2: +; CHECK: lzcntw +} + +declare i16 @llvm.ctlz.i16(i16) nounwind readnone + +define i64 @t3(i64 %x) nounwind { + %tmp = tail call i64 @llvm.ctlz.i64( i64 %x ) + ret i64 %tmp +; CHECK: t3: +; CHECK: lzcntq +} + +declare i64 @llvm.ctlz.i64(i64) nounwind readnone + +define i8 @t4(i8 %x) nounwind { + %tmp = tail call i8 @llvm.ctlz.i8( i8 %x ) + ret i8 %tmp +; CHECK: t4: +; CHECK: lzcntw +} + +declare i8 @llvm.ctlz.i8(i8) nounwind 
readnone + diff --git a/test/CodeGen/X86/membarrier.ll b/test/CodeGen/X86/membarrier.ll index 42f8ef5..5e569aa 100644 --- a/test/CodeGen/X86/membarrier.ll +++ b/test/CodeGen/X86/membarrier.ll @@ -5,11 +5,8 @@ define i32 @t() { entry: %i = alloca i32, align 4 store i32 1, i32* %i, align 4 - call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) - %0 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %i, i32 1) - call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) + fence seq_cst + %0 = atomicrmw sub i32* %i, i32 1 monotonic + fence seq_cst ret i32 0 } - -declare i32 @llvm.atomic.load.sub.i32.p0i32(i32* nocapture, i32) nounwind -declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind diff --git a/test/CodeGen/X86/mfence.ll b/test/CodeGen/X86/mfence.ll index a1b2283..6056add 100644 --- a/test/CodeGen/X86/mfence.ll +++ b/test/CodeGen/X86/mfence.ll @@ -2,19 +2,7 @@ ; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep lfence ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep mfence - -declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1) - define void @test() { - call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 false, i1 true) - call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 false, i1 true) - call void @llvm.memory.barrier( i1 true, i1 false, i1 false, i1 true, i1 true) - - call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 false, i1 true) - call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 true, i1 true) - call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 true, i1 true) - - call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true , i1 true) - call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 false , i1 true) - ret void + fence seq_cst + ret void } diff --git a/test/CodeGen/X86/mmx-pinsrw.ll b/test/CodeGen/X86/mmx-pinsrw.ll index 6062b50..d9c7c67 100644 --- a/test/CodeGen/X86/mmx-pinsrw.ll +++ b/test/CodeGen/X86/mmx-pinsrw.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | grep pinsrw | count 1 +; RUN: llc < %s -mtriple=x86_64-linux -mcpu=corei7 | grep pinsr ; PR2562 external global i16 ; <i16*>:0 [#uses=1] diff --git a/test/CodeGen/X86/mmx-vzmovl-2.ll b/test/CodeGen/X86/mmx-vzmovl-2.ll deleted file mode 100644 index a7ce7d9..0000000 --- a/test/CodeGen/X86/mmx-vzmovl-2.ll +++ /dev/null @@ -1,29 +0,0 @@ -; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | grep pxor -; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | grep punpckldq - - %struct.vS1024 = type { [8 x <4 x i32>] } - %struct.vS512 = type { [4 x <4 x i32>] } - -declare x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx, i32) nounwind readnone - -define void @t() nounwind { -entry: - br label %bb554 - -bb554: ; preds = %bb554, %entry - %sum.0.reg2mem.0 = phi <1 x i64> [ %tmp562, %bb554 ], [ zeroinitializer, %entry ] ; <<1 x i64>> [#uses=1] - %0 = load x86_mmx* null, align 8 ; <<1 x i64>> [#uses=2] - %1 = bitcast x86_mmx %0 to <2 x i32> ; <<2 x i32>> [#uses=1] - %tmp555 = and <2 x i32> %1, < i32 -1, i32 0 > ; <<2 x i32>> [#uses=1] - %2 = bitcast <2 x i32> %tmp555 to x86_mmx ; <<1 x i64>> [#uses=1] - %3 = call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx %0, i32 32) nounwind readnone ; <<1 x i64>> [#uses=1] - store <1 x i64> %sum.0.reg2mem.0, <1 x i64>* null - %tmp3 = bitcast x86_mmx %2 to <1 x i64> - %tmp558 = add <1 x i64> %sum.0.reg2mem.0, %tmp3 ; <<1 x i64>> [#uses=1] - %tmp5 = bitcast <1 x i64> %tmp558 to x86_mmx - %4 = call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx %tmp5, i32 32) nounwind readnone ; <<1 x 
i64>> [#uses=1] - %tmp6 = bitcast x86_mmx %4 to <1 x i64> - %tmp7 = bitcast x86_mmx %3 to <1 x i64> - %tmp562 = add <1 x i64> %tmp6, %tmp7 ; <<1 x i64>> [#uses=1] - br label %bb554 -} diff --git a/test/CodeGen/X86/mmx-vzmovl.ll b/test/CodeGen/X86/mmx-vzmovl.ll deleted file mode 100644 index 191e261..0000000 --- a/test/CodeGen/X86/mmx-vzmovl.ll +++ /dev/null @@ -1,15 +0,0 @@ -; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | grep movq | count 2 -; There are no MMX operations here; this is promoted to XMM. - -define void @foo(<1 x i64>* %a, <1 x i64>* %b) nounwind { -entry: - %0 = load <1 x i64>* %a, align 8 ; <<1 x i64>> [#uses=1] - %1 = bitcast <1 x i64> %0 to <2 x i32> ; <<2 x i32>> [#uses=1] - %2 = and <2 x i32> %1, < i32 -1, i32 0 > ; <<2 x i32>> [#uses=1] - %3 = bitcast <2 x i32> %2 to <1 x i64> ; <<1 x i64>> [#uses=1] - store <1 x i64> %3, <1 x i64>* %b, align 8 - br label %bb2 - -bb2: ; preds = %entry - ret void -} diff --git a/test/CodeGen/X86/movbe.ll b/test/CodeGen/X86/movbe.ll new file mode 100644 index 0000000..3d3d8cf --- /dev/null +++ b/test/CodeGen/X86/movbe.ll @@ -0,0 +1,36 @@ +; RUN: llc -mtriple=x86_64-linux -mcpu=atom < %s | FileCheck %s + +declare i32 @llvm.bswap.i32(i32) nounwind readnone +declare i64 @llvm.bswap.i64(i64) nounwind readnone + +define void @test1(i32* nocapture %x, i32 %y) nounwind { + %bswap = call i32 @llvm.bswap.i32(i32 %y) + store i32 %bswap, i32* %x, align 4 + ret void +; CHECK: test1: +; CHECK: movbel %esi, (%rdi) +} + +define i32 @test2(i32* %x) nounwind { + %load = load i32* %x, align 4 + %bswap = call i32 @llvm.bswap.i32(i32 %load) + ret i32 %bswap +; CHECK: test2: +; CHECK: movbel (%rdi), %eax +} + +define void @test3(i64* %x, i64 %y) nounwind { + %bswap = call i64 @llvm.bswap.i64(i64 %y) + store i64 %bswap, i64* %x, align 8 + ret void +; CHECK: test3: +; CHECK: movbeq %rsi, (%rdi) +} + +define i64 @test4(i64* %x) nounwind { + %load = load i64* %x, align 8 + %bswap = call i64 @llvm.bswap.i64(i64 %load) + ret i64 %bswap +; CHECK: test4: +; CHECK: movbeq (%rdi), %rax +} diff --git a/test/CodeGen/X86/movgs.ll b/test/CodeGen/X86/movgs.ll index 97b7fe7..aeb540f 100644 --- a/test/CodeGen/X86/movgs.ll +++ b/test/CodeGen/X86/movgs.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=x86 -mattr=sse41 | FileCheck %s --check-prefix=X32 +; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -mattr=sse41 | FileCheck %s --check-prefix=X32 ; RUN: llc < %s -mtriple=x86_64-linux -mattr=sse41 | FileCheck %s --check-prefix=X64 ; RUN: llc < %s -mtriple=x86_64-win32 -mattr=sse41 | FileCheck %s --check-prefix=X64 diff --git a/test/CodeGen/X86/2011-05-31-movmsk.ll b/test/CodeGen/X86/movmsk.ll index 2b54d5c..2368548 100644 --- a/test/CodeGen/X86/2011-05-31-movmsk.ll +++ b/test/CodeGen/X86/movmsk.ll @@ -77,3 +77,34 @@ entry: %shr.i = lshr i32 %2, 31 ret i32 %shr.i } + +; rdar://10247336 +; movmskp{s|d} only set low 4/2 bits, high bits are known zero + +define i32 @t1(<4 x float> %x, i32* nocapture %indexTable) nounwind uwtable readonly ssp { +entry: +; CHECK: t1: +; CHECK: movmskps +; CHECK-NOT: movslq + %0 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %x) nounwind + %idxprom = sext i32 %0 to i64 + %arrayidx = getelementptr inbounds i32* %indexTable, i64 %idxprom + %1 = load i32* %arrayidx, align 4 + ret i32 %1 +} + +define i32 @t2(<4 x float> %x, i32* nocapture %indexTable) nounwind uwtable readonly ssp { +entry: +; CHECK: t2: +; CHECK: movmskpd +; CHECK-NOT: movslq + %0 = bitcast <4 x float> %x to <2 x double> + %1 = tail call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %0) 
nounwind + %idxprom = sext i32 %1 to i64 + %arrayidx = getelementptr inbounds i32* %indexTable, i64 %idxprom + %2 = load i32* %arrayidx, align 4 + ret i32 %2 +} + +declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone +declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone diff --git a/test/CodeGen/X86/nofence.ll b/test/CodeGen/X86/nofence.ll deleted file mode 100644 index 244d2e9..0000000 --- a/test/CodeGen/X86/nofence.ll +++ /dev/null @@ -1,27 +0,0 @@ -; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep fence - -declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1) - -define void @test() { - call void @llvm.memory.barrier( i1 true, i1 false, i1 false, i1 false, i1 false) - call void @llvm.memory.barrier( i1 false, i1 true, i1 false, i1 false, i1 false) - call void @llvm.memory.barrier( i1 false, i1 false, i1 true, i1 false, i1 false) - call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 true, i1 false) - - call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 false, i1 false) - call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 false, i1 false) - call void @llvm.memory.barrier( i1 true, i1 false, i1 false, i1 true, i1 false) - call void @llvm.memory.barrier( i1 false, i1 true, i1 true, i1 false, i1 false) - call void @llvm.memory.barrier( i1 false, i1 true, i1 false, i1 true, i1 false) - call void @llvm.memory.barrier( i1 false, i1 false, i1 true, i1 true, i1 false) - - call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 false, i1 false) - call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 true, i1 false) - call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 true, i1 false) - call void @llvm.memory.barrier( i1 false, i1 true, i1 true, i1 true, i1 false) - - - call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true , i1 false) - call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 false , i1 false) - ret void -} diff --git a/test/CodeGen/X86/norex-subreg.ll b/test/CodeGen/X86/norex-subreg.ll new file mode 100644 index 0000000..2c529fd --- /dev/null +++ b/test/CodeGen/X86/norex-subreg.ll @@ -0,0 +1,80 @@ +; RUN: llc -O0 < %s +; RUN: llc < %s +target triple = "x86_64-apple-macosx10.7" + +; This test case extracts a sub_8bit_hi sub-register: +; +; %R8B<def> = COPY %BH, %EBX<imp-use,kill> +; %ESI<def> = MOVZX32_NOREXrr8 %R8B<kill> +; +; The register allocation above is invalid, %BH can only be encoded without an +; REX prefix, so the destination register must be GR8_NOREX. The code above +; triggers an assertion in copyPhysReg. 
+; +; <rdar://problem/10248099> + +define void @f() nounwind uwtable ssp { +entry: + %0 = load i32* undef, align 4 + %add = add i32 0, %0 + %conv1 = trunc i32 %add to i16 + %bf.value = and i16 %conv1, 255 + %1 = and i16 %bf.value, 255 + %2 = shl i16 %1, 8 + %3 = load i16* undef, align 1 + %4 = and i16 %3, 255 + %5 = or i16 %4, %2 + store i16 %5, i16* undef, align 1 + %6 = load i16* undef, align 1 + %7 = lshr i16 %6, 8 + %bf.clear2 = and i16 %7, 255 + %conv3 = zext i16 %bf.clear2 to i32 + %rem = srem i32 %conv3, 15 + %conv4 = trunc i32 %rem to i16 + %bf.value5 = and i16 %conv4, 255 + %8 = and i16 %bf.value5, 255 + %9 = shl i16 %8, 8 + %10 = or i16 undef, %9 + store i16 %10, i16* undef, align 1 + ret void +} + +; This test case extracts a sub_8bit_hi sub-register: +; +; %vreg2<def> = COPY %vreg1:sub_8bit_hi; GR8:%vreg2 GR64_ABCD:%vreg1 +; TEST8ri %vreg2, 1, %EFLAGS<imp-def>; GR8:%vreg2 +; +; %vreg2 must be constrained to GR8_NOREX, or the COPY could become impossible. +; +; PR11088 + +define fastcc i32 @g(i64 %FB) nounwind uwtable readnone align 2 { +entry: + %and32 = and i64 %FB, 256 + %cmp33 = icmp eq i64 %and32, 0 + %Features.6.or35 = select i1 %cmp33, i32 0, i32 undef + %cmp38 = icmp eq i64 undef, 0 + %or40 = or i32 %Features.6.or35, 4 + %Features.8 = select i1 %cmp38, i32 %Features.6.or35, i32 %or40 + %and42 = and i64 %FB, 32 + %or45 = or i32 %Features.8, 2 + %cmp43 = icmp eq i64 %and42, 0 + %Features.8.or45 = select i1 %cmp43, i32 %Features.8, i32 %or45 + %and47 = and i64 %FB, 8192 + %cmp48 = icmp eq i64 %and47, 0 + %or50 = or i32 %Features.8.or45, 32 + %Features.10 = select i1 %cmp48, i32 %Features.8.or45, i32 %or50 + %or55 = or i32 %Features.10, 64 + %Features.10.or55 = select i1 undef, i32 %Features.10, i32 %or55 + %and57 = lshr i64 %FB, 2 + %and57.tr = trunc i64 %and57 to i32 + %or60 = and i32 %and57.tr, 1 + %Features.12 = or i32 %Features.10.or55, %or60 + %and62 = and i64 %FB, 128 + %or65 = or i32 %Features.12, 8 + %cmp63 = icmp eq i64 %and62, 0 + %Features.12.or65 = select i1 %cmp63, i32 %Features.12, i32 %or65 + %Features.14 = select i1 undef, i32 undef, i32 %Features.12.or65 + %Features.16 = select i1 undef, i32 undef, i32 %Features.14 + ret i32 %Features.16 +} diff --git a/test/CodeGen/X86/opt-shuff-tstore.ll b/test/CodeGen/X86/opt-shuff-tstore.ll new file mode 100644 index 0000000..fc24913 --- /dev/null +++ b/test/CodeGen/X86/opt-shuff-tstore.ll @@ -0,0 +1,39 @@ +; RUN: llc -mcpu=corei7 -mtriple=x86_64-linux < %s -promote-elements -mattr=+sse2,+sse41 | FileCheck %s + +; CHECK: func_4_8 +; A single memory write +; CHECK: movd +; CHECK-NEXT: ret +define void @func_4_8(<4 x i8> %param, <4 x i8>* %p) { + %r = add <4 x i8> %param, <i8 1, i8 2, i8 3, i8 4> + store <4 x i8> %r, <4 x i8>* %p + ret void +} + +; CHECK: func_4_16 +; CHECK: movq +; CHECK-NEXT: ret +define void @func_4_16(<4 x i16> %param, <4 x i16>* %p) { + %r = add <4 x i16> %param, <i16 1, i16 2, i16 3, i16 4> + store <4 x i16> %r, <4 x i16>* %p + ret void +} + +; CHECK: func_8_8 +; CHECK: movq +; CHECK-NEXT: ret +define void @func_8_8(<8 x i8> %param, <8 x i8>* %p) { + %r = add <8 x i8> %param, <i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4> + store <8 x i8> %r, <8 x i8>* %p + ret void +} + +; CHECK: func_2_32 +; CHECK: movq +; CHECK-NEXT: ret +define void @func_2_32(<2 x i32> %param, <2 x i32>* %p) { + %r = add <2 x i32> %param, <i32 1, i32 2> + store <2 x i32> %r, <2 x i32>* %p + ret void +} + diff --git a/test/CodeGen/X86/or-address.ll b/test/CodeGen/X86/or-address.ll index b3fc627..f866e41 100644 --- 
a/test/CodeGen/X86/or-address.ll +++ b/test/CodeGen/X86/or-address.ll @@ -47,10 +47,10 @@ return: ; preds = %bb } ; CHECK: test1: -; CHECK: movl %{{.*}}, (%rdi,%rcx,4) -; CHECK: movl %{{.*}}, 8(%rdi,%rcx,4) -; CHECK: movl %{{.*}}, 4(%rdi,%rcx,4) -; CHECK: movl %{{.*}}, 12(%rdi,%rcx,4) +; CHECK: movl %{{.*}}, (%[[RDI:...]],%[[RCX:...]],4) +; CHECK: movl %{{.*}}, 8(%[[RDI]],%[[RCX]],4) +; CHECK: movl %{{.*}}, 4(%[[RDI]],%[[RCX]],4) +; CHECK: movl %{{.*}}, 12(%[[RDI]],%[[RCX]],4) define void @test1(i32* nocapture %array, i32 %r0, i8 signext %k, i8 signext %i0) nounwind { bb.nph: diff --git a/test/CodeGen/X86/palignr.ll b/test/CodeGen/X86/palignr.ll index 3812c72..6875fb3 100644 --- a/test/CodeGen/X86/palignr.ll +++ b/test/CodeGen/X86/palignr.ll @@ -1,7 +1,8 @@ -; RUN: llc < %s -march=x86 -mcpu=core2 | FileCheck %s +; RUN: llc < %s -march=x86 -mcpu=core2 -mattr=+ssse3 | FileCheck %s ; RUN: llc < %s -march=x86 -mcpu=yonah | FileCheck --check-prefix=YONAH %s define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind { +; CHECK: test1: ; CHECK: pshufd ; CHECK-YONAH: pshufd %C = shufflevector <4 x i32> %A, <4 x i32> undef, <4 x i32> < i32 1, i32 2, i32 3, i32 0 > @@ -9,6 +10,7 @@ define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind { } define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) nounwind { +; CHECK: test2: ; CHECK: palignr ; CHECK-YONAH: shufps %C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 1, i32 2, i32 3, i32 4 > @@ -16,43 +18,56 @@ define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) nounwind { } define <4 x i32> @test3(<4 x i32> %A, <4 x i32> %B) nounwind { +; CHECK: test3: ; CHECK: palignr %C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 1, i32 2, i32 undef, i32 4 > ret <4 x i32> %C } define <4 x i32> @test4(<4 x i32> %A, <4 x i32> %B) nounwind { +; CHECK: test4: ; CHECK: palignr %C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 6, i32 7, i32 undef, i32 1 > ret <4 x i32> %C } define <4 x float> @test5(<4 x float> %A, <4 x float> %B) nounwind { +; CHECK: test5: ; CHECK: palignr %C = shufflevector <4 x float> %A, <4 x float> %B, <4 x i32> < i32 6, i32 7, i32 undef, i32 1 > ret <4 x float> %C } define <8 x i16> @test6(<8 x i16> %A, <8 x i16> %B) nounwind { +; CHECK: test6: ; CHECK: palignr %C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 3, i32 4, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10 > ret <8 x i16> %C } define <8 x i16> @test7(<8 x i16> %A, <8 x i16> %B) nounwind { +; CHECK: test7: ; CHECK: palignr %C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 undef, i32 6, i32 undef, i32 8, i32 9, i32 10, i32 11, i32 12 > ret <8 x i16> %C } -define <8 x i16> @test8(<8 x i16> %A, <8 x i16> %B) nounwind { -; CHECK: palignr - %C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 undef, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0 > - ret <8 x i16> %C -} - -define <16 x i8> @test9(<16 x i8> %A, <16 x i8> %B) nounwind { +define <16 x i8> @test8(<16 x i8> %A, <16 x i8> %B) nounwind { +; CHECK: test8: ; CHECK: palignr %C = shufflevector <16 x i8> %A, <16 x i8> %B, <16 x i32> < i32 5, i32 6, i32 7, i32 undef, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20 > ret <16 x i8> %C } + +; Check that we don't do unary (circular on single operand) palignr incorrectly. +; (It is possible, but before this testcase was committed, it was being done +; incorrectly. In particular, one of the operands of the palignr node +; was an UNDEF.) 
+define <8 x i16> @test9(<8 x i16> %A, <8 x i16> %B) nounwind { +; CHECK: test9: +; CHECK-NOT: palignr +; CHECK: pshufb + %C = shufflevector <8 x i16> %B, <8 x i16> %A, <8 x i32> < i32 undef, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0 > + ret <8 x i16> %C +} + diff --git a/test/CodeGen/X86/personality.ll b/test/CodeGen/X86/personality.ll index d3d8e3f..51be7bc 100644 --- a/test/CodeGen/X86/personality.ll +++ b/test/CodeGen/X86/personality.ll @@ -8,6 +8,8 @@ entry: to label %return unwind label %unwind unwind: ; preds = %entry + %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup br i1 false, label %eh_then, label %cleanup20 eh_then: ; preds = %unwind @@ -15,7 +17,9 @@ eh_then: ; preds = %unwind to label %return unwind label %unwind10 unwind10: ; preds = %eh_then - %upgraded.eh_select13 = tail call i32 (i8*, i8*, ...)* @llvm.eh.selector(i8* null, i8* bitcast (void ()* @__gxx_personality_v0 to i8*), i32 1) + %exn10 = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup + %upgraded.eh_select13 = extractvalue { i8*, i32 } %exn10, 1 %upgraded.eh_select131 = sext i32 %upgraded.eh_select13 to i64 %tmp18 = icmp slt i64 %upgraded.eh_select131, 0 br i1 %tmp18, label %filter, label %cleanup20 @@ -33,11 +37,9 @@ return: ; preds = %eh_then, %entry declare void @_Z1gv() -declare void @__gxx_personality_v0() - declare void @__cxa_end_catch() -declare i32 @llvm.eh.selector(i8*, i8*, ...) nounwind +declare i32 @__gxx_personality_v0(...) ; X64: zPLR ; X64: .byte 155 diff --git a/test/CodeGen/X86/pr10420.ll b/test/CodeGen/X86/pr10420.ll new file mode 100644 index 0000000..3993f24 --- /dev/null +++ b/test/CodeGen/X86/pr10420.ll @@ -0,0 +1,21 @@ +; RUN: llc < %s -mtriple=x86_64-apple-macosx -disable-cfi | FileCheck %s + +define private void @foo() { + ret void +} + +define void @bar() { + call void @foo() + ret void; +} + +; CHECK: _bar: ## @bar +; CHECK-NEXT: Ltmp2: + +; CHECK: Ltmp12: +; CHECK-NEXT: Ltmp13 = L_foo-Ltmp12 ## FDE initial location +; CHECK-NEXT: .quad Ltmp13 + +; CHECK: Ltmp19: +; CHECK-NEXT: Ltmp20 = Ltmp2-Ltmp19 ## FDE initial location +; CHECK-NEXT: .quad Ltmp20 diff --git a/test/CodeGen/X86/pr3495.ll b/test/CodeGen/X86/pr3495.ll index c612a6e..7efd35b 100644 --- a/test/CodeGen/X86/pr3495.ll +++ b/test/CodeGen/X86/pr3495.ll @@ -1,7 +1,9 @@ -; RUN: llc < %s -march=x86 -stats -regalloc=linearscan |& grep {Number of loads added} | grep 2 -; RUN: llc < %s -march=x86 -stats -regalloc=linearscan |& grep {Number of register spills} | grep 1 -; RUN: llc < %s -march=x86 -stats -regalloc=linearscan |& grep {Number of machine instrs printed} | grep 34 +; RUN: llc < %s -march=x86 -stats -regalloc=linearscan -enable-lsr-nested |& grep {Number of loads added} | grep 2 +; RUN: llc < %s -march=x86 -stats -regalloc=linearscan -enable-lsr-nested |& grep {Number of spill slots allocated} | grep 1 +; RUN: llc < %s -march=x86 -stats -regalloc=linearscan -enable-lsr-nested |& grep {Number of machine instrs printed} | grep 34 ; PR3495 +; +; Note: this should not spill at all with either good LSR or good regalloc. 
target triple = "i386-pc-linux-gnu" @x = external global [8 x i32], align 32 ; <[8 x i32]*> [#uses=1] diff --git a/test/CodeGen/X86/pr3522.ll b/test/CodeGen/X86/pr3522.ll index da16237..1122530 100644 --- a/test/CodeGen/X86/pr3522.ll +++ b/test/CodeGen/X86/pr3522.ll @@ -21,6 +21,8 @@ return: ; preds = %lpad ret void lpad: ; preds = %entry + %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup %2 = icmp eq i8 %1, 90 ; <i1> [#uses=1] br i1 %2, label %return, label %bb22 } @@ -28,3 +30,5 @@ lpad: ; preds = %entry declare void @__gnat_rcheck_12(i8*, i32) noreturn declare i32 @report__ident_int(i32) + +declare i32 @__gxx_personality_v0(...) diff --git a/test/CodeGen/X86/promote.ll b/test/CodeGen/X86/promote.ll new file mode 100644 index 0000000..b8964f2 --- /dev/null +++ b/test/CodeGen/X86/promote.ll @@ -0,0 +1,31 @@ +; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i8:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + + +; CHECK: mul_f +define i32 @mul_f(<4 x i8>* %A) { +entry: +; CHECK: pmul +; CHECK-NOT: mulb + %0 = load <4 x i8>* %A, align 8 + %mul = mul <4 x i8> %0, %0 + store <4 x i8> %mul, <4 x i8>* undef + ret i32 0 +; CHECK: ret +} + + +; CHECK: shuff_f +define i32 @shuff_f(<4 x i8>* %A) { +entry: +; CHECK: pshufb +; CHECK: paddd +; CHECK: pshufb + %0 = load <4 x i8>* %A, align 8 + %add = add <4 x i8> %0, %0 + store <4 x i8> %add, <4 x i8>* undef + ret i32 0 +; CHECK: ret +} diff --git a/test/CodeGen/X86/scalar_widen_div.ll b/test/CodeGen/X86/scalar_widen_div.ll index adc58ac..e99ea93 100644 --- a/test/CodeGen/X86/scalar_widen_div.ll +++ b/test/CodeGen/X86/scalar_widen_div.ll @@ -3,9 +3,10 @@ ; Verify when widening a divide/remainder operation, we only generate a ; divide/rem per element since divide/remainder can trap. 
+; CHECK: vectorDiv define void @vectorDiv (<2 x i32> addrspace(1)* %nsource, <2 x i32> addrspace(1)* %dsource, <2 x i32> addrspace(1)* %qdest) nounwind { -; CHECK: idivl -; CHECK: idivl +; CHECK: idivq +; CHECK: idivq ; CHECK-NOT: idivl ; CHECK: ret entry: @@ -32,6 +33,7 @@ entry: ret void } +; CHECK: test_char_div define <3 x i8> @test_char_div(<3 x i8> %num, <3 x i8> %div) { ; CHECK: idivb ; CHECK: idivb @@ -42,6 +44,7 @@ define <3 x i8> @test_char_div(<3 x i8> %num, <3 x i8> %div) { ret <3 x i8> %div.r } +; CHECK: test_uchar_div define <3 x i8> @test_uchar_div(<3 x i8> %num, <3 x i8> %div) { ; CHECK: divb ; CHECK: divb @@ -52,6 +55,7 @@ define <3 x i8> @test_uchar_div(<3 x i8> %num, <3 x i8> %div) { ret <3 x i8> %div.r } +; CHECK: test_short_div define <5 x i16> @test_short_div(<5 x i16> %num, <5 x i16> %div) { ; CHECK: idivw ; CHECK: idivw @@ -64,17 +68,19 @@ define <5 x i16> @test_short_div(<5 x i16> %num, <5 x i16> %div) { ret <5 x i16> %div.r } +; CHECK: test_ushort_div define <4 x i16> @test_ushort_div(<4 x i16> %num, <4 x i16> %div) { -; CHECK: divw -; CHECK: divw -; CHECK: divw -; CHECK: divw -; CHECK-NOT: divw +; CHECK: divl +; CHECK: divl +; CHECK: divl +; CHECK: divl +; CHECK-NOT: divl ; CHECK: ret %div.r = udiv <4 x i16> %num, %div ret <4 x i16> %div.r } +; CHECK: test_uint_div define <3 x i32> @test_uint_div(<3 x i32> %num, <3 x i32> %div) { ; CHECK: divl ; CHECK: divl @@ -85,6 +91,7 @@ define <3 x i32> @test_uint_div(<3 x i32> %num, <3 x i32> %div) { ret <3 x i32> %div.r } +; CHECK: test_long_div define <3 x i64> @test_long_div(<3 x i64> %num, <3 x i64> %div) { ; CHECK: idivq ; CHECK: idivq @@ -95,6 +102,7 @@ define <3 x i64> @test_long_div(<3 x i64> %num, <3 x i64> %div) { ret <3 x i64> %div.r } +; CHECK: test_ulong_div define <3 x i64> @test_ulong_div(<3 x i64> %num, <3 x i64> %div) { ; CHECK: divq ; CHECK: divq @@ -105,18 +113,19 @@ define <3 x i64> @test_ulong_div(<3 x i64> %num, <3 x i64> %div) { ret <3 x i64> %div.r } - +; CHECK: test_char_rem define <4 x i8> @test_char_rem(<4 x i8> %num, <4 x i8> %rem) { -; CHECK: idivb -; CHECK: idivb -; CHECK: idivb -; CHECK: idivb -; CHECK-NOT: idivb +; CHECK: idivl +; CHECK: idivl +; CHECK: idivl +; CHECK: idivl +; CHECK-NOT: idivl ; CHECK: ret %rem.r = srem <4 x i8> %num, %rem ret <4 x i8> %rem.r } +; CHECK: test_short_rem define <5 x i16> @test_short_rem(<5 x i16> %num, <5 x i16> %rem) { ; CHECK: idivw ; CHECK: idivw @@ -129,6 +138,7 @@ define <5 x i16> @test_short_rem(<5 x i16> %num, <5 x i16> %rem) { ret <5 x i16> %rem.r } +; CHECK: test_uint_rem define <4 x i32> @test_uint_rem(<4 x i32> %num, <4 x i32> %rem) { ; CHECK: idivl ; CHECK: idivl @@ -141,6 +151,7 @@ define <4 x i32> @test_uint_rem(<4 x i32> %num, <4 x i32> %rem) { } +; CHECK: test_ulong_rem define <5 x i64> @test_ulong_rem(<5 x i64> %num, <5 x i64> %rem) { ; CHECK: divq ; CHECK: divq @@ -153,6 +164,7 @@ define <5 x i64> @test_ulong_rem(<5 x i64> %num, <5 x i64> %rem) { ret <5 x i64> %rem.r } +; CHECK: test_int_div define void @test_int_div(<3 x i32>* %dest, <3 x i32>* %old, i32 %n) { ; CHECK: idivl ; CHECK: idivl diff --git a/test/CodeGen/X86/scev-interchange.ll b/test/CodeGen/X86/scev-interchange.ll index 81c919f..71a4d21 100644 --- a/test/CodeGen/X86/scev-interchange.ll +++ b/test/CodeGen/X86/scev-interchange.ll @@ -149,6 +149,8 @@ bb71.i: ; preds = %bb.i.i.i262.i, %bb66.i to label %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i.i unwind label %lpad.i.i.i.i.i.i ; <i8*> [#uses=0] lpad.i.i.i.i.i.i: ; preds = %bb71.i + %exn.i.i.i.i.i.i = landingpad {i8*, i32} 
personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable _ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i.i: ; preds = %bb71.i @@ -162,6 +164,8 @@ _ZNSt6vectorIjSaIjEED1Ev.exit.i.i: ; preds = %_ZNSt12_Vector_baseIjSaIjEEC2EmRK to label %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i12.i.i unwind label %lpad.i.i.i.i8.i.i ; <i8*> [#uses=0] lpad.i.i.i.i8.i.i: ; preds = %_ZNSt6vectorIjSaIjEED1Ev.exit.i.i + %exn.i.i.i.i8.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup invoke void @_Unwind_Resume(i8* undef) to label %.noexc.i9.i.i unwind label %lpad.i19.i.i @@ -179,6 +183,8 @@ bb50.i.i.i: ; preds = %bb.i.i.i.i.i.i.i.i.i.i, %_ZNSt12_Vector_baseIjSaIjEEC2Em to label %bb83.i unwind label %lpad188.i lpad.i19.i.i: ; preds = %lpad.i.i.i.i8.i.i + %exn.i19.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable bb83.i: ; preds = %bb50.i.i.i @@ -192,6 +198,8 @@ invcont84.i: ; preds = %bb83.i to label %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i unwind label %lpad.i.i.i.i315.i ; <i8*> [#uses=0] lpad.i.i.i.i315.i: ; preds = %invcont84.i + %exn.i.i.i.i315.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup invoke void @_Unwind_Resume(i8* undef) to label %.noexc.i316.i unwind label %lpad.i352.i @@ -209,6 +217,8 @@ bb50.i.i: ; preds = %bb.i.i.i.i.i.i.i.i320.i, %_ZNSt12_Vector_baseIjSaIjEEC2EmR to label %invcont86.i unwind label %lpad200.i lpad.i352.i: ; preds = %lpad.i.i.i.i315.i + %exn.i352.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable invcont86.i: ; preds = %bb50.i.i @@ -232,6 +242,8 @@ invcont101.i: ; preds = %bb100.i to label %_ZN10FullMatrixIdEC1Ejj.exit.i.i unwind label %lpad.i.i.i.i.i lpad.i.i.i.i.i: ; preds = %invcont101.i + %exn.i.i.i.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable _ZN10FullMatrixIdEC1Ejj.exit.i.i: ; preds = %invcont101.i @@ -239,6 +251,8 @@ _ZN10FullMatrixIdEC1Ejj.exit.i.i: ; preds = %invcont101.i to label %_ZN10FullMatrixIdEC1Ejj.exit28.i.i unwind label %lpad.i.i.i27.i.i lpad.i.i.i27.i.i: ; preds = %_ZN10FullMatrixIdEC1Ejj.exit.i.i + %exn.i.i.i27.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup invoke void @_Unwind_Resume(i8* undef) to label %.noexc.i.i unwind label %lpad.i.i @@ -258,6 +272,8 @@ bb.i.i.i297.i.i: ; preds = %bb58.i.i unreachable lpad.i.i: ; preds = %lpad.i.i.i27.i.i + %exn.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable bb.i191.i: ; preds = %.noexc232.i, %bb58.i.i @@ -296,43 +312,71 @@ bb29.loopexit.i.i: ; preds = %.noexc232.i br label %bb9.i216.i lpad.i: ; preds = %entry + %exn.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable lpad120.i: ; preds = %invcont.i + %exn120.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable lpad124.i: ; preds = %invcont1.i + %exn124.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable lpad128.i: ; preds = %invcont3.i + %exn128.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable lpad132.i: ; preds = %invcont4.i + %exn132.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable lpad136.i: ; preds = %invcont6.i + %exn136.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable lpad140.i: ; preds = %bb21.i, %invcont7.i 
+ %exn140.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable lpad144.i: ; preds = %bb10.i168.i, %invcont9.i + %exn144.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable lpad148.i: ; preds = %invcont10.i + %exn148.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable lpad188.i: ; preds = %bb50.i.i.i + %exn188.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable lpad196.i: ; preds = %bb.i191.i + %exn196 = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable lpad200.i: ; preds = %bb50.i.i + %exn200.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable lpad204.i: ; preds = %invcont86.i + %exn204.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable } declare fastcc void @_ZN11Polynomials19LagrangeEquidistant23generate_complete_basisEj(%"struct.std::vector<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >"* noalias nocapture sret, i32) + +declare i32 @__gxx_personality_v0(...) diff --git a/test/CodeGen/X86/segmented-stacks.ll b/test/CodeGen/X86/segmented-stacks.ll new file mode 100644 index 0000000..ecdb00d --- /dev/null +++ b/test/CodeGen/X86/segmented-stacks.ll @@ -0,0 +1,87 @@ +; RUN: llc < %s -mtriple=i686-linux -segmented-stacks | FileCheck %s -check-prefix=X32 +; RUN: llc < %s -mtriple=x86_64-linux -segmented-stacks | FileCheck %s -check-prefix=X64 + +; Just to prevent the alloca from being optimized away +declare void @dummy_use(i32*, i32) + +define i32 @test_basic(i32 %l) { + %mem = alloca i32, i32 %l + call void @dummy_use (i32* %mem, i32 %l) + %terminate = icmp eq i32 %l, 0 + br i1 %terminate, label %true, label %false + +true: + ret i32 0 + +false: + %newlen = sub i32 %l, 1 + %retvalue = call i32 @test_basic(i32 %newlen) + ret i32 %retvalue + +; X32: test_basic: + +; X32: leal -12(%esp), %ecx +; X32-NEXT: cmpl %gs:48, %ecx + +; X32: subl $8, %esp +; X32-NEXT: pushl $4 +; X32-NEXT: pushl $12 +; X32-NEXT: calll __morestack +; X32-NEXT: addl $8, %esp +; X32-NEXT: ret + +; X32: movl %eax, %esp + +; X32: subl $12, %esp +; X32-NEXT: pushl %ecx +; X32-NEXT: calll __morestack_allocate_stack_space +; X32-NEXT: addl $16, %esp + +; X64: test_basic: + +; X64: leaq -24(%rsp), %r11 +; X64-NEXT: cmpq %fs:112, %r11 + +; X64: movabsq $24, %r10 +; X64-NEXT: movabsq $0, %r11 +; X64-NEXT: callq __morestack +; X64-NEXT: ret + +; X64: movq %rsp, %rax +; X64-NEXT: subq %rcx, %rax +; X64-NEXT: cmpq %rax, %fs:112 + +; X64: movq %rax, %rsp + +; X64: movq %rcx, %rdi +; X64-NEXT: callq __morestack_allocate_stack_space + +} + +define i32 @test_nested(i32 * nest %closure, i32 %other) { + %addend = load i32 * %closure + %result = add i32 %other, %addend + ret i32 %result + +; X32: leal (%esp), %edx +; X32-NEXT: cmpl %gs:48, %edx + + +; X32: subl $8, %esp +; X32-NEXT: pushl $4 +; X32-NEXT: pushl $0 +; X32-NEXT: calll __morestack +; X32-NEXT: addl $8, %esp +; X32-NEXT: ret + +; X64: leaq (%rsp), %r11 +; X64-NEXT: cmpq %fs:112, %r11 + +; X64: movq %r10, %rax +; X64-NEXT: movabsq $0, %r10 +; X64-NEXT: movabsq $0, %r11 +; X64-NEXT: callq __morestack +; X64-NEXT: ret +; X64: movq %rax, %r10 + +} diff --git a/test/CodeGen/X86/sfence.ll b/test/CodeGen/X86/sfence.ll index 4782879..0c28407 100644 --- a/test/CodeGen/X86/sfence.ll +++ b/test/CodeGen/X86/sfence.ll @@ -1,8 +1,8 @@ ; RUN: llc < %s -march=x86 -mattr=+sse2 | 
grep sfence -declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1) +declare void @llvm.x86.sse.sfence() nounwind define void @test() { - call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 true, i1 true) - ret void + call void @llvm.x86.sse.sfence() + ret void } diff --git a/test/CodeGen/X86/sink-hoist.ll b/test/CodeGen/X86/sink-hoist.ll index 31f41ee..e13a817 100644 --- a/test/CodeGen/X86/sink-hoist.ll +++ b/test/CodeGen/X86/sink-hoist.ll @@ -102,6 +102,7 @@ entry: br label %bb60 bb: ; preds = %bb60 + %i.0 = phi i32 [ 0, %bb60 ] ; <i32> [#uses=2] %0 = bitcast float* %x_addr.0 to <4 x float>* ; <<4 x float>*> [#uses=1] %1 = load <4 x float>* %0, align 16 ; <<4 x float>> [#uses=4] %tmp20 = bitcast <4 x float> %1 to <4 x i32> ; <<4 x i32>> [#uses=1] @@ -129,15 +130,14 @@ bb: ; preds = %bb60 %5 = getelementptr float* %x_addr.0, i64 4 ; <float*> [#uses=1] %6 = getelementptr float* %y_addr.0, i64 4 ; <float*> [#uses=1] %7 = add i32 %i.0, 4 ; <i32> [#uses=1] - br label %bb60 + %8 = load i32* %n, align 4 ; <i32> [#uses=1] + %9 = icmp sgt i32 %8, %7 ; <i1> [#uses=1] + br i1 %9, label %bb60, label %return bb60: ; preds = %bb, %entry - %i.0 = phi i32 [ 0, %entry ], [ %7, %bb ] ; <i32> [#uses=2] %x_addr.0 = phi float* [ %x, %entry ], [ %5, %bb ] ; <float*> [#uses=2] %y_addr.0 = phi float* [ %y, %entry ], [ %6, %bb ] ; <float*> [#uses=2] - %8 = load i32* %n, align 4 ; <i32> [#uses=1] - %9 = icmp sgt i32 %8, %i.0 ; <i1> [#uses=1] - br i1 %9, label %bb, label %return + br label %bb return: ; preds = %bb60 ret void diff --git a/test/CodeGen/X86/split-eh-lpad-edges.ll b/test/CodeGen/X86/split-eh-lpad-edges.ll index fd40a7f..756a3dd 100644 --- a/test/CodeGen/X86/split-eh-lpad-edges.ll +++ b/test/CodeGen/X86/split-eh-lpad-edges.ll @@ -28,7 +28,11 @@ invcont27: ; preds = %invcont26 lpad: ; preds = %invcont26, %invcont, %entry %pool.1 = phi %struct.NSAutoreleasePool* [ null, %entry ], [ null, %invcont ], [ null, %invcont26 ] ; <%struct.NSAutoreleasePool*> [#uses=0] + %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + cleanup unreachable } declare %struct.NSObject* @objc_msgSend(%struct.NSObject*, %struct.objc_selector*, ...) + +declare i32 @__gxx_personality_v0(...) diff --git a/test/CodeGen/X86/split-vector-bitcast.ll b/test/CodeGen/X86/split-vector-bitcast.ll new file mode 100644 index 0000000..fae15cf --- /dev/null +++ b/test/CodeGen/X86/split-vector-bitcast.ll @@ -0,0 +1,12 @@ +; RUN: llc < %s -march=x86 -mattr=-sse2,+sse | grep addps + +; PR10497 + another isel issue with sse2 disabled +; (This is primarily checking that this construct doesn't crash.) 
+define void @a(<2 x float>* %a, <2 x i32>* %b) { + %cc = load <2 x float>* %a + %c = fadd <2 x float> %cc, %cc + %dd = bitcast <2 x float> %c to <2 x i32> + %d = add <2 x i32> %dd, %dd + store <2 x i32> %d, <2 x i32>* %b + ret void +} diff --git a/test/CodeGen/X86/sse-minmax.ll b/test/CodeGen/X86/sse-minmax.ll index ff0af25..af1a73b 100644 --- a/test/CodeGen/X86/sse-minmax.ll +++ b/test/CodeGen/X86/sse-minmax.ll @@ -1,6 +1,6 @@ -; RUN: llc < %s -march=x86-64 -asm-verbose=false -join-physregs | FileCheck %s -; RUN: llc < %s -march=x86-64 -asm-verbose=false -join-physregs -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck -check-prefix=UNSAFE %s -; RUN: llc < %s -march=x86-64 -asm-verbose=false -join-physregs -enable-no-nans-fp-math | FileCheck -check-prefix=FINITE %s +; RUN: llc < %s -march=x86-64 -asm-verbose=false -join-physregs -promote-elements | FileCheck %s +; RUN: llc < %s -march=x86-64 -asm-verbose=false -join-physregs -enable-unsafe-fp-math -enable-no-nans-fp-math -promote-elements | FileCheck -check-prefix=UNSAFE %s +; RUN: llc < %s -march=x86-64 -asm-verbose=false -join-physregs -enable-no-nans-fp-math -promote-elements | FileCheck -check-prefix=FINITE %s ; Some of these patterns can be matched as SSE min or max. Some of ; then can be matched provided that the operands are swapped. @@ -933,3 +933,35 @@ entry: %x_addr.0 = select i1 %0, double 3.000000e+03, double %x ; <double> [#uses=1] ret double %x_addr.0 } + +; UNSAFE: maxpd: +; UNSAFE: maxpd +define <2 x double> @maxpd(<2 x double> %x, <2 x double> %y) { + %max_is_x = fcmp oge <2 x double> %x, %y + %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y + ret <2 x double> %max +} + +; UNSAFE: minpd: +; UNSAFE: minpd +define <2 x double> @minpd(<2 x double> %x, <2 x double> %y) { + %min_is_x = fcmp ole <2 x double> %x, %y + %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y + ret <2 x double> %min +} + +; UNSAFE: maxps: +; UNSAFE: maxps +define <4 x float> @maxps(<4 x float> %x, <4 x float> %y) { + %max_is_x = fcmp oge <4 x float> %x, %y + %max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y + ret <4 x float> %max +} + +; UNSAFE: minps: +; UNSAFE: minps +define <4 x float> @minps(<4 x float> %x, <4 x float> %y) { + %min_is_x = fcmp ole <4 x float> %x, %y + %min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y + ret <4 x float> %min +} diff --git a/test/CodeGen/X86/sse2-blend.ll b/test/CodeGen/X86/sse2-blend.ll new file mode 100644 index 0000000..56b099e --- /dev/null +++ b/test/CodeGen/X86/sse2-blend.ll @@ -0,0 +1,55 @@ +; RUN: llc < %s -march=x86 -mcpu=yonah -promote-elements -mattr=+sse2,-sse41 | FileCheck %s + + +; currently (xor v4i32) is defined as illegal, so we scalarize the code. + +define void@vsel_float(<4 x float>* %v1, <4 x float>* %v2) { + %A = load <4 x float>* %v1 + %B = load <4 x float>* %v2 + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %A, <4 x float> %B + store <4 x float > %vsel, <4 x float>* %v1 + ret void +} + +; currently (xor v4i32) is defined as illegal, so we scalarize the code. 
+ +define void@vsel_i32(<4 x i32>* %v1, <4 x i32>* %v2) { + %A = load <4 x i32>* %v1 + %B = load <4 x i32>* %v2 + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %A, <4 x i32> %B + store <4 x i32 > %vsel, <4 x i32>* %v1 + ret void +} + +; CHECK: vsel_i64 +; CHECK: pxor +; CHECK: pand +; CHECK: andnps +; CHECK: orps +; CHECK: ret + +define void@vsel_i64(<4 x i64>* %v1, <4 x i64>* %v2) { + %A = load <4 x i64>* %v1 + %B = load <4 x i64>* %v2 + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> %A, <4 x i64> %B + store <4 x i64 > %vsel, <4 x i64>* %v1 + ret void +} + +; CHECK: vsel_double +; CHECK: pxor +; CHECK: pand +; CHECK: andnps +; CHECK: orps +; CHECK: ret + + +define void@vsel_double(<4 x double>* %v1, <4 x double>* %v2) { + %A = load <4 x double>* %v1 + %B = load <4 x double>* %v2 + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> %A, <4 x double> %B + store <4 x double > %vsel, <4 x double>* %v1 + ret void +} + + diff --git a/test/CodeGen/X86/sse41-blend.ll b/test/CodeGen/X86/sse41-blend.ll new file mode 100644 index 0000000..78604a0 --- /dev/null +++ b/test/CodeGen/X86/sse41-blend.ll @@ -0,0 +1,82 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -promote-elements -mattr=+sse41 | FileCheck %s + +;CHECK: vsel_float +;CHECK: blendvps +;CHECK: ret +define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) { + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %v1, <4 x float> %v2 + ret <4 x float> %vsel +} + + +;CHECK: vsel_4xi8 +;CHECK: blendvps +;CHECK: ret +define <4 x i8> @vsel_4xi8(<4 x i8> %v1, <4 x i8> %v2) { + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i8> %v1, <4 x i8> %v2 + ret <4 x i8> %vsel +} + +;CHECK: vsel_4xi16 +;CHECK: blendvps +;CHECK: ret +define <4 x i16> @vsel_4xi16(<4 x i16> %v1, <4 x i16> %v2) { + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i16> %v1, <4 x i16> %v2 + ret <4 x i16> %vsel +} + + +;CHECK: vsel_i32 +;CHECK: blendvps +;CHECK: ret +define <4 x i32> @vsel_i32(<4 x i32> %v1, <4 x i32> %v2) { + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %v1, <4 x i32> %v2 + ret <4 x i32> %vsel +} + + +;CHECK: vsel_double +;CHECK: blendvpd +;CHECK: ret +define <4 x double> @vsel_double(<4 x double> %v1, <4 x double> %v2) { + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> %v1, <4 x double> %v2 + ret <4 x double> %vsel +} + + +;CHECK: vsel_i64 +;CHECK: blendvpd +;CHECK: ret +define <4 x i64> @vsel_i64(<4 x i64> %v1, <4 x i64> %v2) { + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> %v1, <4 x i64> %v2 + ret <4 x i64> %vsel +} + + +;CHECK: vsel_i8 +;CHECK: pblendvb +;CHECK: ret +define <16 x i8> @vsel_i8(<16 x i8> %v1, <16 x i8> %v2) { + %vsel = select <16 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <16 x i8> %v1, <16 x i8> %v2 + ret <16 x i8> %vsel +} + +;; TEST blend + compares +; CHECK: A +define <2 x double> @A(<2 x double> %x, <2 x double> %y) { + ; CHECK: cmplepd + ; CHECK: blendvpd + %max_is_x = fcmp oge <2 x double> %x, %y + %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y + ret <2 x double> %max +} + +; CHECK: B +define <2 x double> @B(<2 x double> %x, <2 x double> %y) { + ; CHECK: cmpnlepd + ; CHECK: blendvpd + %min_is_x = fcmp ult <2 x double> %x, %y + %min = select <2 
x i1> %min_is_x, <2 x double> %x, <2 x double> %y + ret <2 x double> %min +} + diff --git a/test/CodeGen/X86/sub.ll b/test/CodeGen/X86/sub.ll new file mode 100644 index 0000000..ee5ea1d --- /dev/null +++ b/test/CodeGen/X86/sub.ll @@ -0,0 +1,11 @@ +; RUN: llc -march=x86 < %s | FileCheck %s + +define i32 @test1(i32 %x) { + %xor = xor i32 %x, 31 + %sub = sub i32 32, %xor + ret i32 %sub +; CHECK: test1: +; CHECK: xorl $-32 +; CHECK-NEXT: addl $33 +; CHECK-NEXT: ret +} diff --git a/test/CodeGen/X86/tail-call-got.ll b/test/CodeGen/X86/tail-call-got.ll new file mode 100644 index 0000000..1d7eb2e --- /dev/null +++ b/test/CodeGen/X86/tail-call-got.ll @@ -0,0 +1,24 @@ +; RUN: llc < %s -relocation-model=pic -mattr=+sse2 | FileCheck %s + +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32" +target triple = "i386-unknown-freebsd9.0" + +define double @test1(double %x) nounwind readnone { +; CHECK: test1: +; CHECK: movl foo@GOT +; CHECK-NEXT: jmpl + %1 = tail call double @foo(double %x) nounwind readnone + ret double %1 +} + +declare double @foo(double) readnone + +define double @test2(double %x) nounwind readnone { +; CHECK: test2: +; CHECK: movl sin@GOT +; CHECK-NEXT: jmpl + %1 = tail call double @sin(double %x) nounwind readnone + ret double %1 +} + +declare double @sin(double) readnone diff --git a/test/CodeGen/X86/trunc-ext-ld-st.ll b/test/CodeGen/X86/trunc-ext-ld-st.ll new file mode 100644 index 0000000..57d6e97 --- /dev/null +++ b/test/CodeGen/X86/trunc-ext-ld-st.ll @@ -0,0 +1,82 @@ +; RUN: llc < %s -march=x86-64 -mcpu=corei7 -promote-elements -mattr=+sse41 | FileCheck %s + +;CHECK: load_2_i8 +; A single 16-bit load +;CHECK: movzwl +;CHECK: pshufb +;CHECK: paddq +;CHECK: pshufb +; A single 16-bit store +;CHECK: movw +;CHECK: ret + +define void @load_2_i8(<2 x i8>* %A) { + %T = load <2 x i8>* %A + %G = add <2 x i8> %T, <i8 9, i8 7> + store <2 x i8> %G, <2 x i8>* %A + ret void +} + +;CHECK: load_2_i16 +; Read 32-bits +;CHECK: movd +;CHECK: pshufb +;CHECK: paddq +;CHECK: pshufb +;CHECK: movd +;CHECK: ret +define void @load_2_i16(<2 x i16>* %A) { + %T = load <2 x i16>* %A + %G = add <2 x i16> %T, <i16 9, i16 7> + store <2 x i16> %G, <2 x i16>* %A + ret void +} + +;CHECK: load_2_i32 +;CHECK: pshufd +;CHECK: paddq +;CHECK: pshufd +;CHECK: ret +define void @load_2_i32(<2 x i32>* %A) { + %T = load <2 x i32>* %A + %G = add <2 x i32> %T, <i32 9, i32 7> + store <2 x i32> %G, <2 x i32>* %A + ret void +} + +;CHECK: load_4_i8 +;CHECK: movd +;CHECK: pshufb +;CHECK: paddd +;CHECK: pshufb +;CHECK: ret +define void @load_4_i8(<4 x i8>* %A) { + %T = load <4 x i8>* %A + %G = add <4 x i8> %T, <i8 1, i8 4, i8 9, i8 7> + store <4 x i8> %G, <4 x i8>* %A + ret void +} + +;CHECK: load_4_i16 +;CHECK: punpcklwd +;CHECK: paddd +;CHECK: pshufb +;CHECK: ret +define void @load_4_i16(<4 x i16>* %A) { + %T = load <4 x i16>* %A + %G = add <4 x i16> %T, <i16 1, i16 4, i16 9, i16 7> + store <4 x i16> %G, <4 x i16>* %A + ret void +} + +;CHECK: load_8_i8 +;CHECK: punpcklbw +;CHECK: paddw +;CHECK: pshufb +;CHECK: ret +define void @load_8_i8(<8 x i8>* %A) { + %T = load <8 x i8>* %A + %G = add <8 x i8> %T, %T + store <8 x i8> %G, <8 x i8>* %A + ret void +} diff --git a/test/CodeGen/X86/twoaddr-sink-terminator.ll b/test/CodeGen/X86/twoaddr-sink-terminator.ll new file mode 100644 index 0000000..209d474 --- /dev/null +++ b/test/CodeGen/X86/twoaddr-sink-terminator.ll @@ -0,0 +1,43 @@ +; RUN: llc < %s -verify-coalescing +; PR10998 + +target 
datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32" +target triple = "i386-unknown-freebsd8.2" + +define void @test(i32 %arg1) nounwind align 2 { +bb11: + %tmp13 = and i32 %arg1, 7 + %tmp14 = add i32 %tmp13, -5 + switch i32 %tmp13, label %bb18 [ + i32 0, label %bb21 + i32 4, label %bb22 + i32 3, label %bb21 + i32 2, label %bb19 + ] + +bb18: + %tmp202 = call i32 @f() nounwind + unreachable + +bb19: + %tmp20 = call i32 @f() nounwind + br label %bb24 + +bb21: + %tmp203 = call i32 @f() nounwind + br label %bb24 + +bb22: + %tmp23 = call i32 @f() nounwind + br label %bb24 + +bb24: + %tmp15 = icmp ult i32 %tmp14, 2 + %tmp55 = select i1 %tmp15, i32 45, i32 44 + %tmp56 = call i32 @f2(i32 %tmp55) + unreachable +} + +declare i32 @f() + +declare i32 @f2(i32) diff --git a/test/CodeGen/X86/uint64-to-float.ll b/test/CodeGen/X86/uint64-to-float.ll index d9f753c..1dbbdcf 100644 --- a/test/CodeGen/X86/uint64-to-float.ll +++ b/test/CodeGen/X86/uint64-to-float.ll @@ -6,12 +6,37 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" target triple = "x86_64-apple-darwin10.0.0" -; CHECK: testq %rdi, %rdi -; CHECK-NEXT: jns LBB0_2 +; FIXME: This test could generate this code: +; +; ## BB#0: ## %entry +; testq %rdi, %rdi +; jns LBB0_2 +; ## BB#1: +; movq %rdi, %rax +; shrq %rax +; andq $1, %rdi +; orq %rax, %rdi +; cvtsi2ssq %rdi, %xmm0 +; addss %xmm0, %xmm0 +; ret +; LBB0_2: ## %entry +; cvtsi2ssq %rdi, %xmm0 +; ret +; +; The blocks come from lowering: +; +; %vreg7<def> = CMOV_FR32 %vreg6<kill>, %vreg5<kill>, 15, %EFLAGS<imp-use>; FR32:%vreg7,%vreg6,%vreg5 +; +; If the instruction had an EFLAGS<kill> flag, it wouldn't need to mark EFLAGS +; as live-in on the new blocks, and machine sinking would be able to sink +; everything below the test. 
+ ; CHECK: shrq -; CHECK-NEXT: andq +; CHECK: andq ; CHECK-NEXT: orq -; CHECK-NEXT: cvtsi2ss +; CHECK: testq %rdi, %rdi +; CHECK-NEXT: jns LBB0_2 +; CHECK: cvtsi2ss ; CHECK: LBB0_2 ; CHECK-NEXT: cvtsi2ss define float @test(i64 %a) { diff --git a/test/CodeGen/X86/uint_to_fp-2.ll b/test/CodeGen/X86/uint_to_fp-2.ll index da5105d..7536fb8 100644 --- a/test/CodeGen/X86/uint_to_fp-2.ll +++ b/test/CodeGen/X86/uint_to_fp-2.ll @@ -1,8 +1,33 @@ -; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movsd | count 1 -; rdar://6504833 +; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s -define float @f(i32 %x) nounwind readnone { +; rdar://6504833 +define float @test1(i32 %x) nounwind readnone { +; CHECK: test1 +; CHECK: movd +; CHECK: orpd +; CHECK: subsd +; CHECK: cvtsd2ss +; CHECK: movss +; CHECK: flds +; CHECK: ret entry: %0 = uitofp i32 %x to float ret float %0 } + +; PR10802 +define float @test2(<4 x i32> %x) nounwind readnone ssp { +; CHECK: test2 +; CHECK: xorps [[ZERO:%xmm[0-9]+]] +; CHECK: movss {{.*}}, [[ZERO]] +; CHECK: orps +; CHECK: subsd +; CHECK: cvtsd2ss +; CHECK: movss +; CHECK: flds +; CHECK: ret +entry: + %vecext = extractelement <4 x i32> %x, i32 0 + %conv = uitofp i32 %vecext to float + ret float %conv +} diff --git a/test/CodeGen/X86/v2f32.ll b/test/CodeGen/X86/v2f32.ll index 6d14099..ba54833 100644 --- a/test/CodeGen/X86/v2f32.ll +++ b/test/CodeGen/X86/v2f32.ll @@ -1,6 +1,6 @@ ; RUN: llc < %s -mtriple=x86_64-linux -mcpu=penryn -asm-verbose=0 -o - | FileCheck %s -check-prefix=X64 ; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=penryn -asm-verbose=0 -o - | FileCheck %s -check-prefix=W64 -; RUN: llc < %s -mcpu=yonah -march=x86 -asm-verbose=0 -o - | FileCheck %s -check-prefix=X32 +; RUN: llc < %s -mcpu=yonah -march=x86 -mtriple=i386-linux-gnu -asm-verbose=0 -o - | FileCheck %s -check-prefix=X32 ; PR7518 define void @test1(<2 x float> %Q, float *%P2) nounwind { diff --git a/test/CodeGen/X86/vec_compare-sse4.ll b/test/CodeGen/X86/vec_compare-sse4.ll new file mode 100644 index 0000000..b4a4a4c --- /dev/null +++ b/test/CodeGen/X86/vec_compare-sse4.ll @@ -0,0 +1,35 @@ +; RUN: llc < %s -march=x86 -mattr=-sse3,+sse2 | FileCheck %s -check-prefix=SSE2 +; RUN: llc < %s -march=x86 -mattr=-sse42,+sse41 | FileCheck %s -check-prefix=SSE41 +; RUN: llc < %s -march=x86 -mattr=+sse42 | FileCheck %s -check-prefix=SSE42 + +define <2 x i64> @test1(<2 x i64> %A, <2 x i64> %B) nounwind { +; SSE42: test1: +; SSE42: pcmpgtq +; SSE42: ret +; SSE41: test1: +; SSE41-NOT: pcmpgtq +; SSE41: ret +; SSE2: test1: +; SSE2-NOT: pcmpgtq +; SSE2: ret + + %C = icmp sgt <2 x i64> %A, %B + %D = sext <2 x i1> %C to <2 x i64> + ret <2 x i64> %D +} + +define <2 x i64> @test2(<2 x i64> %A, <2 x i64> %B) nounwind { +; SSE42: test2: +; SSE42: pcmpeqq +; SSE42: ret +; SSE41: test2: +; SSE41: pcmpeqq +; SSE41: ret +; SSE2: test2: +; SSE2-NOT: pcmpeqq +; SSE2: ret + + %C = icmp eq <2 x i64> %A, %B + %D = sext <2 x i1> %C to <2 x i64> + ret <2 x i64> %D +} diff --git a/test/CodeGen/X86/vec_set-C.ll b/test/CodeGen/X86/vec_set-C.ll index 7636ac3..133f23b 100644 --- a/test/CodeGen/X86/vec_set-C.ll +++ b/test/CodeGen/X86/vec_set-C.ll @@ -1,6 +1,6 @@ -; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movq -; RUN: llc < %s -march=x86 -mattr=+sse2 | grep mov | count 1 -; RUN: llc < %s -march=x86-64 -mattr=+sse2 | grep movd +; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -mattr=+sse2 | grep movq +; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -mattr=+sse2 | grep mov | count 1 +; RUN: llc < %s -march=x86-64 -mtriple=x86_64-pc-linux 
-mattr=+sse2 | grep movd define <2 x i64> @t1(i64 %x) nounwind { %tmp8 = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0 diff --git a/test/CodeGen/X86/vec_shuffle-37.ll b/test/CodeGen/X86/vec_shuffle-37.ll index 2efdb14..e91a734 100644 --- a/test/CodeGen/X86/vec_shuffle-37.ll +++ b/test/CodeGen/X86/vec_shuffle-37.ll @@ -5,8 +5,8 @@ define <4 x i32> @t00(<4 x i32>* %a0) nounwind ssp { entry: ; CHECK: movaps ({{%rdi|%rcx}}), %xmm0 -; CHECK-NEXT: movaps %xmm0, %xmm1 -; CHECK-NEXT: movlps (%rax), %xmm1 +; CHECK: movaps %xmm0, %xmm1 +; CHECK-NEXT: movss %xmm2, %xmm1 ; CHECK-NEXT: shufps $36, %xmm1, %xmm0 %0 = load <4 x i32>* undef, align 16 %1 = load <4 x i32>* %a0, align 16 @@ -23,3 +23,23 @@ entry: store <2 x double> %vecinit94, <2 x double>* undef ret void } + +define void @t02(<8 x i32>* %source, <2 x i32>* %dest) nounwind noinline { +entry: +; CHECK: movl 36({{%rdi|%rcx}}) +; CHECK-NEXT: movl 48({{%rdi|%rcx}}) +; CHECK: punpcklqdq +; CHECK: movq %xmm0, ({{%rsi|%rdx}}) + %0 = bitcast <8 x i32>* %source to <4 x i32>* + %arrayidx = getelementptr inbounds <4 x i32>* %0, i64 3 + %tmp2 = load <4 x i32>* %arrayidx, align 16 + %tmp3 = extractelement <4 x i32> %tmp2, i32 0 + %tmp5 = insertelement <2 x i32> <i32 undef, i32 0>, i32 %tmp3, i32 0 + %arrayidx7 = getelementptr inbounds <8 x i32>* %source, i64 1 + %1 = bitcast <8 x i32>* %arrayidx7 to <4 x i32>* + %tmp8 = load <4 x i32>* %1, align 16 + %tmp9 = extractelement <4 x i32> %tmp8, i32 1 + %tmp11 = insertelement <2 x i32> %tmp5, i32 %tmp9, i32 1 + store <2 x i32> %tmp11, <2 x i32>* %dest, align 8 + ret void +} diff --git a/test/CodeGen/X86/vec_shuffle-38.ll b/test/CodeGen/X86/vec_shuffle-38.ll new file mode 100644 index 0000000..69a2ede --- /dev/null +++ b/test/CodeGen/X86/vec_shuffle-38.ll @@ -0,0 +1,59 @@ +; RUN: llc < %s -march=x86-64 | FileCheck %s + +define <2 x double> @ld(<2 x double> %p) nounwind optsize ssp { +; CHECK: unpcklpd + %shuffle = shufflevector <2 x double> %p, <2 x double> undef, <2 x i32> zeroinitializer + ret <2 x double> %shuffle +} + +define <2 x double> @hd(<2 x double> %p) nounwind optsize ssp { +; CHECK: unpckhpd + %shuffle = shufflevector <2 x double> %p, <2 x double> undef, <2 x i32> <i32 1, i32 1> + ret <2 x double> %shuffle +} + +define <2 x i64> @ldi(<2 x i64> %p) nounwind optsize ssp { +; CHECK: punpcklqdq + %shuffle = shufflevector <2 x i64> %p, <2 x i64> undef, <2 x i32> zeroinitializer + ret <2 x i64> %shuffle +} + +define <2 x i64> @hdi(<2 x i64> %p) nounwind optsize ssp { +; CHECK: punpckhqdq + %shuffle = shufflevector <2 x i64> %p, <2 x i64> undef, <2 x i32> <i32 1, i32 1> + ret <2 x i64> %shuffle +} + +; rdar://10050549 +%struct.Float2 = type { float, float } + +define <4 x float> @loadhpi(%struct.Float2* %vPtr, <4 x float> %vecin1) nounwind readonly ssp { +entry: +; CHECK: loadhpi +; CHECK-NOT: movq +; CHECK: movhps ( + %tmp1 = bitcast %struct.Float2* %vPtr to <1 x i64>* + %addptr7 = getelementptr inbounds <1 x i64>* %tmp1, i64 0 + %tmp2 = bitcast <1 x i64>* %addptr7 to float* + %tmp3 = load float* %tmp2, align 4 + %vec = insertelement <4 x float> undef, float %tmp3, i32 0 + %addptr.i12 = getelementptr inbounds float* %tmp2, i64 1 + %tmp4 = load float* %addptr.i12, align 4 + %vecin2 = insertelement <4 x float> %vec, float %tmp4, i32 1 + %shuffle = shufflevector <4 x float> %vecin1, <4 x float> %vecin2, <4 x i32> <i32 0, i32 1, i32 4, i32 5> + ret <4 x float> %shuffle +} + +; rdar://10119696 +; CHECK: f +define <4 x float> @f(<4 x float> %x, double* nocapture %y) nounwind uwtable readonly ssp { 
+entry: + ; CHECK: movsd (% + ; CHECK-NEXT: movsd %xmm + %u110.i = load double* %y, align 1 + %tmp8.i = insertelement <2 x double> undef, double %u110.i, i32 0 + %tmp9.i = bitcast <2 x double> %tmp8.i to <4 x float> + %shuffle.i = shufflevector <4 x float> %x, <4 x float> %tmp9.i, <4 x i32> <i32 4, i32 5, i32 2, i32 3> + ret <4 x float> %shuffle.i +} + diff --git a/test/CodeGen/X86/vsplit-and.ll b/test/CodeGen/X86/vsplit-and.ll index 97dacfd..ee98806 100644 --- a/test/CodeGen/X86/vsplit-and.ll +++ b/test/CodeGen/X86/vsplit-and.ll @@ -1,8 +1,9 @@ -; RUN: llc < %s -march=x86 | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s - -define void @t(<2 x i64>* %dst, <2 x i64> %src1, <2 x i64> %src2) nounwind readonly { -; CHECK: andb +define void @t0(<2 x i64>* %dst, <2 x i64> %src1, <2 x i64> %src2) nounwind readonly { +; CHECK: t0 +; CHECK: pand +; CHECK: ret %cmp1 = icmp ne <2 x i64> %src1, zeroinitializer %cmp2 = icmp ne <2 x i64> %src2, zeroinitializer %t1 = and <2 x i1> %cmp1, %cmp2 @@ -12,7 +13,9 @@ define void @t(<2 x i64>* %dst, <2 x i64> %src1, <2 x i64> %src2) nounwind reado } define void @t2(<3 x i64>* %dst, <3 x i64> %src1, <3 x i64> %src2) nounwind readonly { -; CHECK: andb +; CHECK: t2 +; CHECK-NOT: pand +; CHECK: ret %cmp1 = icmp ne <3 x i64> %src1, zeroinitializer %cmp2 = icmp ne <3 x i64> %src2, zeroinitializer %t1 = and <3 x i1> %cmp1, %cmp2 diff --git a/test/CodeGen/X86/widen_arith-1.ll b/test/CodeGen/X86/widen_arith-1.ll index 4b8016d..85367e8 100644 --- a/test/CodeGen/X86/widen_arith-1.ll +++ b/test/CodeGen/X86/widen_arith-1.ll @@ -1,12 +1,10 @@ ; RUN: llc < %s -march=x86 -mattr=+sse42 | FileCheck %s -; Widen a v3i8 to v16i8 to use a vector add - define void @update(<3 x i8>* %dst, <3 x i8>* %src, i32 %n) nounwind { entry: ; CHECK-NOT: pextrw -; CHECK: paddb -; CHECK: pextrb +; CHECK: add + %dst.addr = alloca <3 x i8>* ; <<3 x i8>**> [#uses=2] %src.addr = alloca <3 x i8>* ; <<3 x i8>**> [#uses=2] %n.addr = alloca i32 ; <i32*> [#uses=2] diff --git a/test/CodeGen/X86/widen_arith-2.ll b/test/CodeGen/X86/widen_arith-2.ll index 03b3fea..d35abc3 100644 --- a/test/CodeGen/X86/widen_arith-2.ll +++ b/test/CodeGen/X86/widen_arith-2.ll @@ -1,5 +1,5 @@ ; RUN: llc < %s -march=x86 -mattr=+sse42 | FileCheck %s -; CHECK: paddb +; CHECK: padd ; CHECK: pand ; widen v8i8 to v16i8 (checks even power of 2 widening with add & and) diff --git a/test/CodeGen/X86/widen_arith-3.ll b/test/CodeGen/X86/widen_arith-3.ll index 0574923..11d56f5 100644 --- a/test/CodeGen/X86/widen_arith-3.ll +++ b/test/CodeGen/X86/widen_arith-3.ll @@ -1,7 +1,8 @@ ; RUN: llc < %s -march=x86 -mattr=+sse42 -post-RA-scheduler=true | FileCheck %s -; CHECK: paddw -; CHECK: pextrw -; CHECK: movd +; CHECK: incw +; CHECK: incl +; CHECK: incl +; CHECK: addl ; Widen a v3i16 to v8i16 to do a vector add diff --git a/test/CodeGen/X86/widen_cast-1.ll b/test/CodeGen/X86/widen_cast-1.ll index 1eace9e..4330aae 100644 --- a/test/CodeGen/X86/widen_cast-1.ll +++ b/test/CodeGen/X86/widen_cast-1.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=x86 -mattr=+sse42 < %s | FileCheck %s -; CHECK: paddw +; CHECK: paddd ; CHECK: pextrd ; CHECK: movd diff --git a/test/CodeGen/X86/widen_cast-4.ll b/test/CodeGen/X86/widen_cast-4.ll index 8e1adf5..5ea5426 100644 --- a/test/CodeGen/X86/widen_cast-4.ll +++ b/test/CodeGen/X86/widen_cast-4.ll @@ -1,16 +1,6 @@ ; RUN: llc < %s -march=x86 -mattr=+sse42 | FileCheck %s -; CHECK: sarb -; CHECK: sarb -; CHECK: sarb -; CHECK: sarb -; CHECK: sarb -; CHECK: sarb -; CHECK: sarb -; CHECK: sarb - -; v8i8 that is widen to 
v16i8 then split -; FIXME: This is widen to v16i8 and split to 16 and we then rebuild the vector. -; Unfortunately, we don't split the store so we don't get the code we want. +; CHECK: psraw +; CHECK: psraw define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind { entry: diff --git a/test/CodeGen/X86/widen_conv-1.ll b/test/CodeGen/X86/widen_conv-1.ll index f6810cd..51f1c88 100644 --- a/test/CodeGen/X86/widen_conv-1.ll +++ b/test/CodeGen/X86/widen_conv-1.ll @@ -1,6 +1,5 @@ ; RUN: llc < %s -march=x86 -mattr=+sse42 | FileCheck %s -; CHECK: pshufd -; CHECK: paddd +; CHECK: paddq ; truncate v2i64 to v2i32 diff --git a/test/CodeGen/X86/widen_conv-4.ll b/test/CodeGen/X86/widen_conv-4.ll index 80f3a49..affd796 100644 --- a/test/CodeGen/X86/widen_conv-4.ll +++ b/test/CodeGen/X86/widen_conv-4.ll @@ -1,5 +1,5 @@ ; RUN: llc < %s -march=x86 -mattr=+sse42 | FileCheck %s -; CHECK: cvtsi2ss +; CHECK-NOT: cvtsi2ss ; unsigned to float v7i16 to v7f32 diff --git a/test/CodeGen/X86/widen_load-0.ll b/test/CodeGen/X86/widen_load-0.ll index c91627c..4aeec91 100644 --- a/test/CodeGen/X86/widen_load-0.ll +++ b/test/CodeGen/X86/widen_load-0.ll @@ -1,18 +1,18 @@ -; RUN: llc < %s -o - -mtriple=x86_64-linux | FileCheck %s -; RUN: llc < %s -o - -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64 +; RUN: llc < %s -o - -mtriple=x86_64-linux -mcpu=corei7 | FileCheck %s +; RUN: llc < %s -o - -mtriple=x86_64-win32 -mcpu=corei7 | FileCheck %s -check-prefix=WIN64 ; PR4891 ; Both loads should happen before either store. -; CHECK: movl (%rdi), %[[R1:...]] -; CHECK: movl (%rsi), %[[R2:...]] -; CHECK: movl %[[R2]], (%rdi) -; CHECK: movl %[[R1]], (%rsi) +; CHECK: movd ({{.*}}), {{.*}} +; CHECK: movd ({{.*}}), {{.*}} +; CHECK: movd {{.*}}, ({{.*}}) +; CHECK: movd {{.*}}, ({{.*}}) -; WIN64: movl (%rcx), %[[R1:...]] -; WIN64: movl (%rdx), %[[R2:...]] -; WIN64: movl %[[R2]], (%rcx) -; WIN64: movl %[[R1]], (%rdx) +; WIN64: movd ({{.*}}), {{.*}} +; WIN64: movd ({{.*}}), {{.*}} +; WIN64: movd {{.*}}, ({{.*}}) +; WIN64: movd {{.*}}, ({{.*}}) define void @short2_int_swap(<2 x i16>* nocapture %b, i32* nocapture %c) nounwind { entry: diff --git a/test/CodeGen/X86/widen_load-2.ll b/test/CodeGen/X86/widen_load-2.ll index 6422063..71699b8 100644 --- a/test/CodeGen/X86/widen_load-2.ll +++ b/test/CodeGen/X86/widen_load-2.ll @@ -4,6 +4,7 @@ ; %i32vec3 = type <3 x i32> +; CHECK: add3i32 define void @add3i32(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) { ; CHECK: movdqa ; CHECK: paddd @@ -16,6 +17,7 @@ define void @add3i32(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) { ret void } +; CHECK: add3i32_2 define void @add3i32_2(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) { ; CHECK: movq ; CHECK: pinsrd @@ -32,6 +34,7 @@ define void @add3i32_2(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) { } %i32vec7 = type <7 x i32> +; CHECK: add7i32 define void @add7i32(%i32vec7* sret %ret, %i32vec7* %ap, %i32vec7* %bp) { ; CHECK: movdqa ; CHECK: movdqa @@ -47,6 +50,7 @@ define void @add7i32(%i32vec7* sret %ret, %i32vec7* %ap, %i32vec7* %bp) { ret void } +; CHECK: add12i32 %i32vec12 = type <12 x i32> define void @add12i32(%i32vec12* sret %ret, %i32vec12* %ap, %i32vec12* %bp) { ; CHECK: movdqa @@ -66,12 +70,14 @@ define void @add12i32(%i32vec12* sret %ret, %i32vec12* %ap, %i32vec12* %bp) { } +; CHECK: add3i16 %i16vec3 = type <3 x i16> define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp) nounwind { -; CHECK: movdqa -; CHECK: paddw -; CHECK: movd -; CHECK: pextrw +; CHECK: add3i16 +; CHECK: addl +; CHECK: addl 
+; CHECK: addl +; CHECK: ret %a = load %i16vec3* %ap, align 16 %b = load %i16vec3* %bp, align 16 %x = add %i16vec3 %a, %b @@ -79,10 +85,11 @@ define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp ret void } +; CHECK: add4i16 %i16vec4 = type <4 x i16> define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp) nounwind { -; CHECK: movdqa -; CHECK: paddw +; CHECK: add4i16 +; CHECK: paddd ; CHECK: movq %a = load %i16vec4* %ap, align 16 %b = load %i16vec4* %bp, align 16 @@ -91,6 +98,7 @@ define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp ret void } +; CHECK: add12i16 %i16vec12 = type <12 x i16> define void @add12i16(%i16vec12* nocapture sret %ret, %i16vec12* %ap, %i16vec12* %bp) nounwind { ; CHECK: movdqa @@ -106,6 +114,7 @@ define void @add12i16(%i16vec12* nocapture sret %ret, %i16vec12* %ap, %i16vec12* ret void } +; CHECK: add18i16 %i16vec18 = type <18 x i16> define void @add18i16(%i16vec18* nocapture sret %ret, %i16vec18* %ap, %i16vec18* %bp) nounwind { ; CHECK: movdqa @@ -125,12 +134,13 @@ define void @add18i16(%i16vec18* nocapture sret %ret, %i16vec18* %ap, %i16vec18* } +; CHECK: add3i8 %i8vec3 = type <3 x i8> define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) nounwind { -; CHECK: movdqa -; CHECK: paddb -; CHECK: pextrb -; CHECK: movb +; CHECK: addb +; CHECK: addb +; CHECK: addb +; CHECK: ret %a = load %i8vec3* %ap, align 16 %b = load %i8vec3* %bp, align 16 %x = add %i8vec3 %a, %b @@ -138,6 +148,7 @@ define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) no ret void } +; CHECK: add31i8: %i8vec31 = type <31 x i8> define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp) nounwind { ; CHECK: movdqa @@ -147,6 +158,7 @@ define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp ; CHECK: movq ; CHECK: pextrb ; CHECK: pextrw +; CHECK: ret %a = load %i8vec31* %ap, align 16 %b = load %i8vec31* %bp, align 16 %x = add %i8vec31 %a, %b @@ -155,9 +167,10 @@ define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp } +; CHECK: rot %i8vec3pack = type { <3 x i8>, i8 } define %i8vec3pack @rot() nounwind { -; CHECK: shrb +; CHECK: shrl entry: %X = alloca %i8vec3pack, align 4 %rot = alloca %i8vec3pack, align 4 diff --git a/test/CodeGen/X86/widen_shuffle-1.ll b/test/CodeGen/X86/widen_shuffle-1.ll index 034c42c..2df3b6a 100644 --- a/test/CodeGen/X86/widen_shuffle-1.ll +++ b/test/CodeGen/X86/widen_shuffle-1.ll @@ -30,6 +30,7 @@ entry: ; opA with opB, the DAG will produce new operations with opA. 
define void @shuf3(<4 x float> %tmp10, <4 x float> %vecinit15, <4 x float>* %dst) nounwind { entry: +; CHECK: shuf3: ; CHECK: pshufd %shuffle.i.i.i12 = shufflevector <4 x float> %tmp10, <4 x float> %vecinit15, <4 x i32> <i32 0, i32 1, i32 4, i32 5> %tmp25.i.i = shufflevector <4 x float> %shuffle.i.i.i12, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2> @@ -46,3 +47,10 @@ entry: ret void } +; PR10421: make sure we correctly handle extreme widening with CONCAT_VECTORS +define <8 x i8> @shuf4(<4 x i8> %a, <4 x i8> %b) nounwind readnone { +; CHECK: shuf4: +; CHECK-NOT: punpckldq + %vshuf = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + ret <8 x i8> %vshuf +} diff --git a/test/CodeGen/X86/x86-shifts.ll b/test/CodeGen/X86/x86-shifts.ll index fdf68f9..1cb07aa 100644 --- a/test/CodeGen/X86/x86-shifts.ll +++ b/test/CodeGen/X86/x86-shifts.ll @@ -124,7 +124,7 @@ entry: define <2 x i32> @shl2_other(<2 x i32> %A) nounwind { entry: ; CHECK: shl2_other -; CHECK-not: psllq +; CHECK: psllq %B = shl <2 x i32> %A, < i32 2, i32 2> %C = shl <2 x i32> %A, < i32 9, i32 9> %K = xor <2 x i32> %B, %C @@ -134,7 +134,7 @@ entry: define <2 x i32> @shr2_other(<2 x i32> %A) nounwind { entry: ; CHECK: shr2_other -; CHECK-NOT: psrlq +; CHECK: psrlq %B = lshr <2 x i32> %A, < i32 8, i32 8> %C = lshr <2 x i32> %A, < i32 1, i32 1> %K = xor <2 x i32> %B, %C |