Diffstat (limited to 'test/Transforms/ObjCARC')
-rw-r--r--  test/Transforms/ObjCARC/apelim.ll                     |  53
-rw-r--r--  test/Transforms/ObjCARC/basic.ll                      | 103
-rw-r--r--  test/Transforms/ObjCARC/contract-storestrong-ivar.ll  |   2
-rw-r--r--  test/Transforms/ObjCARC/contract-storestrong.ll       |   6
-rw-r--r--  test/Transforms/ObjCARC/dg.exp                        |   3
-rw-r--r--  test/Transforms/ObjCARC/invoke.ll                     |  36
-rw-r--r--  test/Transforms/ObjCARC/lit.local.cfg                 |   1
-rw-r--r--  test/Transforms/ObjCARC/nested.ll                     | 141
-rw-r--r--  test/Transforms/ObjCARC/no-objc-arc-exceptions.ll     | 122
-rw-r--r--  test/Transforms/ObjCARC/pointer-types.ll              |  31
-rw-r--r--  test/Transforms/ObjCARC/pr12270.ll                    |  15
-rw-r--r--  test/Transforms/ObjCARC/retain-block-load.ll          |  51
-rw-r--r--  test/Transforms/ObjCARC/retain-block.ll               | 138
13 files changed, 686 insertions, 16 deletions
diff --git a/test/Transforms/ObjCARC/apelim.ll b/test/Transforms/ObjCARC/apelim.ll
new file mode 100644
index 0000000..8c7b5b1
--- /dev/null
+++ b/test/Transforms/ObjCARC/apelim.ll
@@ -0,0 +1,53 @@
+; RUN: opt -S -objc-arc-apelim < %s | FileCheck %s
+; rdar://10227311
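+;
+; The -objc-arc-apelim pass removes objc_autoreleasePoolPush/Pop pairs from
+; static constructors when the code they bracket provably cannot autorelease.
+; Judging from the CHECK lines below, @_GLOBAL__I_x qualifies because it
+; (transitively) calls only @foo, whose body is visible and makes no calls,
+; while @_GLOBAL__I_y must keep its pair because @bar is only a declaration.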
+
+@llvm.global_ctors = appending global [2 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_x }, { i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_y }]
+
+@x = global i32 0
+
+declare i32 @bar() nounwind
+
+define i32 @foo() nounwind {
+entry:
+ ret i32 5
+}
+
+define internal void @__cxx_global_var_init() {
+entry:
+ %call = call i32 @foo()
+ store i32 %call, i32* @x, align 4
+ ret void
+}
+
+define internal void @__dxx_global_var_init() {
+entry:
+ %call = call i32 @bar()
+ store i32 %call, i32* @x, align 4
+ ret void
+}
+
+; CHECK: define internal void @_GLOBAL__I_x()
+; CHECK-NOT: @objc
+; CHECK: }
+define internal void @_GLOBAL__I_x() {
+entry:
+ %0 = call i8* @objc_autoreleasePoolPush() nounwind
+ call void @__cxx_global_var_init()
+ call void @objc_autoreleasePoolPop(i8* %0) nounwind
+ ret void
+}
+
+; CHECK: define internal void @_GLOBAL__I_y()
+; CHECK: %0 = call i8* @objc_autoreleasePoolPush() nounwind
+; CHECK: call void @objc_autoreleasePoolPop(i8* %0) nounwind
+; CHECK: }
+define internal void @_GLOBAL__I_y() {
+entry:
+ %0 = call i8* @objc_autoreleasePoolPush() nounwind
+ call void @__dxx_global_var_init()
+ call void @objc_autoreleasePoolPop(i8* %0) nounwind
+ ret void
+}
+
+declare i8* @objc_autoreleasePoolPush()
+declare void @objc_autoreleasePoolPop(i8*)
diff --git a/test/Transforms/ObjCARC/basic.ll b/test/Transforms/ObjCARC/basic.ll
index 861173b..08bd8c0 100644
--- a/test/Transforms/ObjCARC/basic.ll
+++ b/test/Transforms/ObjCARC/basic.ll
@@ -167,7 +167,7 @@ entry:
loop:
%c = bitcast i32* %x to i8*
call void @objc_release(i8* %c) nounwind
- %j = volatile load i1* %q
+ %j = load volatile i1* %q
br i1 %j, label %loop, label %return
return:
@@ -190,7 +190,7 @@ entry:
loop:
%a = bitcast i32* %x to i8*
%0 = call i8* @objc_retain(i8* %a) nounwind
- %j = volatile load i1* %q
+ %j = load volatile i1* %q
br i1 %j, label %loop, label %return
return:
@@ -786,7 +786,7 @@ C:
@__block_holder_tmp_1 = external constant %block1
define void @test23() {
entry:
- %0 = call i8* @objc_retainBlock(i8* bitcast (%block1* @__block_holder_tmp_1 to i8*)) nounwind
+ %0 = call i8* @objc_retainBlock(i8* bitcast (%block1* @__block_holder_tmp_1 to i8*)) nounwind, !clang.arc.copy_on_escape !0
call void @bar(i32 ()* bitcast (%block1* @__block_holder_tmp_1 to i32 ()*))
call void @bar(i32 ()* bitcast (%block1* @__block_holder_tmp_1 to i32 ()*))
call void @objc_release(i8* bitcast (%block1* @__block_holder_tmp_1 to i8*)) nounwind
@@ -801,13 +801,28 @@ entry:
; CHECK: }
define void @test23b(i8* %p) {
entry:
- %0 = call i8* @objc_retainBlock(i8* %p) nounwind
+ %0 = call i8* @objc_retainBlock(i8* %p) nounwind, !clang.arc.copy_on_escape !0
call void @callee()
call void @use_pointer(i8* %p)
call void @objc_release(i8* %p) nounwind
ret void
}
+; Don't optimize objc_retainBlock, because there's no copy_on_escape metadata.
+
+; CHECK: define void @test23c(
+; CHECK: @objc_retainBlock
+; CHECK: @objc_release
+; CHECK: }
+define void @test23c() {
+entry:
+ %0 = call i8* @objc_retainBlock(i8* bitcast (%block1* @__block_holder_tmp_1 to i8*)) nounwind
+ call void @bar(i32 ()* bitcast (%block1* @__block_holder_tmp_1 to i32 ()*))
+ call void @bar(i32 ()* bitcast (%block1* @__block_holder_tmp_1 to i32 ()*))
+ call void @objc_release(i8* bitcast (%block1* @__block_holder_tmp_1 to i8*)) nounwind
+ ret void
+}
+
; Any call can decrement a retain count.
; CHECK: define void @test24(
@@ -1528,9 +1543,11 @@ define void @test52(i8** %zz, i8** %pp) {
; Like test52, but the pointer has function type, so it's assumed to
; be not reference counted.
+; Oops. That's wrong. Clang sometimes uses function types gratuitously.
+; See rdar://10551239.
; CHECK: define void @test53(
-; CHECK-NOT: @objc_
+; CHECK: @objc_
; CHECK: }
define void @test53(void ()** %zz, i8** %pp) {
%p = load i8** %pp
@@ -1704,6 +1721,82 @@ define void @test61() {
ret void
}
+; Delete a retain matched by releases when one is inside the loop and the
+; other is outside the loop.
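+; On every path, the retain in the loop is balanced by exactly one of the
+; two releases (loop.more on the backedge, exit on the way out), so the
+; whole retain/release set can be removed.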
+
+; CHECK: define void @test62(
+; CHECK-NOT: @objc_
+; CHECK: }
+define void @test62(i8* %x, i1* %p) nounwind {
+entry:
+ br label %loop
+
+loop:
+ call i8* @objc_retain(i8* %x)
+ %q = load i1* %p
+ br i1 %q, label %loop.more, label %exit
+
+loop.more:
+ call void @objc_release(i8* %x)
+ br label %loop
+
+exit:
+ call void @objc_release(i8* %x)
+ ret void
+}
+
+; Like test62 but with no release in exit.
+; Don't delete anything!
+
+; CHECK: define void @test63(
+; CHECK: loop:
+; CHECK: tail call i8* @objc_retain(i8* %x)
+; CHECK: loop.more:
+; CHECK: call void @objc_release(i8* %x)
+; CHECK: }
+define void @test63(i8* %x, i1* %p) nounwind {
+entry:
+ br label %loop
+
+loop:
+ call i8* @objc_retain(i8* %x)
+ %q = load i1* %p
+ br i1 %q, label %loop.more, label %exit
+
+loop.more:
+ call void @objc_release(i8* %x)
+ br label %loop
+
+exit:
+ ret void
+}
+
+; Like test62 but with no release in loop.more.
+; Don't delete anything!
+
+; CHECK: define void @test64(
+; CHECK: loop:
+; CHECK: tail call i8* @objc_retain(i8* %x)
+; CHECK: exit:
+; CHECK: call void @objc_release(i8* %x)
+; CHECK: }
+define void @test64(i8* %x, i1* %p) nounwind {
+entry:
+ br label %loop
+
+loop:
+ call i8* @objc_retain(i8* %x)
+ %q = load i1* %p
+ br i1 %q, label %loop.more, label %exit
+
+loop.more:
+ br label %loop
+
+exit:
+ call void @objc_release(i8* %x)
+ ret void
+}
+
declare void @bar(i32 ()*)
; A few real-world testcases.
diff --git a/test/Transforms/ObjCARC/contract-storestrong-ivar.ll b/test/Transforms/ObjCARC/contract-storestrong-ivar.ll
index 4ad78e7..4a9b314 100644
--- a/test/Transforms/ObjCARC/contract-storestrong-ivar.ll
+++ b/test/Transforms/ObjCARC/contract-storestrong-ivar.ll
@@ -1,6 +1,6 @@
; RUN: opt -objc-arc-contract -S < %s | FileCheck %s
-; CHECK: call void @objc_storeStrong(i8**
+; CHECK: tail call void @objc_storeStrong(i8**
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin11.0.0"
diff --git a/test/Transforms/ObjCARC/contract-storestrong.ll b/test/Transforms/ObjCARC/contract-storestrong.ll
index 25c93f4..4ff0596 100644
--- a/test/Transforms/ObjCARC/contract-storestrong.ll
+++ b/test/Transforms/ObjCARC/contract-storestrong.ll
@@ -9,7 +9,7 @@ declare void @objc_release(i8*)
; CHECK: define void @test0(
; CHECK: entry:
-; CHECK-NEXT: call void @objc_storeStrong(i8** @x, i8* %p) nounwind
+; CHECK-NEXT: tail call void @objc_storeStrong(i8** @x, i8* %p) nounwind
; CHECK-NEXT: ret void
define void @test0(i8* %p) {
entry:
@@ -33,7 +33,7 @@ entry:
define void @test1(i8* %p) {
entry:
%0 = tail call i8* @objc_retain(i8* %p) nounwind
- %tmp = volatile load i8** @x, align 8
+ %tmp = load volatile i8** @x, align 8
store i8* %0, i8** @x, align 8
tail call void @objc_release(i8* %tmp) nounwind
ret void
@@ -53,7 +53,7 @@ define void @test2(i8* %p) {
entry:
%0 = tail call i8* @objc_retain(i8* %p) nounwind
%tmp = load i8** @x, align 8
- volatile store i8* %0, i8** @x, align 8
+ store volatile i8* %0, i8** @x, align 8
tail call void @objc_release(i8* %tmp) nounwind
ret void
}
diff --git a/test/Transforms/ObjCARC/dg.exp b/test/Transforms/ObjCARC/dg.exp
deleted file mode 100644
index f200589..0000000
--- a/test/Transforms/ObjCARC/dg.exp
+++ /dev/null
@@ -1,3 +0,0 @@
-load_lib llvm.exp
-
-RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
diff --git a/test/Transforms/ObjCARC/invoke.ll b/test/Transforms/ObjCARC/invoke.ll
index cf971e4..9e26209 100644
--- a/test/Transforms/ObjCARC/invoke.ll
+++ b/test/Transforms/ObjCARC/invoke.ll
@@ -2,6 +2,7 @@
declare i8* @objc_retain(i8*)
declare void @objc_release(i8*)
+declare i8* @objc_retainAutoreleasedReturnValue(i8*)
declare i8* @objc_msgSend(i8*, i8*, ...)
declare void @use_pointer(i8*)
declare void @callee()
@@ -68,6 +69,41 @@ done:
ret void
}
+; The optimizer should ignore invoke unwind paths consistently.
+; PR12265
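+;
+; The checks verify that the retain in invoke.cont and the release in
+; finally.cont are both kept, and that no ARC calls end up on the
+; finally.rethrow unwind path.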
+
+; CHECK: define void @test2() {
+; CHECK: invoke.cont:
+; CHECK-NEXT: call i8* @objc_retain
+; CHECK-NOT: @objc
+; CHECK: finally.cont:
+; CHECK-NEXT: call void @objc_release
+; CHECK-NOT: @objc
+; CHECK: finally.rethrow:
+; CHECK-NOT: @objc
+; CHECK: }
+define void @test2() {
+entry:
+ %call = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* ()*)()
+ to label %invoke.cont unwind label %finally.rethrow, !clang.arc.no_objc_arc_exceptions !0
+
+invoke.cont: ; preds = %entry
+ %tmp1 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
+ call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void ()*)(), !clang.arc.no_objc_arc_exceptions !0
+ invoke void @use_pointer(i8* %call)
+ to label %finally.cont unwind label %finally.rethrow, !clang.arc.no_objc_arc_exceptions !0
+
+finally.cont: ; preds = %invoke.cont
+ tail call void @objc_release(i8* %call) nounwind, !clang.imprecise_release !0
+ ret void
+
+finally.rethrow: ; preds = %invoke.cont, %entry
+ %tmp2 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*)
+ catch i8* null
+ unreachable
+}
+
declare i32 @__gxx_personality_v0(...)
+declare i32 @__objc_personality_v0(...)
!0 = metadata !{}
diff --git a/test/Transforms/ObjCARC/lit.local.cfg b/test/Transforms/ObjCARC/lit.local.cfg
new file mode 100644
index 0000000..19eebc0
--- /dev/null
+++ b/test/Transforms/ObjCARC/lit.local.cfg
@@ -0,0 +1 @@
+config.suffixes = ['.ll', '.c', '.cpp']
diff --git a/test/Transforms/ObjCARC/nested.ll b/test/Transforms/ObjCARC/nested.ll
index 9eada8a..a618a21 100644
--- a/test/Transforms/ObjCARC/nested.ll
+++ b/test/Transforms/ObjCARC/nested.ll
@@ -484,12 +484,14 @@ forcoll.empty:
ret void
}
-; Delete a nested retain+release pair.
+; TODO: Delete a nested retain+release pair.
+; The optimizer currently can't do this, because of a split loop backedge.
+; See test9b for the same testcase without a split backedge.
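+; (A "split backedge" here means the loop's backedge passes through an
+; extra block on its way back to the header, which the current analysis
+; does not look through.)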
; CHECK: define void @test9(
; CHECK: call i8* @objc_retain
; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: call i8* @objc_retain
; CHECK: }
define void @test9() nounwind {
entry:
@@ -551,13 +553,79 @@ forcoll.empty:
ret void
}
-; Delete a nested retain+release pair.
+; Like test9, but without a split backedge. This we can optimize.
-; CHECK: define void @test10(
+; CHECK: define void @test9b(
; CHECK: call i8* @objc_retain
; CHECK: call i8* @objc_retain
; CHECK-NOT: @objc_retain
; CHECK: }
+define void @test9b() nounwind {
+entry:
+ %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
+ %items.ptr = alloca [16 x i8*], align 8
+ %call = call i8* @returner()
+ %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
+ %call1 = call i8* @returner()
+ %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call1) nounwind
+ %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
+ call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 64, i32 8, i1 false)
+ %2 = call i8* @objc_retain(i8* %0) nounwind
+ %tmp3 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+ %iszero = icmp eq i64 %call4, 0
+ br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
+
+forcoll.loopinit:
+ %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+ %mutationsptr = load i64** %mutationsptr.ptr, align 8
+ %forcoll.initial-mutations = load i64* %mutationsptr, align 8
+ br label %forcoll.loopbody.outer
+
+forcoll.loopbody.outer:
+ %forcoll.count.ph = phi i64 [ %call4, %forcoll.loopinit ], [ %call7, %forcoll.refetch ]
+ %tmp9 = icmp ugt i64 %forcoll.count.ph, 1
+ %umax = select i1 %tmp9, i64 %forcoll.count.ph, i64 1
+ br label %forcoll.loopbody
+
+forcoll.loopbody:
+ %forcoll.index = phi i64 [ %phitmp, %forcoll.notmutated ], [ 0, %forcoll.loopbody.outer ]
+ %mutationsptr5 = load i64** %mutationsptr.ptr, align 8
+ %statemutations = load i64* %mutationsptr5, align 8
+ %3 = icmp eq i64 %statemutations, %forcoll.initial-mutations
+ br i1 %3, label %forcoll.notmutated, label %forcoll.mutated
+
+forcoll.mutated:
+ call void @objc_enumerationMutation(i8* %2)
+ br label %forcoll.notmutated
+
+forcoll.notmutated:
+ %phitmp = add i64 %forcoll.index, 1
+ %exitcond = icmp eq i64 %phitmp, %umax
+ br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody
+
+forcoll.refetch:
+ %tmp6 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+ %4 = icmp eq i64 %call7, 0
+ br i1 %4, label %forcoll.empty, label %forcoll.loopbody.outer
+
+forcoll.empty:
+ call void @objc_release(i8* %2) nounwind
+ call void @objc_release(i8* %1) nounwind, !clang.imprecise_release !0
+ call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
+; TODO: Delete a nested retain+release pair.
+; The optimizer currently can't do this, because of a split loop backedge.
+; See test10b for the same testcase without a split backedge.
+
+; CHECK: define void @test10(
+; CHECK: call i8* @objc_retain
+; CHECK: call i8* @objc_retain
+; CHECK: call i8* @objc_retain
+; CHECK: }
define void @test10() nounwind {
entry:
%state.ptr = alloca %struct.__objcFastEnumerationState, align 8
@@ -618,3 +686,68 @@ forcoll.empty:
call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
ret void
}
+
+; Like test10, but without a split backedge. This we can optimize.
+
+; CHECK: define void @test10b(
+; CHECK: call i8* @objc_retain
+; CHECK: call i8* @objc_retain
+; CHECK-NOT: @objc_retain
+; CHECK: }
+define void @test10b() nounwind {
+entry:
+ %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
+ %items.ptr = alloca [16 x i8*], align 8
+ %call = call i8* @returner()
+ %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
+ %call1 = call i8* @returner()
+ %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call1) nounwind
+ call void @callee()
+ %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
+ call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 64, i32 8, i1 false)
+ %2 = call i8* @objc_retain(i8* %0) nounwind
+ %tmp3 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+ %iszero = icmp eq i64 %call4, 0
+ br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
+
+forcoll.loopinit:
+ %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+ %mutationsptr = load i64** %mutationsptr.ptr, align 8
+ %forcoll.initial-mutations = load i64* %mutationsptr, align 8
+ br label %forcoll.loopbody.outer
+
+forcoll.loopbody.outer:
+ %forcoll.count.ph = phi i64 [ %call4, %forcoll.loopinit ], [ %call7, %forcoll.refetch ]
+ %tmp9 = icmp ugt i64 %forcoll.count.ph, 1
+ %umax = select i1 %tmp9, i64 %forcoll.count.ph, i64 1
+ br label %forcoll.loopbody
+
+forcoll.loopbody:
+ %forcoll.index = phi i64 [ %phitmp, %forcoll.notmutated ], [ 0, %forcoll.loopbody.outer ]
+ %mutationsptr5 = load i64** %mutationsptr.ptr, align 8
+ %statemutations = load i64* %mutationsptr5, align 8
+ %3 = icmp eq i64 %statemutations, %forcoll.initial-mutations
+ br i1 %3, label %forcoll.notmutated, label %forcoll.mutated
+
+forcoll.mutated:
+ call void @objc_enumerationMutation(i8* %2)
+ br label %forcoll.notmutated
+
+forcoll.notmutated:
+ %phitmp = add i64 %forcoll.index, 1
+ %exitcond = icmp eq i64 %phitmp, %umax
+ br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody
+
+forcoll.refetch:
+ %tmp6 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+ %4 = icmp eq i64 %call7, 0
+ br i1 %4, label %forcoll.empty, label %forcoll.loopbody.outer
+
+forcoll.empty:
+ call void @objc_release(i8* %2) nounwind
+ call void @objc_release(i8* %1) nounwind, !clang.imprecise_release !0
+ call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+ ret void
+}
diff --git a/test/Transforms/ObjCARC/no-objc-arc-exceptions.ll b/test/Transforms/ObjCARC/no-objc-arc-exceptions.ll
new file mode 100644
index 0000000..9728f6e
--- /dev/null
+++ b/test/Transforms/ObjCARC/no-objc-arc-exceptions.ll
@@ -0,0 +1,122 @@
+; RUN: opt -S -objc-arc < %s | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+%struct.__block_byref_x = type { i8*, %struct.__block_byref_x*, i32, i32, i32 }
+%struct.__block_descriptor = type { i64, i64 }
+@_NSConcreteStackBlock = external global i8*
+@__block_descriptor_tmp = external hidden constant { i64, i64, i8*, i8*, i8*, i8* }
+
+; The optimizer should make use of the !clang.arc.no_objc_arc_exceptions
+; metadata and eliminate the retainBlock+release pair here.
+; rdar://10803830.
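+;
+; With the no_objc_arc_exceptions metadata on the invoke, the unwind edge
+; to %lpad need not be treated as a use of the block, so the retainBlock in
+; the entry block pairs with the release in invoke.cont and both go away.
+; Compare test0_no_metadata below, where both calls must stay.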
+
+; CHECK: define void @test0(
+; CHECK-NOT: @objc
+; CHECK: }
+define void @test0() {
+entry:
+ %x = alloca %struct.__block_byref_x, align 8
+ %block = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, align 8
+ %byref.isa = getelementptr inbounds %struct.__block_byref_x* %x, i64 0, i32 0
+ store i8* null, i8** %byref.isa, align 8
+ %byref.forwarding = getelementptr inbounds %struct.__block_byref_x* %x, i64 0, i32 1
+ store %struct.__block_byref_x* %x, %struct.__block_byref_x** %byref.forwarding, align 8
+ %byref.flags = getelementptr inbounds %struct.__block_byref_x* %x, i64 0, i32 2
+ store i32 0, i32* %byref.flags, align 8
+ %byref.size = getelementptr inbounds %struct.__block_byref_x* %x, i64 0, i32 3
+ store i32 32, i32* %byref.size, align 4
+ %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 0
+ store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa, align 8
+ %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 1
+ store i32 1107296256, i32* %block.flags, align 8
+ %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 2
+ store i32 0, i32* %block.reserved, align 4
+ %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 3
+ store i8* bitcast (void (i8*)* @__foo_block_invoke_0 to i8*), i8** %block.invoke, align 8
+ %block.descriptor = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 4
+ store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8*, i8*, i8* }* @__block_descriptor_tmp to %struct.__block_descriptor*), %struct.__block_descriptor** %block.descriptor, align 8
+ %block.captured = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 5
+ %t1 = bitcast %struct.__block_byref_x* %x to i8*
+ store i8* %t1, i8** %block.captured, align 8
+ %t2 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block to i8*
+ %t3 = call i8* @objc_retainBlock(i8* %t2) nounwind, !clang.arc.copy_on_escape !4
+ %t4 = getelementptr inbounds i8* %t3, i64 16
+ %t5 = bitcast i8* %t4 to i8**
+ %t6 = load i8** %t5, align 8
+ %t7 = bitcast i8* %t6 to void (i8*)*
+ invoke void %t7(i8* %t3)
+ to label %invoke.cont unwind label %lpad, !clang.arc.no_objc_arc_exceptions !4
+
+invoke.cont: ; preds = %entry
+ call void @objc_release(i8* %t3) nounwind, !clang.imprecise_release !4
+ call void @_Block_object_dispose(i8* %t1, i32 8)
+ ret void
+
+lpad: ; preds = %entry
+ %t8 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*)
+ cleanup
+ call void @_Block_object_dispose(i8* %t1, i32 8)
+ resume { i8*, i32 } %t8
+}
+
+; There is no !clang.arc.no_objc_arc_exceptions
+; metadata here, so the optimizer shouldn't eliminate anything.
+
+; CHECK: define void @test0_no_metadata(
+; CHECK: call i8* @objc_retainBlock(
+; CHECK: invoke
+; CHECK: call void @objc_release(
+; CHECK: }
+define void @test0_no_metadata() {
+entry:
+ %x = alloca %struct.__block_byref_x, align 8
+ %block = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, align 8
+ %byref.isa = getelementptr inbounds %struct.__block_byref_x* %x, i64 0, i32 0
+ store i8* null, i8** %byref.isa, align 8
+ %byref.forwarding = getelementptr inbounds %struct.__block_byref_x* %x, i64 0, i32 1
+ store %struct.__block_byref_x* %x, %struct.__block_byref_x** %byref.forwarding, align 8
+ %byref.flags = getelementptr inbounds %struct.__block_byref_x* %x, i64 0, i32 2
+ store i32 0, i32* %byref.flags, align 8
+ %byref.size = getelementptr inbounds %struct.__block_byref_x* %x, i64 0, i32 3
+ store i32 32, i32* %byref.size, align 4
+ %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 0
+ store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa, align 8
+ %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 1
+ store i32 1107296256, i32* %block.flags, align 8
+ %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 2
+ store i32 0, i32* %block.reserved, align 4
+ %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 3
+ store i8* bitcast (void (i8*)* @__foo_block_invoke_0 to i8*), i8** %block.invoke, align 8
+ %block.descriptor = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 4
+ store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8*, i8*, i8* }* @__block_descriptor_tmp to %struct.__block_descriptor*), %struct.__block_descriptor** %block.descriptor, align 8
+ %block.captured = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 5
+ %t1 = bitcast %struct.__block_byref_x* %x to i8*
+ store i8* %t1, i8** %block.captured, align 8
+ %t2 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block to i8*
+ %t3 = call i8* @objc_retainBlock(i8* %t2) nounwind, !clang.arc.copy_on_escape !4
+ %t4 = getelementptr inbounds i8* %t3, i64 16
+ %t5 = bitcast i8* %t4 to i8**
+ %t6 = load i8** %t5, align 8
+ %t7 = bitcast i8* %t6 to void (i8*)*
+ invoke void %t7(i8* %t3)
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont: ; preds = %entry
+ call void @objc_release(i8* %t3) nounwind, !clang.imprecise_release !4
+ call void @_Block_object_dispose(i8* %t1, i32 8)
+ ret void
+
+lpad: ; preds = %entry
+ %t8 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*)
+ cleanup
+ call void @_Block_object_dispose(i8* %t1, i32 8)
+ resume { i8*, i32 } %t8
+}
+
+declare i8* @objc_retainBlock(i8*)
+declare void @objc_release(i8*)
+declare void @_Block_object_dispose(i8*, i32)
+declare i32 @__objc_personality_v0(...)
+declare void @__foo_block_invoke_0(i8* nocapture) uwtable ssp
+
+!4 = metadata !{}
diff --git a/test/Transforms/ObjCARC/pointer-types.ll b/test/Transforms/ObjCARC/pointer-types.ll
new file mode 100644
index 0000000..6abc939
--- /dev/null
+++ b/test/Transforms/ObjCARC/pointer-types.ll
@@ -0,0 +1,31 @@
+; RUN: opt -objc-arc -S < %s | FileCheck %s
+
+; Don't hoist @objc_release past a use of its pointer, even
+; if the use has function type, because clang uses function types
+; in dubious ways.
+; rdar://10551239
+
+; CHECK: define void @test0(
+; CHECK: %otherBlock = phi void ()* [ %b1, %if.then ], [ null, %entry ]
+; CHECK-NEXT: call void @use_fptr(void ()* %otherBlock)
+; CHECK-NEXT: %tmp11 = bitcast void ()* %otherBlock to i8*
+; CHECK-NEXT: call void @objc_release(i8* %tmp11)
+
+define void @test0(i1 %tobool, void ()* %b1) {
+entry:
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %otherBlock = phi void ()* [ %b1, %if.then ], [ null, %entry ]
+ call void @use_fptr(void ()* %otherBlock)
+ %tmp11 = bitcast void ()* %otherBlock to i8*
+ call void @objc_release(i8* %tmp11) nounwind
+ ret void
+}
+
+declare void @use_fptr(void ()*)
+declare void @objc_release(i8*)
+
diff --git a/test/Transforms/ObjCARC/pr12270.ll b/test/Transforms/ObjCARC/pr12270.ll
new file mode 100644
index 0000000..30610f8
--- /dev/null
+++ b/test/Transforms/ObjCARC/pr12270.ll
@@ -0,0 +1,15 @@
+; RUN: opt -disable-output -objc-arc-contract %s
+; Test that the -objc-arc-contract pass does not crash on unreachable code.
+%2 = type opaque
+
+define void @_i_Test__foo(%2 *%x) {
+entry:
+ unreachable
+
+return: ; No predecessors!
+ %bar = bitcast %2* %x to i8*
+ %foo = call i8* @objc_autoreleaseReturnValue(i8* %bar) nounwind
+ ret void
+}
+
+declare i8* @objc_autoreleaseReturnValue(i8*)
diff --git a/test/Transforms/ObjCARC/retain-block-load.ll b/test/Transforms/ObjCARC/retain-block-load.ll
new file mode 100644
index 0000000..a5170e3
--- /dev/null
+++ b/test/Transforms/ObjCARC/retain-block-load.ll
@@ -0,0 +1,51 @@
+; RUN: opt -objc-arc -S < %s | FileCheck %s
+
+; rdar://10803830
+; The optimizer should be able to prove that the block does not
+; "escape", so the retainBlock+release pair can be eliminated.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+%struct.__block_descriptor = type { i64, i64 }
+
+@_NSConcreteStackBlock = external global i8*
+@__block_descriptor_tmp = external global { i64, i64, i8*, i8* }
+
+; CHECK: define void @test() {
+; CHECK-NOT: @objc
+; CHECK: declare i8* @objc_retainBlock(i8*)
+; CHECK: declare void @objc_release(i8*)
+
+define void @test() {
+entry:
+ %block = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, align 8
+ %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* %block, i64 0, i32 0
+ store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa, align 8
+ %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* %block, i64 0, i32 1
+ store i32 1073741824, i32* %block.flags, align 8
+ %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* %block, i64 0, i32 2
+ store i32 0, i32* %block.reserved, align 4
+ %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* %block, i64 0, i32 3
+ store i8* bitcast (i32 (i8*)* @__test_block_invoke_0 to i8*), i8** %block.invoke, align 8
+ %block.descriptor = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* %block, i64 0, i32 4
+ store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp to %struct.__block_descriptor*), %struct.__block_descriptor** %block.descriptor, align 8
+ %block.captured = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* %block, i64 0, i32 5
+ store i32 4, i32* %block.captured, align 8
+ %tmp = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* %block to i8*
+ %tmp1 = call i8* @objc_retainBlock(i8* %tmp) nounwind, !clang.arc.copy_on_escape !0
+ %tmp2 = getelementptr inbounds i8* %tmp1, i64 16
+ %tmp3 = bitcast i8* %tmp2 to i8**
+ %tmp4 = load i8** %tmp3, align 8
+ %tmp5 = bitcast i8* %tmp4 to i32 (i8*)*
+ %call = call i32 %tmp5(i8* %tmp1)
+ call void @objc_release(i8* %tmp1) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
+declare i32 @__test_block_invoke_0(i8* nocapture %.block_descriptor) nounwind readonly
+
+declare i8* @objc_retainBlock(i8*)
+
+declare void @objc_release(i8*)
+
+!0 = metadata !{}
diff --git a/test/Transforms/ObjCARC/retain-block.ll b/test/Transforms/ObjCARC/retain-block.ll
new file mode 100644
index 0000000..b3b62d3
--- /dev/null
+++ b/test/Transforms/ObjCARC/retain-block.ll
@@ -0,0 +1,138 @@
+; RUN: opt -objc-arc -S < %s | FileCheck %s
+
+target datalayout = "e-p:64:64:64"
+
+!0 = metadata !{}
+
+declare i8* @objc_retain(i8*)
+declare void @callee(i8)
+declare void @use_pointer(i8*)
+declare void @objc_release(i8*)
+declare i8* @objc_retainBlock(i8*)
+declare i8* @objc_autorelease(i8*)
+
+; Basic retainBlock+release elimination.
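+; With the copy_on_escape metadata present and the block pointer never
+; stored to memory, the retainBlock/release pair is unnecessary, so both
+; calls are deleted.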
+
+; CHECK: define void @test0(i8* %tmp) {
+; CHECK-NOT: @objc
+; CHECK: }
+define void @test0(i8* %tmp) {
+entry:
+ %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind, !clang.arc.copy_on_escape !0
+ tail call void @use_pointer(i8* %tmp2)
+ tail call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
+; Same as test0, but there's no copy_on_escape metadata, so there's no
+; optimization possible.
+
+; CHECK: define void @test0_no_metadata(i8* %tmp) {
+; CHECK: %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind
+; CHECK: tail call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
+; CHECK: }
+define void @test0_no_metadata(i8* %tmp) {
+entry:
+ %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind
+ tail call void @use_pointer(i8* %tmp2)
+ tail call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
+; Same as test0, but the pointer escapes, so there's no
+; optimization possible.
+
+; CHECK: define void @test0_escape(i8* %tmp, i8** %z) {
+; CHECK: %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind, !clang.arc.copy_on_escape !0
+; CHECK: tail call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
+; CHECK: }
+define void @test0_escape(i8* %tmp, i8** %z) {
+entry:
+ %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind, !clang.arc.copy_on_escape !0
+ store i8* %tmp2, i8** %z
+ tail call void @use_pointer(i8* %tmp2)
+ tail call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
+; Same as test0_escape, but there's no intervening call.
+
+; CHECK: define void @test0_just_escape(i8* %tmp, i8** %z) {
+; CHECK: %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind, !clang.arc.copy_on_escape !0
+; CHECK: tail call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
+; CHECK: }
+define void @test0_just_escape(i8* %tmp, i8** %z) {
+entry:
+ %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind, !clang.arc.copy_on_escape !0
+ store i8* %tmp2, i8** %z
+ tail call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
+; Basic nested retainBlock+release elimination.
+
+; CHECK: define void @test1(i8* %tmp) {
+; CHECK-NOT: @objc
+; CHECK: tail call i8* @objc_retain(i8* %tmp) nounwind
+; CHECK-NOT: @objc
+; CHECK: tail call void @objc_release(i8* %tmp) nounwind, !clang.imprecise_release !0
+; CHECK-NOT: @objc
+; CHECK: }
+define void @test1(i8* %tmp) {
+entry:
+ %tmp1 = tail call i8* @objc_retain(i8* %tmp) nounwind
+ %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind, !clang.arc.copy_on_escape !0
+ tail call void @use_pointer(i8* %tmp2)
+ tail call void @use_pointer(i8* %tmp2)
+ tail call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
+ tail call void @objc_release(i8* %tmp) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
+; Same as test1, but there's no copy_on_escape metadata, so there's no
+; retainBlock+release optimization possible. But we can still eliminate
+; the outer retain+release.
+
+; CHECK: define void @test1_no_metadata(i8* %tmp) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: tail call i8* @objc_retainBlock(i8* %tmp) nounwind
+; CHECK-NEXT: @use_pointer(i8* %tmp2)
+; CHECK-NEXT: @use_pointer(i8* %tmp2)
+; CHECK-NEXT: tail call void @objc_release(i8* %tmp) nounwind, !clang.imprecise_release !0
+; CHECK-NOT: @objc
+; CHECK: }
+define void @test1_no_metadata(i8* %tmp) {
+entry:
+ %tmp1 = tail call i8* @objc_retain(i8* %tmp) nounwind
+ %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind
+ tail call void @use_pointer(i8* %tmp2)
+ tail call void @use_pointer(i8* %tmp2)
+ tail call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
+ tail call void @objc_release(i8* %tmp) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
+; Same as test1, but the pointer escapes, so there's no
+; retainBlock+release optimization possible. But we can still eliminate
+; the outer retain+release.
+
+; CHECK: define void @test1_escape(i8* %tmp, i8** %z) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind, !clang.arc.copy_on_escape !0
+; CHECK-NEXT: store i8* %tmp2, i8** %z
+; CHECK-NEXT: @use_pointer(i8* %tmp2)
+; CHECK-NEXT: @use_pointer(i8* %tmp2)
+; CHECK-NEXT: tail call void @objc_release(i8* %tmp) nounwind, !clang.imprecise_release !0
+; CHECK-NOT: @objc
+; CHECK: }
+define void @test1_escape(i8* %tmp, i8** %z) {
+entry:
+ %tmp1 = tail call i8* @objc_retain(i8* %tmp) nounwind
+ %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind, !clang.arc.copy_on_escape !0
+ store i8* %tmp2, i8** %z
+ tail call void @use_pointer(i8* %tmp2)
+ tail call void @use_pointer(i8* %tmp2)
+ tail call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
+ tail call void @objc_release(i8* %tmp) nounwind, !clang.imprecise_release !0
+ ret void
+}