about summary refs log tree commit diff stats
path: root/test/CodeGen/X86/jump_sign.ll
diff options
context:
space:
mode:
Diffstat (limited to 'test/CodeGen/X86/jump_sign.ll')
-rw-r--r--  test/CodeGen/X86/jump_sign.ll  24
1 file changed, 11 insertions, 13 deletions
diff --git a/test/CodeGen/X86/jump_sign.ll b/test/CodeGen/X86/jump_sign.ll
index dfa8aed..31a7af3 100644
--- a/test/CodeGen/X86/jump_sign.ll
+++ b/test/CodeGen/X86/jump_sign.ll
@@ -164,7 +164,7 @@ entry:
; PR13046
define void @func_o() nounwind uwtable {
entry:
- %0 = load i16* undef, align 2
+ %0 = load i16, i16* undef, align 2
br i1 undef, label %if.then.i, label %if.end.i
if.then.i: ; preds = %entry
@@ -217,17 +217,15 @@ entry:
; PR13475
; If we have sub a, b and cmp b, a and the result of cmp is used
; by sbb, we should not optimize cmp away.
-define i32 @func_q(i32 %j.4, i32 %w, i32 %el) {
+define i32 @func_q(i32 %a0, i32 %a1, i32 %a2) {
; CHECK-LABEL: func_q:
; CHECK: cmp
; CHECK-NEXT: sbb
- %tmp532 = add i32 %j.4, %w
- %tmp533 = icmp ugt i32 %tmp532, %el
- %tmp534 = icmp ult i32 %w, %el
- %or.cond = and i1 %tmp533, %tmp534
- %tmp535 = sub i32 %el, %w
- %j.5 = select i1 %or.cond, i32 %tmp535, i32 %j.4
- ret i32 %j.5
+ %1 = icmp ult i32 %a0, %a1
+ %2 = sub i32 %a1, %a0
+ %3 = select i1 %1, i32 -1, i32 0
+ %4 = xor i32 %2, %3
+ ret i32 %4
}
; rdar://11873276
define i8* @func_r(i8* %base, i32* nocapture %offset, i32 %size) nounwind {
@@ -238,14 +236,14 @@ entry:
; CHECK: j
; CHECK-NOT: sub
; CHECK: ret
- %0 = load i32* %offset, align 8
+ %0 = load i32, i32* %offset, align 8
%cmp = icmp slt i32 %0, %size
br i1 %cmp, label %return, label %if.end
if.end:
%sub = sub nsw i32 %0, %size
store i32 %sub, i32* %offset, align 8
- %add.ptr = getelementptr inbounds i8* %base, i32 %sub
+ %add.ptr = getelementptr inbounds i8, i8* %base, i32 %sub
br label %return
return:
@@ -287,10 +285,10 @@ entry:
; CHECK: andb
; CHECK: j
; CHECK: ret
- %0 = load i32* @b, align 4
+ %0 = load i32, i32* @b, align 4
%cmp = icmp ult i32 %0, %p1
%conv = zext i1 %cmp to i32
- %1 = load i32* @a, align 4
+ %1 = load i32, i32* @a, align 4
%and = and i32 %conv, %1
%conv1 = trunc i32 %and to i8
%2 = urem i8 %conv1, 3