From e09cd8d42b7621050d2dcdccc37ee341a1b553d5 Mon Sep 17 00:00:00 2001
From: Bill Wendling
Date: Sat, 14 Dec 2013 08:01:30 +0000
Subject: Merging r197228:

------------------------------------------------------------------------
r197228 | d0k | 2013-12-13 05:40:24 -0800 (Fri, 13 Dec 2013) | 8 lines

X86: When lowering shl_parts, don't emit shift amounts larger than the bit width.

While it's safe for the X86-specific shift nodes, dag combining will kill
generic nodes. Insert an AND to make it safe, isel will nuke it as x86's shift
instructions have an implicit AND.

Fixes PR16108, which contains a contraption to hit this case in between
constant folders.
------------------------------------------------------------------------

git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_34@197321 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/legalize-shift-64.ll | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/test/CodeGen/X86/legalize-shift-64.ll b/test/CodeGen/X86/legalize-shift-64.ll
index 7736468..64460bb 100644
--- a/test/CodeGen/X86/legalize-shift-64.ll
+++ b/test/CodeGen/X86/legalize-shift-64.ll
@@ -64,3 +64,31 @@ define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) {
 ; CHECK: shl
 ; CHECK: shldl
 }
+
+; PR16108
+define i32 @test6() {
+  %x = alloca i32, align 4
+  %t = alloca i64, align 8
+  store i32 1, i32* %x, align 4
+  store i64 1, i64* %t, align 8  ;; DEAD
+  %load = load i32* %x, align 4
+  %shl = shl i32 %load, 8
+  %add = add i32 %shl, -224
+  %sh_prom = zext i32 %add to i64
+  %shl1 = shl i64 1, %sh_prom
+  %cmp = icmp ne i64 %shl1, 4294967296
+  br i1 %cmp, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  ret i32 1
+
+if.end:                                           ; preds = %entry
+  ret i32 0
+
+; CHECK-LABEL: test6:
+; CHECK-NOT: andb $31
+; CHECK: sete
+; CHECK: movzbl
+; CHECK: xorl $1
+; CHECK: orl
+}
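
Background on the "implicit AND" the commit message relies on: x86's 32-bit shift
instructions use only the low 5 bits of the count, so masking the amount with 31
before the shift does not change what the instruction computes, while it keeps the
generic DAG nodes within their defined range. The following is a minimal standalone
C++ sketch of that masking semantics, not the actual X86ISelLowering change; the
helper name is made up for illustration.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper (illustration only, not LLVM code): x86's 32-bit
    // shift instructions look at just the low 5 bits of the count, so an
    // explicit "& 31" on the amount is a no-op for the instruction itself.
    // That is why the AND the commit inserts can be folded away by isel.
    static uint32_t shlLikeX86(uint32_t value, uint32_t amount) {
      // Shifting by an unmasked out-of-range count would be undefined in C++
      // (and in a generic DAG node); the masked form is well defined.
      return value << (amount & 31u);
    }

    int main() {
      // A count of 40 is out of range for a 32-bit shift; with masking,
      // 40 & 31 == 8, so 1 << 8 == 256, matching what SHL would produce.
      std::printf("%u\n", shlLikeX86(1u, 40u));
      return 0;
    }

The same reasoning is what the test above checks at the assembly level: the explicit
mask inserted during lowering is folded by instruction selection, so no "andb $31"
appears in the output (hence the CHECK-NOT line).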