author    Pirama Arumuga Nainar <pirama@google.com>                 2015-04-10 21:22:52 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com> 2015-04-10 21:23:04 +0000
commit    31195f0bdca6ee2a5e72d07edf13e1d81206d949 (patch)
tree      1b2c9792582e12f5af0b1512e3094425f0dc0df9 /test/CodeGen/X86/vector-idiv.ll
parent    c75239e6119d0f9a74c57099d91cbc9bde56bf33 (diff)
parent    4c5e43da7792f75567b693105cc53e3f1992ad98 (diff)
Merge "Update aosp/master llvm for rebase to r233350"
Diffstat (limited to 'test/CodeGen/X86/vector-idiv.ll')
-rw-r--r--  test/CodeGen/X86/vector-idiv.ll  278
1 file changed, 143 insertions(+), 135 deletions(-)
diff --git a/test/CodeGen/X86/vector-idiv.ll b/test/CodeGen/X86/vector-idiv.ll
index 06ce543..2e482a0 100644
--- a/test/CodeGen/X86/vector-idiv.ll
+++ b/test/CodeGen/X86/vector-idiv.ll
@@ -4,7 +4,7 @@
 
 target triple = "x86_64-unknown-unknown"
 
-define <4 x i32> @test1(<4 x i32> %a) {
+define <4 x i32> @test1(<4 x i32> %a) #0 {
 ; SSE41-LABEL: test1:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
@@ -55,7 +55,7 @@ define <4 x i32> @test1(<4 x i32> %a) {
   ret <4 x i32> %div
 }
 
-define <8 x i32> @test2(<8 x i32> %a) {
+define <8 x i32> @test2(<8 x i32> %a) #0 {
 ; SSE41-LABEL: test2:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
@@ -126,7 +126,7 @@ define <8 x i32> @test2(<8 x i32> %a) {
   ret <8 x i32> %div
 }
 
-define <8 x i16> @test3(<8 x i16> %a) {
+define <8 x i16> @test3(<8 x i16> %a) #0 {
 ; SSE41-LABEL: test3:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
@@ -159,7 +159,7 @@ define <8 x i16> @test3(<8 x i16> %a) {
   ret <8 x i16> %div
 }
 
-define <16 x i16> @test4(<16 x i16> %a) {
+define <16 x i16> @test4(<16 x i16> %a) #0 {
 ; SSE41-LABEL: test4:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9363,9363,9363,9363,9363,9363,9363,9363]
@@ -204,7 +204,7 @@ define <16 x i16> @test4(<16 x i16> %a) {
   ret <16 x i16> %div
 }
 
-define <8 x i16> @test5(<8 x i16> %a) {
+define <8 x i16> @test5(<8 x i16> %a) #0 {
 ; SSE41-LABEL: test5:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: pmulhw {{.*}}(%rip), %xmm0
@@ -234,7 +234,7 @@ define <8 x i16> @test5(<8 x i16> %a) {
   ret <8 x i16> %div
 }
 
-define <16 x i16> @test6(<16 x i16> %a) {
+define <16 x i16> @test6(<16 x i16> %a) #0 {
 ; SSE41-LABEL: test6:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [18725,18725,18725,18725,18725,18725,18725,18725]
@@ -276,7 +276,7 @@ define <16 x i16> @test6(<16 x i16> %a) {
   ret <16 x i16> %div
 }
 
-define <16 x i8> @test7(<16 x i8> %a) {
+define <16 x i8> @test7(<16 x i8> %a) #0 {
 ; SSE41-LABEL: test7:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: pextrb $1, %xmm0, %eax
@@ -460,6 +460,9 @@ define <16 x i8> @test7(<16 x i8> %a) {
 ;
 ; SSE-LABEL: test7:
 ; SSE: # BB#0:
+; SSE-NEXT: pushq %rbp
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %rbx
 ; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
 ; SSE-NEXT: imull $-109, %eax, %ecx
@@ -471,156 +474,156 @@ define <16 x i8> @test7(<16 x i8> %a) {
 ; SSE-NEXT: addb %al, %cl
 ; SSE-NEXT: movzbl %cl, %eax
 ; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r14d
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %edx
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r9d
 ; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
-; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r11d
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r8d
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %esi
+; SSE-NEXT: imull $-109, %esi, %edi
+; SSE-NEXT: shrl $8, %edi
+; SSE-NEXT: addb %sil, %dil
+; SSE-NEXT: movb %dil, %bl
+; SSE-NEXT: shrb $7, %bl
+; SSE-NEXT: sarb $2, %dil
+; SSE-NEXT: addb %bl, %dil
+; SSE-NEXT: movzbl %dil, %esi
+; SSE-NEXT: movd %esi, %xmm1
 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: imull $-109, %eax, %esi
+; SSE-NEXT: shrl $8, %esi
+; SSE-NEXT: addb %al, %sil
+; SSE-NEXT: movb %sil, %al
 ; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: sarb $2, %sil
+; SSE-NEXT: addb %al, %sil
+; SSE-NEXT: movzbl %sil, %eax
 ; SSE-NEXT: movd %eax, %xmm2
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %ebp
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %esi
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r10d
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %edi
+; SSE-NEXT: imull $-109, %edi, %ebx
+; SSE-NEXT: shrl $8, %ebx
+; SSE-NEXT: addb %dil, %bl
+; SSE-NEXT: movb %bl, %al
 ; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: sarb $2, %bl
+; SSE-NEXT: addb %al, %bl
+; SSE-NEXT: movzbl %bl, %eax
 ; SSE-NEXT: movd %eax, %xmm0
 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: imull $-109, %edx, %eax
+; SSE-NEXT: shrl $8, %eax
+; SSE-NEXT: addb %dl, %al
+; SSE-NEXT: movb %al, %dl
+; SSE-NEXT: shrb $7, %dl
+; SSE-NEXT: sarb $2, %al
+; SSE-NEXT: addb %dl, %al
+; SSE-NEXT: movzbl %al, %eax
 ; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: imull $-109, %esi, %eax
+; SSE-NEXT: shrl $8, %eax
+; SSE-NEXT: addb %sil, %al
+; SSE-NEXT: movb %al, %dl
+; SSE-NEXT: shrb $7, %dl
+; SSE-NEXT: sarb $2, %al
+; SSE-NEXT: addb %dl, %al
+; SSE-NEXT: movzbl %al, %eax
 ; SSE-NEXT: movd %eax, %xmm2
 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: imull $-109, %ecx, %eax
+; SSE-NEXT: shrl $8, %eax
+; SSE-NEXT: addb %cl, %al
+; SSE-NEXT: movb %al, %cl
+; SSE-NEXT: shrb $7, %cl
+; SSE-NEXT: sarb $2, %al
+; SSE-NEXT: addb %cl, %al
+; SSE-NEXT: movzbl %al, %eax
 ; SSE-NEXT: movd %eax, %xmm3
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx
 ; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: imull $-109, %eax, %edx
+; SSE-NEXT: shrl $8, %edx
+; SSE-NEXT: addb %al, %dl
+; SSE-NEXT: movb %dl, %al
 ; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: sarb $2, %dl
+; SSE-NEXT: addb %al, %dl
+; SSE-NEXT: movzbl %dl, %eax
 ; SSE-NEXT: movd %eax, %xmm1
 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: imull $-109, %r14d, %eax
+; SSE-NEXT: shrl $8, %eax
+; SSE-NEXT: addb %r14b, %al
+; SSE-NEXT: movb %al, %dl
+; SSE-NEXT: shrb $7, %dl
+; SSE-NEXT: sarb $2, %al
+; SSE-NEXT: addb %dl, %al
+; SSE-NEXT: movzbl %al, %eax
 ; SSE-NEXT: movd %eax, %xmm2
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: imull $-109, %ebp, %eax
+; SSE-NEXT: shrl $8, %eax
+; SSE-NEXT: addb %bpl, %al
+; SSE-NEXT: movb %al, %dl
+; SSE-NEXT: shrb $7, %dl
+; SSE-NEXT: sarb $2, %al
+; SSE-NEXT: addb %dl, %al
+; SSE-NEXT: movzbl %al, %eax
 ; SSE-NEXT: movd %eax, %xmm0
 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: imull $-109, %r11d, %eax
+; SSE-NEXT: shrl $8, %eax
+; SSE-NEXT: addb %r11b, %al
+; SSE-NEXT: movb %al, %dl
+; SSE-NEXT: shrb $7, %dl
+; SSE-NEXT: sarb $2, %al
+; SSE-NEXT: addb %dl, %al
+; SSE-NEXT: movzbl %al, %eax
 ; SSE-NEXT: movd %eax, %xmm3
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: imull $-109, %ecx, %eax
+; SSE-NEXT: shrl $8, %eax
+; SSE-NEXT: addb %cl, %al
+; SSE-NEXT: movb %al, %cl
+; SSE-NEXT: shrb $7, %cl
+; SSE-NEXT: sarb $2, %al
+; SSE-NEXT: addb %cl, %al
+; SSE-NEXT: movzbl %al, %eax
 ; SSE-NEXT: movd %eax, %xmm2
 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: imull $-109, %r9d, %eax
+; SSE-NEXT: shrl $8, %eax
+; SSE-NEXT: addb %r9b, %al
+; SSE-NEXT: movb %al, %cl
+; SSE-NEXT: shrb $7, %cl
+; SSE-NEXT: sarb $2, %al
+; SSE-NEXT: addb %cl, %al
+; SSE-NEXT: movzbl %al, %eax
 ; SSE-NEXT: movd %eax, %xmm0
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: imull $-109, %r10d, %eax
+; SSE-NEXT: shrl $8, %eax
+; SSE-NEXT: addb %r10b, %al
+; SSE-NEXT: movb %al, %cl
+; SSE-NEXT: shrb $7, %cl
+; SSE-NEXT: sarb $2, %al
+; SSE-NEXT: addb %cl, %al
+; SSE-NEXT: movzbl %al, %eax
 ; SSE-NEXT: movd %eax, %xmm3
 ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: imull $-109, %r8d, %eax
+; SSE-NEXT: shrl $8, %eax
+; SSE-NEXT: addb %r8b, %al
+; SSE-NEXT: movb %al, %cl
+; SSE-NEXT: shrb $7, %cl
+; SSE-NEXT: sarb $2, %al
+; SSE-NEXT: addb %cl, %al
+; SSE-NEXT: movzbl %al, %eax
 ; SSE-NEXT: movd %eax, %xmm4
 ; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
 ; SSE-NEXT: imull $-109, %eax, %ecx
@@ -636,6 +639,9 @@ define <16 x i8> @test7(<16 x i8> %a) {
 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %rbp
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: test7:
@@ -821,7 +827,7 @@ define <16 x i8> @test7(<16 x i8> %a) {
   ret <16 x i8> %div
 }
 
-define <4 x i32> @test8(<4 x i32> %a) {
+define <4 x i32> @test8(<4 x i32> %a) #0 {
 ; SSE41-LABEL: test8:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
@@ -883,7 +889,7 @@ define <4 x i32> @test8(<4 x i32> %a) {
   ret <4 x i32> %div
 }
 
-define <8 x i32> @test9(<8 x i32> %a) {
+define <8 x i32> @test9(<8 x i32> %a) #0 {
 ; SSE41-LABEL: test9:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2454267027,2454267027,2454267027,2454267027]
@@ -977,7 +983,7 @@ define <8 x i32> @test9(<8 x i32> %a) {
   ret <8 x i32> %div
 }
 
-define <8 x i32> @test10(<8 x i32> %a) {
+define <8 x i32> @test10(<8 x i32> %a) #0 {
 ; SSE41-LABEL: test10:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
@@ -1075,7 +1081,7 @@ define <8 x i32> @test10(<8 x i32> %a) {
   ret <8 x i32> %rem
 }
 
-define <8 x i32> @test11(<8 x i32> %a) {
+define <8 x i32> @test11(<8 x i32> %a) #0 {
 ; SSE41-LABEL: test11:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
@@ -1188,7 +1194,7 @@ define <8 x i32> @test11(<8 x i32> %a) {
   ret <8 x i32> %rem
 }
 
-define <2 x i16> @test12() {
+define <2 x i16> @test12() #0 {
 ; SSE41-LABEL: test12:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: xorps %xmm0, %xmm0
@@ -1209,7 +1215,7 @@ define <2 x i16> @test12() {
   ret <2 x i16> %B9
 }
 
-define <4 x i32> @PR20355(<4 x i32> %a) {
+define <4 x i32> @PR20355(<4 x i32> %a) #0 {
 ; SSE41-LABEL: PR20355:
 ; SSE41: # BB#0: # %entry
 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
@@ -1263,3 +1269,5 @@ entry:
   %sdiv = sdiv <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
   ret <4 x i32> %sdiv
 }
+
+attributes #0 = { nounwind }