Diffstat (limited to 'test/CodeGen/X86/vector-idiv.ll')
-rw-r--r-- | test/CodeGen/X86/vector-idiv.ll | 436 |
1 files changed, 223 insertions, 213 deletions
diff --git a/test/CodeGen/X86/vector-idiv.ll b/test/CodeGen/X86/vector-idiv.ll
index 4b269dc..06ce543 100644
--- a/test/CodeGen/X86/vector-idiv.ll
+++ b/test/CodeGen/X86/vector-idiv.ll
@@ -8,16 +8,15 @@ define <4 x i32> @test1(<4 x i32> %a) {
 ; SSE41-LABEL: test1:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: pmuludq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
 ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm1, %xmm3
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE41-NEXT: psubd %xmm2, %xmm0
+; SSE41-NEXT: pmuludq %xmm2, %xmm3
+; SSE41-NEXT: pmuludq %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT: psubd %xmm1, %xmm0
 ; SSE41-NEXT: psrld $1, %xmm0
-; SSE41-NEXT: paddd %xmm2, %xmm0
+; SSE41-NEXT: paddd %xmm1, %xmm0
 ; SSE41-NEXT: psrld $2, %xmm0
 ; SSE41-NEXT: retq
 ;
@@ -26,11 +25,12 @@ define <4 x i32> @test1(<4 x i32> %a) {
 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
 ; SSE-NEXT: movdqa %xmm0, %xmm2
 ; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
 ; SSE-NEXT: pmuludq %xmm1, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; SSE-NEXT: psubd %xmm2, %xmm0
 ; SSE-NEXT: psrld $1, %xmm0
 ; SSE-NEXT: paddd %xmm2, %xmm0
@@ -40,12 +40,12 @@ define <4 x i32> @test1(<4 x i32> %a) {
 ; AVX-LABEL: test1:
 ; AVX: # BB#0:
 ; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
-; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
 ; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; AVX-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
 ; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
 ; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
@@ -59,22 +59,22 @@ define <8 x i32> @test2(<8 x i32> %a) {
 ; SSE41-LABEL: test2:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: pmuludq %xmm2, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm4, %xmm5
-; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3]
-; SSE41-NEXT: psubd %xmm3, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pmuludq %xmm3, %xmm4
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pmuludq %xmm2, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
+; SSE41-NEXT: psubd %xmm5, %xmm0
 ; SSE41-NEXT: psrld $1, %xmm0
-; SSE41-NEXT: paddd %xmm3, %xmm0
+; SSE41-NEXT: paddd %xmm5, %xmm0
 ; SSE41-NEXT: psrld $2, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; SSE41-NEXT: pmuludq %xmm3, %xmm4
 ; SSE41-NEXT: pmuludq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm4, %xmm3
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
 ; SSE41-NEXT: psubd %xmm2, %xmm1
 ; SSE41-NEXT: psrld $1, %xmm1
 ; SSE41-NEXT: paddd %xmm2, %xmm1
@@ -86,20 +86,22 @@ define <8 x i32> @test2(<8 x i32> %a) {
 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
 ; SSE-NEXT: movdqa %xmm0, %xmm3
 ; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
 ; SSE-NEXT: pmuludq %xmm4, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
 ; SSE-NEXT: psubd %xmm3, %xmm0
 ; SSE-NEXT: psrld $1, %xmm0
 ; SSE-NEXT: paddd %xmm3, %xmm0
 ; SSE-NEXT: psrld $2, %xmm0
 ; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
 ; SSE-NEXT: pmuludq %xmm4, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; SSE-NEXT: psubd %xmm2, %xmm1
 ; SSE-NEXT: psrld $1, %xmm1
 ; SSE-NEXT: paddd %xmm2, %xmm1
@@ -822,14 +824,13 @@ define <16 x i8> @test7(<16 x i8> %a) {
 define <4 x i32> @test8(<4 x i32> %a) {
 ; SSE41-LABEL: test8:
 ; SSE41: # BB#0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pmuldq %xmm2, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
 ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
 ; SSE41-NEXT: pmuldq %xmm2, %xmm3
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; SSE41-NEXT: pmuldq %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
 ; SSE41-NEXT: paddd %xmm0, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
 ; SSE41-NEXT: psrld $31, %xmm0
@@ -840,22 +841,22 @@ define <4 x i32> @test8(<4 x i32> %a) {
 ;
 ; SSE-LABEL: test8:
 ; SSE: # BB#0:
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrad $31, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: pmuludq %xmm1, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
 ; SSE-NEXT: psrad $31, %xmm1
 ; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrad $31, %xmm3
-; SSE-NEXT: pand %xmm2, %xmm3
-; SSE-NEXT: paddd %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pmuludq %xmm2, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm2, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm4[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; SSE-NEXT: psubd %xmm3, %xmm1
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm4, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE-NEXT: psubd %xmm2, %xmm1
 ; SSE-NEXT: paddd %xmm0, %xmm1
 ; SSE-NEXT: movdqa %xmm1, %xmm0
 ; SSE-NEXT: psrld $31, %xmm0
@@ -867,12 +868,12 @@ define <4 x i32> @test8(<4 x i32> %a) {
 ; AVX-LABEL: test8:
 ; AVX: # BB#0:
 ; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
-; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
 ; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT: vpmuldq %xmm1, %xmm3, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; AVX-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
 ; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: vpsrld $31, %xmm0, %xmm1
 ; AVX-NEXT: vpsrad $2, %xmm0, %xmm0
@@ -885,75 +886,77 @@ define <4 x i32> @test8(<4 x i32> %a) {
 define <8 x i32> @test9(<8 x i32> %a) {
 ; SSE41-LABEL: test9:
 ; SSE41: # BB#0:
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
-; SSE41-NEXT: # kill: XMM0<def> XMM3<kill>
-; SSE41-NEXT: pmuldq %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3]
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2454267027,2454267027,2454267027,2454267027]
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
 ; SSE41-NEXT: pmuldq %xmm4, %xmm5
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm5[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE41-NEXT: paddd %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrld $31, %xmm3
-; SSE41-NEXT: psrad $2, %xmm0
-; SSE41-NEXT: paddd %xmm3, %xmm0
-; SSE41-NEXT: pmuldq %xmm2, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; SSE41-NEXT: pmuldq %xmm4, %xmm3
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; SSE41-NEXT: paddd %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrld $31, %xmm2
-; SSE41-NEXT: psrad $2, %xmm1
-; SSE41-NEXT: paddd %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pmuldq %xmm3, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
+; SSE41-NEXT: paddd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: psrld $31, %xmm0
+; SSE41-NEXT: psrad $2, %xmm2
+; SSE41-NEXT: paddd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: pmuldq %xmm4, %xmm0
+; SSE41-NEXT: pmuldq %xmm1, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
+; SSE41-NEXT: paddd %xmm1, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: psrld $31, %xmm0
+; SSE41-NEXT: psrad $2, %xmm3
+; SSE41-NEXT: paddd %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm3, %xmm1
 ; SSE41-NEXT: retq
 ;
 ; SSE-LABEL: test9:
 ; SSE: # BB#0:
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
-; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [2454267027,2454267027,2454267027,2454267027]
+; SSE-NEXT: movdqa %xmm3, %xmm4
 ; SSE-NEXT: psrad $31, %xmm4
 ; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: pand %xmm3, %xmm0
-; SSE-NEXT: movdqa %xmm3, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm5
 ; SSE-NEXT: psrad $31, %xmm5
-; SSE-NEXT: pand %xmm1, %xmm5
+; SSE-NEXT: pand %xmm3, %xmm5
 ; SSE-NEXT: paddd %xmm0, %xmm5
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: pmuludq %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,1,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: pmuludq %xmm3, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
 ; SSE-NEXT: pmuludq %xmm6, %xmm7
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm7[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
 ; SSE-NEXT: psubd %xmm5, %xmm0
-; SSE-NEXT: paddd %xmm3, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrld $31, %xmm3
-; SSE-NEXT: psrad $2, %xmm0
-; SSE-NEXT: paddd %xmm3, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: psrad $31, %xmm3
-; SSE-NEXT: pand %xmm1, %xmm3
-; SSE-NEXT: paddd %xmm4, %xmm3
-; SSE-NEXT: pmuludq %xmm2, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm6, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm4[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; SSE-NEXT: psubd %xmm3, %xmm1
-; SSE-NEXT: paddd %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm2
 ; SSE-NEXT: psrld $31, %xmm2
-; SSE-NEXT: psrad $2, %xmm1
-; SSE-NEXT: paddd %xmm2, %xmm1
+; SSE-NEXT: psrad $2, %xmm0
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm5
+; SSE-NEXT: psrad $31, %xmm5
+; SSE-NEXT: pand %xmm3, %xmm5
+; SSE-NEXT: paddd %xmm4, %xmm5
+; SSE-NEXT: pmuludq %xmm1, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm6, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: psubd %xmm5, %xmm2
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: psrld $31, %xmm1
+; SSE-NEXT: psrad $2, %xmm2
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm1
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: test9:
@@ -978,72 +981,76 @@ define <8 x i32> @test10(<8 x i32> %a) {
 ; SSE41-LABEL: test10:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: pmuludq %xmm2, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm4, %xmm5
-; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pmuludq %xmm3, %xmm4
 ; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: psubd %xmm3, %xmm5
-; SSE41-NEXT: psrld $1, %xmm5
-; SSE41-NEXT: paddd %xmm3, %xmm5
-; SSE41-NEXT: psrld $2, %xmm5
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7]
-; SSE41-NEXT: pmulld %xmm3, %xmm5
-; SSE41-NEXT: psubd %xmm5, %xmm0
-; SSE41-NEXT: pmuludq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm4, %xmm5
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm5[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: psubd %xmm2, %xmm4
+; SSE41-NEXT: pmuludq %xmm2, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: psubd %xmm5, %xmm4
 ; SSE41-NEXT: psrld $1, %xmm4
-; SSE41-NEXT: paddd %xmm2, %xmm4
+; SSE41-NEXT: paddd %xmm5, %xmm4
 ; SSE41-NEXT: psrld $2, %xmm4
-; SSE41-NEXT: pmulld %xmm3, %xmm4
-; SSE41-NEXT: psubd %xmm4, %xmm1
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [7,7,7,7]
+; SSE41-NEXT: pmulld %xmm5, %xmm4
+; SSE41-NEXT: psubd %xmm4, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; SSE41-NEXT: pmuludq %xmm3, %xmm4
+; SSE41-NEXT: pmuludq %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psubd %xmm2, %xmm3
+; SSE41-NEXT: psrld $1, %xmm3
+; SSE41-NEXT: paddd %xmm2, %xmm3
+; SSE41-NEXT: psrld $2, %xmm3
+; SSE41-NEXT: pmulld %xmm5, %xmm3
+; SSE41-NEXT: psubd %xmm3, %xmm1
 ; SSE41-NEXT: retq
 ;
 ; SSE-LABEL: test10:
 ; SSE: # BB#0:
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pmuludq %xmm2, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [613566757,613566757,613566757,613566757]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pmuludq %xmm3, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
 ; SSE-NEXT: pmuludq %xmm4, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
 ; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: psubd %xmm3, %xmm5
+; SSE-NEXT: psubd %xmm2, %xmm5
 ; SSE-NEXT: psrld $1, %xmm5
-; SSE-NEXT: paddd %xmm3, %xmm5
+; SSE-NEXT: paddd %xmm2, %xmm5
 ; SSE-NEXT: psrld $2, %xmm5
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [7,7,7,7]
 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm3, %xmm5
-; SSE-NEXT: pmuludq %xmm3, %xmm6
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2,1,3]
+; SSE-NEXT: pmuludq %xmm2, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; SSE-NEXT: pmuludq %xmm2, %xmm6
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
 ; SSE-NEXT: psubd %xmm5, %xmm0
-; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pmuludq %xmm1, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
 ; SSE-NEXT: pmuludq %xmm4, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm5[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
 ; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: psubd %xmm2, %xmm4
+; SSE-NEXT: psubd %xmm3, %xmm4
 ; SSE-NEXT: psrld $1, %xmm4
-; SSE-NEXT: paddd %xmm2, %xmm4
+; SSE-NEXT: paddd %xmm3, %xmm4
 ; SSE-NEXT: psrld $2, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm3, %xmm4
-; SSE-NEXT: pmuludq %xmm3, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm2[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm2, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
 ; SSE-NEXT: psubd %xmm4, %xmm1
 ; SSE-NEXT: retq
 ;
@@ -1072,32 +1079,32 @@ define <8 x i32> @test11(<8 x i32> %a) {
 ; SSE41-LABEL: test11:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: pmuldq %xmm2, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuldq %xmm4, %xmm5
-; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3]
-; SSE41-NEXT: paddd %xmm0, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm5
-; SSE41-NEXT: psrld $31, %xmm5
-; SSE41-NEXT: psrad $2, %xmm3
-; SSE41-NEXT: paddd %xmm5, %xmm3
-; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [7,7,7,7]
-; SSE41-NEXT: pmulld %xmm5, %xmm3
-; SSE41-NEXT: psubd %xmm3, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pmuldq %xmm3, %xmm4
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pmuldq %xmm2, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
+; SSE41-NEXT: paddd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm5, %xmm4
+; SSE41-NEXT: psrld $31, %xmm4
+; SSE41-NEXT: psrad $2, %xmm5
+; SSE41-NEXT: paddd %xmm4, %xmm5
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [7,7,7,7]
+; SSE41-NEXT: pmulld %xmm4, %xmm5
+; SSE41-NEXT: psubd %xmm5, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; SSE41-NEXT: pmuldq %xmm3, %xmm5
 ; SSE41-NEXT: pmuldq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; SSE41-NEXT: pmuldq %xmm4, %xmm3
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
 ; SSE41-NEXT: paddd %xmm1, %xmm2
 ; SSE41-NEXT: movdqa %xmm2, %xmm3
 ; SSE41-NEXT: psrld $31, %xmm3
 ; SSE41-NEXT: psrad $2, %xmm2
 ; SSE41-NEXT: paddd %xmm3, %xmm2
-; SSE41-NEXT: pmulld %xmm5, %xmm2
+; SSE41-NEXT: pmulld %xmm4, %xmm2
 ; SSE41-NEXT: psubd %xmm2, %xmm1
 ; SSE41-NEXT: retq
 ;
@@ -1112,13 +1119,14 @@ define <8 x i32> @test11(<8 x i32> %a) {
 ; SSE-NEXT: psrad $31, %xmm6
 ; SSE-NEXT: pand %xmm2, %xmm6
 ; SSE-NEXT: paddd %xmm4, %xmm6
-; SSE-NEXT: movdqa %xmm0, %xmm7
-; SSE-NEXT: pmuludq %xmm2, %xmm7
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pmuludq %xmm2, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,3,2,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,1,3,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
 ; SSE-NEXT: pmuludq %xmm5, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,3],xmm4[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
 ; SSE-NEXT: psubd %xmm6, %xmm7
 ; SSE-NEXT: paddd %xmm0, %xmm7
 ; SSE-NEXT: movdqa %xmm7, %xmm4
@@ -1128,9 +1136,10 @@ define <8 x i32> @test11(<8 x i32> %a) {
 ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [7,7,7,7]
 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
 ; SSE-NEXT: pmuludq %xmm4, %xmm7
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
 ; SSE-NEXT: pmuludq %xmm4, %xmm6
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm6[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
 ; SSE-NEXT: psubd %xmm7, %xmm0
 ; SSE-NEXT: pand %xmm1, %xmm3
 ; SSE-NEXT: movdqa %xmm1, %xmm6
@@ -1138,10 +1147,11 @@ define <8 x i32> @test11(<8 x i32> %a) {
 ; SSE-NEXT: pand %xmm2, %xmm6
 ; SSE-NEXT: paddd %xmm3, %xmm6
 ; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
 ; SSE-NEXT: pmuludq %xmm5, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; SSE-NEXT: psubd %xmm6, %xmm2
 ; SSE-NEXT: paddd %xmm1, %xmm2
 ; SSE-NEXT: movdqa %xmm2, %xmm3
@@ -1150,9 +1160,10 @@ define <8 x i32> @test11(<8 x i32> %a) {
 ; SSE-NEXT: paddd %xmm3, %xmm2
 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
 ; SSE-NEXT: pmuludq %xmm4, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
 ; SSE-NEXT: pmuludq %xmm4, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; SSE-NEXT: psubd %xmm2, %xmm1
 ; SSE-NEXT: retq
 ;
@@ -1202,16 +1213,15 @@ define <4 x i32> @PR20355(<4 x i32> %a) {
 ; SSE41-LABEL: PR20355:
 ; SSE41: # BB#0: # %entry
 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT: pmuldq %xmm2, %xmm3
 ; SSE41-NEXT: pmuldq %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT: pmuldq %xmm2, %xmm1
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE41-NEXT: movaps %xmm0, %xmm1
-; SSE41-NEXT: psrld $31, %xmm1
-; SSE41-NEXT: paddd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $31, %xmm0
+; SSE41-NEXT: paddd %xmm1, %xmm0
 ; SSE41-NEXT: retq
 ;
 ; SSE-LABEL: PR20355:
@@ -1226,26 +1236,26 @@ define <4 x i32> @PR20355(<4 x i32> %a) {
 ; SSE-NEXT: paddd %xmm2, %xmm3
 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; SSE-NEXT: pmuludq %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm2, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE-NEXT: psubd %xmm3, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $31, %xmm1
-; SSE-NEXT: paddd %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm2, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
+; SSE-NEXT: psubd %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: psrld $31, %xmm0
+; SSE-NEXT: paddd %xmm4, %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: PR20355:
 ; AVX: # BB#0: # %entry
 ; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
-; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
 ; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[1,3],xmm0[1,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; AVX-NEXT: vpsrld $31, %xmm0, %xmm1
 ; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq