Diffstat (limited to 'test/CodeGen/X86/sse41.ll')
-rw-r--r-- | test/CodeGen/X86/sse41.ll | 77
1 file changed, 50 insertions(+), 27 deletions(-)
diff --git a/test/CodeGen/X86/sse41.ll b/test/CodeGen/X86/sse41.ll
index a5b07e7..ca13392 100644
--- a/test/CodeGen/X86/sse41.ll
+++ b/test/CodeGen/X86/sse41.ll
@@ -43,7 +43,7 @@ define <2 x i64> @pmovsxbd_1(i32* %p) nounwind {
 ; X64-NEXT:    pmovsxbd (%rdi), %xmm0
 ; X64-NEXT:    retq
 entry:
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %1 = insertelement <4 x i32> undef, i32 %0, i32 0
   %2 = insertelement <4 x i32> %1, i32 0, i32 1
   %3 = insertelement <4 x i32> %2, i32 0, i32 2
@@ -66,7 +66,7 @@ define <2 x i64> @pmovsxwd_1(i64* %p) nounwind readonly {
 ; X64-NEXT:    pmovsxwd (%rdi), %xmm0
 ; X64-NEXT:    retq
 entry:
-  %0 = load i64* %p ; <i64> [#uses=1]
+  %0 = load i64, i64* %p ; <i64> [#uses=1]
   %tmp2 = insertelement <2 x i64> zeroinitializer, i64 %0, i32 0 ; <<2 x i64>> [#uses=1]
   %1 = bitcast <2 x i64> %tmp2 to <8 x i16> ; <<8 x i16>> [#uses=1]
   %2 = tail call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %1) nounwind readnone ; <<4 x i32>> [#uses=1]
@@ -87,7 +87,7 @@ define <2 x i64> @pmovzxbq_1() nounwind {
 ; X64-NEXT:    pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; X64-NEXT:    retq
 entry:
-  %0 = load i16* @g16, align 2 ; <i16> [#uses=1]
+  %0 = load i16, i16* @g16, align 2 ; <i16> [#uses=1]
   %1 = insertelement <8 x i16> undef, i16 %0, i32 0 ; <<8 x i16>> [#uses=1]
   %2 = bitcast <8 x i16> %1 to <16 x i8> ; <<16 x i8>> [#uses=1]
   %3 = tail call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %2) nounwind readnone ; <<2 x i64>> [#uses=1]
@@ -199,28 +199,51 @@ define <4 x float> @insertps_1(<4 x float> %t1, <4 x float> %t2) nounwind {
 
 declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) nounwind readnone
 
-define <4 x float> @insertps_2(<4 x float> %t1, float %t2) nounwind {
-; X32-LABEL: insertps_2:
+; When optimizing for speed, prefer blendps over insertps even if it means we have to
+; generate a separate movss to load the scalar operand.
+define <4 x float> @blendps_not_insertps_1(<4 x float> %t1, float %t2) nounwind {
+; X32-LABEL: blendps_not_insertps_1:
+; X32:       ## BB#0:
+; X32-NEXT:    movss {{.*#+}} xmm1
+; X32-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; X32-NEXT:    retl
+;
+; X64-LABEL: blendps_not_insertps_1:
+; X64:       ## BB#0:
+; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; X64-NEXT:    retq
+  %tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
+  ret <4 x float> %tmp1
+}
+
+; When optimizing for size, generate an insertps if there's a load fold opportunity.
+; The difference between i386 and x86-64 ABIs for the float operand means we should
+; generate an insertps for X32 but not for X64!
+define <4 x float> @insertps_or_blendps(<4 x float> %t1, float %t2) minsize nounwind {
+; X32-LABEL: insertps_or_blendps:
 ; X32:       ## BB#0:
 ; X32-NEXT:    insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
 ; X32-NEXT:    retl
 ;
-; X64-LABEL: insertps_2:
+; X64-LABEL: insertps_or_blendps:
 ; X64:       ## BB#0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X64-NEXT:    retq
   %tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
   ret <4 x float> %tmp1
 }
-define <4 x float> @insertps_3(<4 x float> %t1, <4 x float> %t2) nounwind {
-; X32-LABEL: insertps_3:
+
+; An insert into the low 32-bits of a vector from the low 32-bits of another vector
+; is always just a blendps because blendps is never more expensive than insertps.
+define <4 x float> @blendps_not_insertps_2(<4 x float> %t1, <4 x float> %t2) nounwind {
+; X32-LABEL: blendps_not_insertps_2:
 ; X32:       ## BB#0:
-; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; X32-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X32-NEXT:    retl
 ;
-; X64-LABEL: insertps_3:
+; X64-LABEL: blendps_not_insertps_2:
 ; X64:       ## BB#0:
-; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X64-NEXT:    retq
   %tmp2 = extractelement <4 x float> %t2, i32 0
   %tmp1 = insertelement <4 x float> %t1, float %tmp2, i32 0
@@ -330,7 +353,7 @@ define <4 x float> @insertps_from_shufflevector_1(<4 x float> %a, <4 x float>* n
 ; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
 ; X64-NEXT:    retq
 entry:
-  %0 = load <4 x float>* %pb, align 16
+  %0 = load <4 x float>, <4 x float>* %pb, align 16
   %vecinit6 = shufflevector <4 x float> %a, <4 x float> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
   ret <4 x float> %vecinit6
 }
@@ -366,7 +389,7 @@ define <4 x i32> @pinsrd_from_shufflevector_i32(<4 x i32> %a, <4 x i32>* nocaptu
 ; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
 ; X64-NEXT:    retq
 entry:
-  %0 = load <4 x i32>* %pb, align 16
+  %0 = load <4 x i32>, <4 x i32>* %pb, align 16
   %vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
   ret <4 x i32> %vecinit6
 }
@@ -399,7 +422,7 @@ define <4 x float> @insertps_from_load_ins_elt_undef(<4 x float> %a, float* %b)
 ; X64:       ## BB#0:
 ; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
 ; X64-NEXT:    retq
-  %1 = load float* %b, align 4
+  %1 = load float, float* %b, align 4
   %2 = insertelement <4 x float> undef, float %1, i32 0
   %result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
   ret <4 x float> %result
@@ -421,7 +444,7 @@ define <4 x i32> @insertps_from_load_ins_elt_undef_i32(<4 x i32> %a, i32* %b) {
 ; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
 ; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
 ; X64-NEXT:    retq
-  %1 = load i32* %b, align 4
+  %1 = load i32, i32* %b, align 4
   %2 = insertelement <4 x i32> undef, i32 %1, i32 0
   %result = shufflevector <4 x i32> %a, <4 x i32> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
   ret <4 x i32> %result
@@ -823,7 +846,7 @@ define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocap
 ; X64:       ## BB#0:
 ; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
 ; X64-NEXT:    retq
-  %1 = load <4 x float>* %pb, align 16
+  %1 = load <4 x float>, <4 x float>* %pb, align 16
   %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
   ret <4 x float> %2
 }
@@ -841,7 +864,7 @@ define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>
 ; X64:       ## BB#0:
 ; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[1],xmm0[3]
 ; X64-NEXT:    retq
-  %1 = load <4 x float>* %pb, align 16
+  %1 = load <4 x float>, <4 x float>* %pb, align 16
   %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
   ret <4 x float> %2
 }
@@ -861,8 +884,8 @@ define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x floa
 ; X64-NEXT:    shlq $4, %rsi
 ; X64-NEXT:    insertps {{.*#+}} xmm0 = mem[3],xmm0[1,2,3]
 ; X64-NEXT:    retq
-  %1 = getelementptr inbounds <4 x float>* %pb, i64 %index
-  %2 = load <4 x float>* %1, align 16
+  %1 = getelementptr inbounds <4 x float>, <4 x float>* %pb, i64 %index
+  %2 = load <4 x float>, <4 x float>* %1, align 16
   %3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
   ret <4 x float> %3
 }
@@ -883,8 +906,8 @@ define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocap
 ; X64-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; X64-NEXT:    retq
-  %1 = getelementptr inbounds float* %fb, i64 %index
-  %2 = load float* %1, align 4
+  %1 = getelementptr inbounds float, float* %fb, i64 %index
+  %2 = load float, float* %1, align 4
   %3 = insertelement <4 x float> undef, float %2, i32 0
   %4 = insertelement <4 x float> %3, float %2, i32 1
   %5 = insertelement <4 x float> %4, float %2, i32 2
@@ -908,7 +931,7 @@ define <4 x float> @insertps_from_broadcast_loadv4f32(<4 x float> %a, <4 x float
 ; X64-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; X64-NEXT:    retq
-  %1 = load <4 x float>* %b, align 4
+  %1 = load <4 x float>, <4 x float>* %b, align 4
   %2 = extractelement <4 x float> %1, i32 0
   %3 = insertelement <4 x float> undef, float %2, i32 0
   %4 = insertelement <4 x float> %3, float %2, i32 1
@@ -947,8 +970,8 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
 ; X64-NEXT:    addps %xmm2, %xmm3
 ; X64-NEXT:    addps %xmm3, %xmm0
 ; X64-NEXT:    retq
-  %1 = getelementptr inbounds float* %fb, i64 %index
-  %2 = load float* %1, align 4
+  %1 = getelementptr inbounds float, float* %fb, i64 %index
+  %2 = load float, float* %1, align 4
   %3 = insertelement <4 x float> undef, float %2, i32 0
   %4 = insertelement <4 x float> %3, float %2, i32 1
   %5 = insertelement <4 x float> %4, float %2, i32 2
@@ -978,7 +1001,7 @@ define <4 x float> @insertps_with_undefs(<4 x float> %a, float* %b) {
 ; X64-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; X64-NEXT:    movapd %xmm1, %xmm0
 ; X64-NEXT:    retq
-  %1 = load float* %b, align 4
+  %1 = load float, float* %b, align 4
   %2 = insertelement <4 x float> undef, float %1, i32 0
   %result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 4, i32 undef, i32 0, i32 7>
   ret <4 x float> %result
@@ -997,7 +1020,7 @@ define <4 x float> @pr20087(<4 x float> %a, <4 x float> *%ptr) {
 ; X64:       ## BB#0:
 ; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],mem[2]
 ; X64-NEXT:    retq
-  %load = load <4 x float> *%ptr
+  %load = load <4 x float> , <4 x float> *%ptr
   %ret = shufflevector <4 x float> %load, <4 x float> %a, <4 x i32> <i32 4, i32 undef, i32 6, i32 2>
   ret <4 x float> %ret
 }
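For context on the X32/X64 check prefixes used throughout this test: sse41.ll is a FileCheck-driven test, and each prefix corresponds to a separate llc invocation. The RUN lines below are only a sketch of that setup; the exact triples and CPU are assumptions and are not part of this diff.

; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=penryn | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=penryn | FileCheck %s --check-prefix=X64

The two prefixes matter for the blendps/insertps cases above: as the test's own comments note, the i386 calling convention passes the float operand in memory, so the X32 checks can fold the load into an insertps, while x86-64 passes it in %xmm1 and the X64 checks expect a register blendps.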