author     Evan Cheng <evan.cheng@apple.com>   2007-12-15 03:00:47 +0000
committer  Evan Cheng <evan.cheng@apple.com>   2007-12-15 03:00:47 +0000
commit     15e8f5a81feebb70de587edf9622c2aff8284564 (patch)
tree       606265dd9805e5c0a9cc8b99a8a94df2bb628f25 /lib
parent     d606e673d9979acfea51216471c7de4ba1f07cae (diff)
Make better use of instructions that clear high bits; fix various 2-wide shuffle bugs.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@45058 91177308-0d34-0410-b5e6-96231b3b80d8
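
The hardware behavior this commit leans on: with SSE2, movq between XMM registers copies the low 64 bits and zeroes the upper 64 bits, which is what the new MOVZPQILo2PQIrr / MOVZPQILo2PQIrm patterns below exploit (the patch itself notes the IA-32 manuals of the era describe this ambiguously). A minimal illustration of the kind of source this helps, written with SSE2 intrinsics; the function name zero_high64 is illustrative and not part of the patch:

#include <emmintrin.h>

// Keep the low 64 bits of v and clear the high 64 bits. With the
// patterns this commit adds, a vector shuffle against an all-zeros
// operand can be selected to a single movq rather than a longer
// and/shuffle sequence.
__m128i zero_high64(__m128i v) {
  return _mm_move_epi64(v);  // typically compiles to: movq xmm0, xmm0
}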
Diffstat (limited to 'lib')
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp | 78
-rw-r--r--  lib/Target/X86/X86InstrSSE.td      | 83
-rw-r--r--  lib/Target/X86/X86RegisterInfo.cpp |  3
3 files changed, 119 insertions(+), 45 deletions(-)
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index c2f2736..ed1df4d 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -3138,8 +3138,6 @@ static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
   return V;
 }
 
-/// is4WideVector - Returns true if the specific v8i16 or v16i8 vector is
-/// actually just a 4 wide vector. e.g. <a, a, y, y, d, d, x, x>
 SDOperand
 X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
   // All zero's are handled with pxor, all one's are handled with pcmpeqd.
@@ -3562,17 +3560,35 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2,
   }
 }
 
-/// RewriteAs4WideShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
-/// ones if possible. This can be done when every pair / quad of shuffle mask
-/// elements point to elements in the right sequence. e.g.
+/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
+/// ones, or rewriting v4i32 / v2f32 as 2 wide ones if possible. This can be
+/// done when every pair / quad of shuffle mask elements point to elements in
+/// the right sequence. e.g.
 /// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15>
 static
-SDOperand RewriteAs4WideShuffle(SDOperand V1, SDOperand V2,
+SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2,
+                                   MVT::ValueType VT,
                                 SDOperand PermMask, SelectionDAG &DAG,
                                 TargetLowering &TLI) {
   unsigned NumElems = PermMask.getNumOperands();
-  unsigned Scale = NumElems / 4;
-  SmallVector<SDOperand, 4> MaskVec;
+  unsigned NewWidth = (NumElems == 4) ? 2 : 4;
+  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
+  MVT::ValueType NewVT = MaskVT;
+  switch (VT) {
+  case MVT::v4f32: NewVT = MVT::v2f64; break;
+  case MVT::v4i32: NewVT = MVT::v2i64; break;
+  case MVT::v8i16: NewVT = MVT::v4i32; break;
+  case MVT::v16i8: NewVT = MVT::v4i32; break;
+  default: assert(false && "Unexpected!");
+  }
+
+  if (NewWidth == 2)
+    if (MVT::isInteger(VT))
+      NewVT = MVT::v2i64;
+    else
+      NewVT = MVT::v2f64;
+  unsigned Scale = NumElems / NewWidth;
+  SmallVector<SDOperand, 8> MaskVec;
   for (unsigned i = 0; i < NumElems; i += Scale) {
     unsigned StartIdx = ~0U;
     for (unsigned j = 0; j < Scale; ++j) {
@@ -3591,10 +3607,11 @@ SDOperand RewriteAs4WideShuffle(SDOperand V1, SDOperand V2,
     MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32));
   }
 
-  V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1);
-  V2 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V2);
-  return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1, V2,
-                     DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, &MaskVec[0],4));
+  V1 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V1);
+  V2 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V2);
+  return DAG.getNode(ISD::VECTOR_SHUFFLE, NewVT, V1, V2,
+                     DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
+                                 &MaskVec[0], MaskVec.size()));
 }
 
 SDOperand
@@ -3626,6 +3643,35 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
     return PromoteSplat(Op, DAG);
   }
 
+  // If the shuffle can be profitably rewritten as a narrower shuffle, then
+  // do it!
+  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
+    SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
+    if (NewOp.Val)
+      return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
+  } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
+    // FIXME: Figure out a cleaner way to do this.
+    // Try to make use of movq to zero out the top part.
+    if (ISD::isBuildVectorAllZeros(V2.Val)) {
+      SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
+      if (NewOp.Val) {
+        SDOperand NewV1 = NewOp.getOperand(0);
+        SDOperand NewV2 = NewOp.getOperand(1);
+        SDOperand NewMask = NewOp.getOperand(2);
+        if (isCommutedMOVL(NewMask.Val, true, false)) {
+          NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG);
+          NewOp = DAG.getNode(ISD::VECTOR_SHUFFLE, NewOp.getValueType(),
+                              NewV1, NewV2, getMOVLMask(2, DAG));
+          return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
+        }
+      }
+    } else if (ISD::isBuildVectorAllZeros(V1.Val)) {
+      SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
+      if (NewOp.Val && X86::isMOVLMask(NewOp.getOperand(2).Val))
+        return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
+    }
+  }
+
   if (X86::isMOVLMask(PermMask.Val))
     return (V1IsUndef) ? V2 : Op;
 
@@ -3654,6 +3700,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
     Commuted = true;
   }
 
+  // FIXME: Figure out a cleaner way to do this.
   if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) {
     if (V2IsUndef) return V1;
     Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
@@ -3735,13 +3782,6 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
     }
   }
 
-  // If the shuffle can be rewritten as a 4 wide shuffle, then do it!
-  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
-    SDOperand NewOp = RewriteAs4WideShuffle(V1, V2, PermMask, DAG, *this);
-    if (NewOp.Val)
-      return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
-  }
-
   // Handle v8i16 specifically since SSE can do byte extraction and insertion.
   if (VT == MVT::v8i16) {
     SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this);
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 26767a5..65c7906 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -2224,35 +2224,56 @@ let AddedComplexity = 20 in
                                  (loadf64 addr:$src))),
                            MOVL_shuffle_mask)))]>;
 
-let AddedComplexity = 15 in
 // movd / movq to XMM register zero-extends
+let AddedComplexity = 15 in {
 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v4i32 (vector_shuffle immAllZerosV,
                                    (v4i32 (scalar_to_vector GR32:$src)),
                                    MOVL_shuffle_mask)))]>;
 
-let AddedComplexity = 20 in
+// This is X86-64 only.
+def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
+                        "mov{d|q}\t{$src, $dst|$dst, $src}",
+                        [(set VR128:$dst,
+                          (v2i64 (vector_shuffle immAllZerosV_bc,
+                                    (v2i64 (scalar_to_vector GR64:$src)),
+                                    MOVL_shuffle_mask)))]>;
+}
+
+let AddedComplexity = 20 in {
 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v4i32 (vector_shuffle immAllZerosV,
                                    (v4i32 (scalar_to_vector (loadi32 addr:$src))),
                                    MOVL_shuffle_mask)))]>;
-
-// Moving from XMM to XMM but still clear upper 64 bits.
-let AddedComplexity = 15 in
-def MOVZQI2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
-                     "movq\t{$src, $dst|$dst, $src}",
-                     [(set VR128:$dst, (int_x86_sse2_movl_dq VR128:$src))]>,
-                   XS, Requires<[HasSSE2]>;
-let AddedComplexity = 20 in
 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                      "movq\t{$src, $dst|$dst, $src}",
-                     [(set VR128:$dst, (int_x86_sse2_movl_dq
-                                        (bitconvert (memopv2i64 addr:$src))))]>,
-                   XS, Requires<[HasSSE2]>;
+                     [(set VR128:$dst,
+                       (v2i64 (vector_shuffle immAllZerosV_bc,
+                                 (v2i64 (scalar_to_vector (loadi64 addr:$src))),
+                                 MOVL_shuffle_mask)))]>, XS,
+                   Requires<[HasSSE2]>;
+}
 
+// Moving from XMM to XMM and clear upper 64 bits. Note, there is a bug in
+// IA32 document. movq xmm1, xmm2 does clear the high bits.
+let AddedComplexity = 15 in
+def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+                        "movq\t{$src, $dst|$dst, $src}",
+                        [(set VR128:$dst, (v2i64 (vector_shuffle immAllZerosV_bc,
+                                                    VR128:$src,
+                                                    MOVL_shuffle_mask)))]>,
+                      XS, Requires<[HasSSE2]>;
+
+let AddedComplexity = 20 in
+def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+                        "movq\t{$src, $dst|$dst, $src}",
+                        [(set VR128:$dst, (v2i64 (vector_shuffle immAllZerosV_bc,
+                                                    (memopv2i64 addr:$src),
+                                                    MOVL_shuffle_mask)))]>,
+                      XS, Requires<[HasSSE2]>;
 
 //===----------------------------------------------------------------------===//
 // SSE3 Instructions
@@ -2763,13 +2784,13 @@ def : Pat<(vector_shuffle (v4f32 VR128:$src), (undef), SSE_splat_mask:$sm),
 
 // Special unary SHUFPSrri case.
 // FIXME: when we want non two-address code, then we should use PSHUFD?
-def : Pat<(vector_shuffle (v4f32 VR128:$src1), (undef),
-           SHUFP_unary_shuffle_mask:$sm),
+def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
+                  SHUFP_unary_shuffle_mask:$sm)),
          (SHUFPSrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE1]>;
 // Special unary SHUFPDrri case.
-def : Pat<(vector_shuffle (v2f64 VR128:$src1), (undef),
-           SHUFP_unary_shuffle_mask:$sm),
+def : Pat<(v2f64 (vector_shuffle VR128:$src1, (undef),
+                  SHUFP_unary_shuffle_mask:$sm)),
          (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
@@ -2778,14 +2799,24 @@ def : Pat<(vector_shuffle (memopv4f32 addr:$src1), (undef),
          (PSHUFDmi addr:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
 // Special binary v4i32 shuffle cases with SHUFPS.
-def : Pat<(vector_shuffle (v4i32 VR128:$src1), (v4i32 VR128:$src2),
-           PSHUFD_binary_shuffle_mask:$sm),
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, (v4i32 VR128:$src2),
+                  PSHUFD_binary_shuffle_mask:$sm)),
          (SHUFPSrri VR128:$src1, VR128:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
          Requires<[HasSSE2]>;
-def : Pat<(vector_shuffle (v4i32 VR128:$src1),
-           (bc_v4i32 (memopv2i64 addr:$src2)), PSHUFD_binary_shuffle_mask:$sm),
+def : Pat<(v4i32 (vector_shuffle VR128:$src1,
+                  (bc_v4i32 (memopv2i64 addr:$src2)), PSHUFD_binary_shuffle_mask:$sm)),
          (SHUFPSrmi VR128:$src1, addr:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
          Requires<[HasSSE2]>;
+// Special binary v2i64 shuffle cases using SHUFPDrri.
+def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
+                  SHUFP_shuffle_mask:$sm)),
+          (SHUFPDrri VR128:$src1, VR128:$src2, SHUFP_shuffle_mask:$sm)>,
+          Requires<[HasSSE2]>;
+// Special unary SHUFPDrri case.
+def : Pat<(v2i64 (vector_shuffle VR128:$src1, (undef),
+                  SHUFP_unary_shuffle_mask:$sm)),
+          (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
+          Requires<[HasSSE2]>;
 
 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
 let AddedComplexity = 10 in {
@@ -2888,11 +2919,11 @@ def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
 }
 
 // Set lowest element and zero upper elements.
-let AddedComplexity = 20 in
-def : Pat<(bc_v2i64 (vector_shuffle immAllZerosV_bc,
-                     (v2f64 (scalar_to_vector (loadf64 addr:$src))),
-                     MOVL_shuffle_mask)),
-          (MOVZQI2PQIrm addr:$src)>, Requires<[HasSSE2]>;
+let AddedComplexity = 15 in
+def : Pat<(v2f64 (vector_shuffle immAllZerosV_bc, VR128:$src,
+                  MOVL_shuffle_mask)),
+          (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
+
 
 // FIXME: Temporary workaround since 2-wide shuffle is broken.
 def : Pat<(int_x86_sse2_movs_d VR128:$src1, VR128:$src2),
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 883cd9c..a038366 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -409,6 +409,9 @@ X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
     { X86::MOVSX64rr8,      X86::MOVSX64rm8 },
     { X86::MOVUPDrr,        X86::MOVUPDrm },
     { X86::MOVUPSrr,        X86::MOVUPSrm },
+    { X86::MOVZDI2PDIrr,    X86::MOVZDI2PDIrm },
+    { X86::MOVZQI2PQIrr,    X86::MOVZQI2PQIrm },
+    { X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm },
     { X86::MOVZX16rr8,      X86::MOVZX16rm8 },
     { X86::MOVZX32rr16,     X86::MOVZX32rm16 },
     { X86::MOVZX32rr8,      X86::MOVZX32rm8 },
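
A note for readers tracing the LowerVECTOR_SHUFFLE change above: the core of RewriteAsNarrowerShuffle is the check that every group of Scale shuffle-mask indices forms a consecutive run starting on a Scale-aligned boundary; only then can the shuffle be re-expressed with fewer, wider elements. A standalone C++ sketch of that check, using plain integer masks rather than LLVM's SDOperand nodes (the helper name and simplified interface are illustrative, and the real code also tolerates undef mask elements):

#include <cassert>
#include <cstdio>
#include <vector>

// Sketch of the narrowing check in RewriteAsNarrowerShuffle: a shuffle
// mask narrows from NumElems elements to NewWidth wider elements when
// each group of Scale = NumElems / NewWidth indices is a consecutive
// run starting at a multiple of Scale; the narrowed index is then
// StartIdx / Scale.
static bool narrowShuffleMask(const std::vector<int> &Mask, unsigned NewWidth,
                              std::vector<int> &Narrowed) {
  unsigned NumElems = static_cast<unsigned>(Mask.size());
  unsigned Scale = NumElems / NewWidth;
  for (unsigned i = 0; i < NumElems; i += Scale) {
    int StartIdx = Mask[i];
    if (StartIdx % static_cast<int>(Scale) != 0)  // must start on a boundary
      return false;
    for (unsigned j = 1; j < Scale; ++j)
      if (Mask[i + j] != StartIdx + static_cast<int>(j))  // and be consecutive
        return false;
    Narrowed.push_back(StartIdx / static_cast<int>(Scale));
  }
  return true;
}

int main() {
  // The v8i16 mask <2,3, 8,9, 0,1, 10,11> narrows to the v4i32 mask
  // <1, 4, 0, 5>: indices 0-7 address V1 and 8-15 address V2 before
  // narrowing, 0-3 and 4-7 afterwards.
  std::vector<int> Mask = {2, 3, 8, 9, 0, 1, 10, 11};
  std::vector<int> Narrowed;
  assert(narrowShuffleMask(Mask, 4, Narrowed));
  for (int Idx : Narrowed)
    std::printf("%d ", Idx);  // prints: 1 4 0 5
  return 0;
}

When the check succeeds, the patch bitcasts both operands to the wider type (v8i16/v16i8 to v4i32, or v4i32/v4f32 to v2i64/v2f64) and emits the narrowed shuffle, which is what lets 2-wide results match the movq zero-extension patterns added in X86InstrSSE.td.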