author | Nadav Rotem <nrotem@apple.com> | 2013-01-20 08:35:56 +0000
---|---|---
committer | Nadav Rotem <nrotem@apple.com> | 2013-01-20 08:35:56 +0000
commit | 0c8607ba6a21578996a7532b9390afba13bd2087 (patch) |
tree | 71f7605e9ff90372ea8159b6f7be62b3d1a03fe2 /lib/Target/X86 |
parent | ec98d2ce5ec51148e442c024b77a7483c81eee6c (diff) |
Revert 172708.
The optimization handles esoteric cases but adds a lot of complexity both to the X86 backend and to other backends.
This optimization disables an important canonicalization of chains of SEXT nodes and makes SEXT and ZEXT asymmetrical.
Disabling the canonicalization of consecutive SEXT nodes into a single node disables other DAG optimizations that assume
there is only one SEXT node; the AVX mask optimizations are one example. Additionally, this optimization does not update the cost model.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@172968 91177308-0d34-0410-b5e6-96231b3b80d8
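For illustration only (not part of the commit message): the canonicalization referred to above relies on a chain of sign extensions being equivalent to a single, wider SEXT, so the combiner can fold sext(sext(x)) into one node and later combines can assume they see exactly one SEXT. A minimal scalar C++ sketch of that equivalence, with made-up function names:

```cpp
#include <cassert>
#include <cstdint>

// sext i8 -> i16, then sext i16 -> i32 (a chain of two extensions).
int32_t SExtChained(int8_t X) {
  int16_t Wide = static_cast<int16_t>(X);
  return static_cast<int32_t>(Wide);
}

// sext i8 -> i32 in a single step (the canonical single-SEXT form).
int32_t SExtDirect(int8_t X) {
  return static_cast<int32_t>(X);
}

int main() {
  // The two forms agree for every i8 value, which is why the chain can
  // always be folded into one SEXT node.
  for (int V = -128; V <= 127; ++V)
    assert(SExtChained(static_cast<int8_t>(V)) == SExtDirect(static_cast<int8_t>(V)));
  return 0;
}
```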
Diffstat (limited to 'lib/Target/X86')
-rw-r--r-- | lib/Target/X86/X86ISelLowering.cpp | 28 |
1 file changed, 2 insertions, 26 deletions
```diff
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index ec9f675..c2b209d 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -17000,38 +17000,14 @@ static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
-  EVT VT = N->getValueType(0);
-
-  if (!VT.isVector())
-    return SDValue();
-
-  SDValue In = N->getOperand(0);
-  EVT InVT = In.getValueType();
-  DebugLoc dl = N->getDebugLoc();
-  unsigned ExtendedEltSize = VT.getVectorElementType().getSizeInBits();
-
-  // Split SIGN_EXTEND operation to use vmovsx instruction when possible
-  if (InVT == MVT::v8i8) {
-    if (ExtendedEltSize > 16 && !Subtarget->hasInt256())
-      In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, In);
-    if (ExtendedEltSize > 32)
-      In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i32, In);
-    return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, In);
-  }
-
-  if ((InVT == MVT::v4i8 || InVT == MVT::v4i16) &&
-      ExtendedEltSize > 32 && !Subtarget->hasInt256()) {
-    In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
-    return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, In);
-  }
-
   if (!DCI.isBeforeLegalizeOps())
     return SDValue();
 
   if (!Subtarget->hasFp256())
     return SDValue();
 
-  if (VT.is256BitVector()) {
+  EVT VT = N->getValueType(0);
+  if (VT.isVector() && VT.getSizeInBits() == 256) {
     SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
     if (R.getNode())
       return R;
```
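For context on the deleted block: its comment says the SIGN_EXTEND was split so it could be lowered to vmovsx/pmovsx instructions when the subtarget lacks 256-bit integer support (hasInt256). The sketch below is only an intrinsics analogue of that staging, not the backend's actual lowering; the function names are made up and SSE4.1 (compile with -msse4.1) is assumed.

```cpp
#include <cassert>
#include <smmintrin.h>  // SSE4.1: pmovsx* intrinsics

// Two-step staging, roughly mirroring the i8 -> i16 -> i32 chain the removed
// combine built up for narrow input vectors.
__m128i SignExtendBytesToDwordsStaged(__m128i Bytes) {
  __m128i Words = _mm_cvtepi8_epi16(Bytes);  // pmovsxbw: i8  -> i16 lanes
  return _mm_cvtepi16_epi32(Words);          // pmovsxwd: i16 -> i32 lanes
}

// Single-step form; this is the shape the DAG canonicalization keeps visible.
__m128i SignExtendBytesToDwordsDirect(__m128i Bytes) {
  return _mm_cvtepi8_epi32(Bytes);           // pmovsxbd: i8 -> i32 lanes
}

int main() {
  __m128i Bytes = _mm_setr_epi8(-1, 2, -3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
  __m128i A = SignExtendBytesToDwordsStaged(Bytes);
  __m128i B = SignExtendBytesToDwordsDirect(Bytes);
  // Both paths produce identical 32-bit lanes from the low four bytes.
  assert(_mm_movemask_epi8(_mm_cmpeq_epi32(A, B)) == 0xFFFF);
  return 0;
}
```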