author     Chandler Carruth <chandlerc@gmail.com>  2012-01-11 09:35:02 +0000
committer  Chandler Carruth <chandlerc@gmail.com>  2012-01-11 09:35:02 +0000
commit     dddcd78e24babb4ca6b35d99abe40bdedde71fab (patch)
tree       ec0434e99701fe556ff6e68096deea8b8cf0a5ea /lib/Target/X86/X86ISelDAGToDAG.cpp
parent     fde2c1a4c67c8a858b08785bc34aadf07f5c1a44 (diff)
Unify the interface of the three mask+shift transform helpers, and factor
the differences that were hiding in one of them into its other caller, the
SRL handling code. No change in behavior.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@147940 91177308-0d34-0410-b5e6-96231b3b80d8
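
The transform these helpers implement rewrites "(X >> C) & Mask" so that the
low clear bits of the post-shift mask become the addressing-mode scale. A
minimal standalone sketch of that identity, assuming illustrative names that
are not the in-tree API:

#include <cassert>
#include <cstdint>

// Count low zero bits; a stand-in for LLVM's CountTrailingZeros_64.
static unsigned trailingZeros(uint64_t V) {
  unsigned N = 0;
  for (; V && !(V & 1); V >>= 1) ++N;
  return N;
}

// Original form: shift, then apply a post-shift mask.
static uint64_t maskedShift(uint64_t X, unsigned ShiftAmt, uint64_t Mask) {
  return (X >> ShiftAmt) & Mask;
}

// Rewritten form: fold the mask's trailing zeros into a final left shift,
// which an x86 addressing mode encodes as a scale of 2, 4, or 8.
static uint64_t scaledForm(uint64_t X, unsigned ShiftAmt, uint64_t Mask) {
  unsigned AMShiftAmt = trailingZeros(Mask);
  assert(AMShiftAmt >= 1 && AMShiftAmt <= 3 && "scale must be 2, 4, or 8");
  uint64_t Index = (X >> (ShiftAmt + AMShiftAmt)) & (Mask >> AMShiftAmt);
  return Index << AMShiftAmt; // this left shift becomes the scale
}

int main() {
  for (uint64_t X = 0; X < 4096; ++X)
    assert(maskedShift(X, 2, 0x7C) == scaledForm(X, 2, 0x7C));
}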
Diffstat (limited to 'lib/Target/X86/X86ISelDAGToDAG.cpp')
-rw-r--r--  lib/Target/X86/X86ISelDAGToDAG.cpp | 60
1 file changed, 34 insertions(+), 26 deletions(-)
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index 326ee87..7499182 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -863,35 +863,24 @@ static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
// andl $124, %rcx
// addl (%rsi,%rcx), %eax
//
+// Note that this function assumes the mask is provided as a mask *after* the
+// value is shifted. The input chain may or may not match that, but computing
+// such a mask is trivial.
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
+ uint64_t Mask,
+ SDValue Shift, SDValue X,
X86ISelAddressMode &AM) {
- // Scale must not be used already.
- if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) return true;
-
- SDValue Shift = N;
- SDValue And = N.getOperand(0);
- if (N.getOpcode() != ISD::SRL)
- std::swap(Shift, And);
- if (Shift.getOpcode() != ISD::SRL || And.getOpcode() != ISD::AND ||
- !Shift.hasOneUse() ||
- !isa<ConstantSDNode>(Shift.getOperand(1)) ||
- !isa<ConstantSDNode>(And.getOperand(1)))
+ if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
+ !isa<ConstantSDNode>(Shift.getOperand(1)))
return true;
- SDValue X = (N == Shift ? And.getOperand(0) : Shift.getOperand(0));
- // We only handle up to 64-bit values here as those are what matter for
- // addressing mode optimizations.
- if (X.getValueSizeInBits() > 64) return true;
-
- uint64_t Mask = And.getConstantOperandVal(1);
unsigned ShiftAmt = Shift.getConstantOperandVal(1);
unsigned MaskLZ = CountLeadingZeros_64(Mask);
unsigned MaskTZ = CountTrailingZeros_64(Mask);
// The amount of shift we're trying to fit into the addressing mode is taken
- // from the trailing zeros of the mask. If the mask is pre-shift, we subtract
- // the shift amount.
- int AMShiftAmt = MaskTZ - (N == Shift ? ShiftAmt : 0);
+ // from the trailing zeros of the mask.
+ unsigned AMShiftAmt = MaskTZ;
// There is nothing we can do here unless the mask is removing some bits.
// Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
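
As a worked example of the trailing-zero logic above, using the $124 mask
from the comment at the top of the function (values are illustrative):

// Mask = 124 = 0b1111100  -> MaskTZ = 2, MaskLZ = 57 (counted in 64 bits)
// AMShiftAmt = MaskTZ = 2 -> candidate scale = 1 << 2 = 4
// AMShiftAmt == 0 or AMShiftAmt > 3 cannot be encoded, so those bail out.
// The contiguity check below also holds for this mask:
// CountTrailingOnes_64(124 >> 2) + 2 + 57 == 5 + 2 + 57 == 64.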
@@ -901,9 +890,8 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
// Scale the leading zero count down based on the actual size of the value.
- // Also scale it down based on the size of the shift if it was applied
- // before the mask.
- MaskLZ -= (64 - X.getValueSizeInBits()) + (N == Shift ? 0 : ShiftAmt);
+ // Also scale it down based on the size of the shift.
+ MaskLZ -= (64 - X.getValueSizeInBits()) + ShiftAmt;
// The final check is to ensure that any masked out high bits of X are
// already known to be zero. Otherwise, the mask has a semantic impact
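
A worked sketch of the leading-zero adjustment just above, assuming
illustrative values (X is 32 bits wide, ShiftAmt = 2, Mask = 124):

// MaskLZ starts at 57, counted in the 64-bit mask.
//   57 - (64 - 32) = 25   zeros that are only an artifact of widening
//   25 - 2         = 23   high bits the mask genuinely clears, i.e.
//                         bits 9..31 of X itself
// The fold is only safe if those 23 bits of X are already known to be
// zero; otherwise dropping the mask would change the computed value.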
@@ -1062,12 +1050,32 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
break;
}
- case ISD::SRL:
+ case ISD::SRL: {
+ // Scale must not be used already.
+ if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
+
+ SDValue And = N.getOperand(0);
+ if (And.getOpcode() != ISD::AND) break;
+ SDValue X = And.getOperand(0);
+
+ // We only handle up to 64-bit values here as those are what matter for
+ // addressing mode optimizations.
+ if (X.getValueSizeInBits() > 64) break;
+
+ // The mask used for the transform is expected to be post-shift, but we
+ // found the shift first so just apply the shift to the mask before passing
+ // it down.
+ if (!isa<ConstantSDNode>(N.getOperand(1)) ||
+ !isa<ConstantSDNode>(And.getOperand(1)))
+ break;
+ uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
+
// Try to fold the mask and shift into the scale, and return false if we
// succeed.
- if (!FoldMaskAndShiftToScale(*CurDAG, N, AM))
+ if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
return false;
break;
+ }
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI:
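
To illustrate the pre-shift to post-shift mask conversion in the SRL case
above, with hypothetical constants:

// For (X & 0xF0) >> 2, the AND constant 0xF0 is a pre-shift mask. The
// helper now always expects the post-shift form, so this caller passes
// 0xF0 >> 2 = 0x3C, since ((X & 0xF0) >> 2) == ((X >> 2) & 0x3C).
// 0x3C still has two trailing zeros, so it folds into a scale of 4.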
@@ -1257,7 +1265,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
return false;
// Try to fold the mask and shift directly into the scale.
- if (!FoldMaskAndShiftToScale(*CurDAG, N, AM))
+ if (!FoldMaskAndShiftToScale(*CurDAG, N, C2->getZExtValue(), Shift, X, AM))
return false;
// Try to swap the mask and shift to place shifts which can be done as