diff options
author | Dale Johannesen <dalej@apple.com> | 2008-08-28 02:44:49 +0000 |
---|---|---|
committer | Dale Johannesen <dalej@apple.com> | 2008-08-28 02:44:49 +0000 |
commit | e00a8a2a2e11a37fd1ddf2504bd22d225d0994d0 (patch) | |
tree | 484cf9b89070b672dd21616a49c0f15109bd51b8 | |
parent | f2c785edf0d7ceb4491333146e289fdbbba1dddf (diff) | |
download | external_llvm-e00a8a2a2e11a37fd1ddf2504bd22d225d0994d0.zip external_llvm-e00a8a2a2e11a37fd1ddf2504bd22d225d0994d0.tar.gz external_llvm-e00a8a2a2e11a37fd1ddf2504bd22d225d0994d0.tar.bz2 |
Split the ATOMIC NodeType's to include the size, e.g.
ATOMIC_LOAD_ADD_{8,16,32,64} instead of ATOMIC_LOAD_ADD.
Increased the hardcoded constant OpActionsCapacity to match.
Large but boring; no functional change.
This is to support partial-word atomics on ppc; i8 is
not a valid type there, so by the time we get to lowering, the
ATOMIC_LOAD nodes look the same whether the type was i8 or i32.
The information can be added to the AtomicSDNode, but that is the
largest SDNode; I don't fully understand the SDNode allocation,
but it is sensitive to the largest node size, so increasing
that must be bad. This is the alternative.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55457 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r-- | include/llvm/CodeGen/SelectionDAGNodes.h | 207 | ||||
-rw-r--r-- | include/llvm/Target/TargetLowering.h | 2 | ||||
-rw-r--r-- | lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 126 | ||||
-rw-r--r-- | lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 176 | ||||
-rw-r--r-- | lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 197 | ||||
-rw-r--r-- | lib/Target/TargetSelectionDAG.td | 268 | ||||
-rw-r--r-- | lib/Target/X86/X86ISelLowering.cpp | 35 | ||||
-rw-r--r-- | lib/Target/X86/X86Instr64bit.td | 16 | ||||
-rw-r--r-- | lib/Target/X86/X86InstrInfo.td | 40 |
9 files changed, 717 insertions, 350 deletions
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h index b509667..8693173 100644 --- a/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/include/llvm/CodeGen/SelectionDAGNodes.h @@ -58,6 +58,8 @@ struct SDVTList { /// ISD namespace - This namespace contains an enum which represents all of the /// SelectionDAG node types and value types. /// +/// If you add new elements here you should increase OpActionsCapacity in +/// TargetLowering.h by the number of new elements. namespace ISD { //===--------------------------------------------------------------------===// @@ -589,38 +591,64 @@ namespace ISD { // this corresponds to the atomic.lcs intrinsic. // cmp is compared to *ptr, and if equal, swap is stored in *ptr. // the return is always the original value in *ptr - ATOMIC_CMP_SWAP, - - // Val, OUTCHAIN = ATOMIC_LOAD_ADD(INCHAIN, ptr, amt) - // this corresponds to the atomic.las intrinsic. - // *ptr + amt is stored to *ptr atomically. - // the return is always the original value in *ptr - ATOMIC_LOAD_ADD, + ATOMIC_CMP_SWAP_8, + ATOMIC_CMP_SWAP_16, + ATOMIC_CMP_SWAP_32, + ATOMIC_CMP_SWAP_64, // Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) // this corresponds to the atomic.swap intrinsic. // amt is stored to *ptr atomically. // the return is always the original value in *ptr - ATOMIC_SWAP, + ATOMIC_SWAP_8, + ATOMIC_SWAP_16, + ATOMIC_SWAP_32, + ATOMIC_SWAP_64, - // Val, OUTCHAIN = ATOMIC_LOAD_SUB(INCHAIN, ptr, amt) - // this corresponds to the atomic.lss intrinsic. - // *ptr - amt is stored to *ptr atomically. - // the return is always the original value in *ptr - ATOMIC_LOAD_SUB, - // Val, OUTCHAIN = ATOMIC_L[OpName]S(INCHAIN, ptr, amt) // this corresponds to the atomic.[OpName] intrinsic. // op(*ptr, amt) is stored to *ptr atomically. 
// the return is always the original value in *ptr - ATOMIC_LOAD_AND, - ATOMIC_LOAD_OR, - ATOMIC_LOAD_XOR, - ATOMIC_LOAD_NAND, - ATOMIC_LOAD_MIN, - ATOMIC_LOAD_MAX, - ATOMIC_LOAD_UMIN, - ATOMIC_LOAD_UMAX, + ATOMIC_LOAD_ADD_8, + ATOMIC_LOAD_SUB_8, + ATOMIC_LOAD_AND_8, + ATOMIC_LOAD_OR_8, + ATOMIC_LOAD_XOR_8, + ATOMIC_LOAD_NAND_8, + ATOMIC_LOAD_MIN_8, + ATOMIC_LOAD_MAX_8, + ATOMIC_LOAD_UMIN_8, + ATOMIC_LOAD_UMAX_8, + ATOMIC_LOAD_ADD_16, + ATOMIC_LOAD_SUB_16, + ATOMIC_LOAD_AND_16, + ATOMIC_LOAD_OR_16, + ATOMIC_LOAD_XOR_16, + ATOMIC_LOAD_NAND_16, + ATOMIC_LOAD_MIN_16, + ATOMIC_LOAD_MAX_16, + ATOMIC_LOAD_UMIN_16, + ATOMIC_LOAD_UMAX_16, + ATOMIC_LOAD_ADD_32, + ATOMIC_LOAD_SUB_32, + ATOMIC_LOAD_AND_32, + ATOMIC_LOAD_OR_32, + ATOMIC_LOAD_XOR_32, + ATOMIC_LOAD_NAND_32, + ATOMIC_LOAD_MIN_32, + ATOMIC_LOAD_MAX_32, + ATOMIC_LOAD_UMIN_32, + ATOMIC_LOAD_UMAX_32, + ATOMIC_LOAD_ADD_64, + ATOMIC_LOAD_SUB_64, + ATOMIC_LOAD_AND_64, + ATOMIC_LOAD_OR_64, + ATOMIC_LOAD_XOR_64, + ATOMIC_LOAD_NAND_64, + ATOMIC_LOAD_MIN_64, + ATOMIC_LOAD_MAX_64, + ATOMIC_LOAD_UMIN_64, + ATOMIC_LOAD_UMAX_64, // BUILTIN_OP_END - This must be the last enum value in this list. 
BUILTIN_OP_END @@ -1512,20 +1540,59 @@ public: // Methods to support isa and dyn_cast static bool classof(const MemSDNode *) { return true; } static bool classof(const SDNode *N) { - return N->getOpcode() == ISD::LOAD || - N->getOpcode() == ISD::STORE || - N->getOpcode() == ISD::ATOMIC_CMP_SWAP || - N->getOpcode() == ISD::ATOMIC_LOAD_ADD || - N->getOpcode() == ISD::ATOMIC_SWAP || - N->getOpcode() == ISD::ATOMIC_LOAD_SUB || - N->getOpcode() == ISD::ATOMIC_LOAD_AND || - N->getOpcode() == ISD::ATOMIC_LOAD_OR || - N->getOpcode() == ISD::ATOMIC_LOAD_XOR || - N->getOpcode() == ISD::ATOMIC_LOAD_NAND || - N->getOpcode() == ISD::ATOMIC_LOAD_MIN || - N->getOpcode() == ISD::ATOMIC_LOAD_MAX || - N->getOpcode() == ISD::ATOMIC_LOAD_UMIN || - N->getOpcode() == ISD::ATOMIC_LOAD_UMAX; + return N->getOpcode() == ISD::LOAD || + N->getOpcode() == ISD::STORE || + N->getOpcode() == ISD::ATOMIC_CMP_SWAP_8 || + N->getOpcode() == ISD::ATOMIC_SWAP_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_ADD_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_SUB_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_AND_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_OR_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_XOR_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_NAND_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_MIN_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_MAX_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_8 || + + N->getOpcode() == ISD::ATOMIC_CMP_SWAP_16 || + N->getOpcode() == ISD::ATOMIC_SWAP_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_ADD_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_SUB_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_AND_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_OR_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_XOR_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_NAND_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_MIN_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_MAX_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_16 || + + N->getOpcode() == 
ISD::ATOMIC_CMP_SWAP_32 || + N->getOpcode() == ISD::ATOMIC_SWAP_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_ADD_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_SUB_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_AND_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_OR_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_XOR_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_NAND_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_MIN_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_MAX_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_32 || + + N->getOpcode() == ISD::ATOMIC_CMP_SWAP_64 || + N->getOpcode() == ISD::ATOMIC_SWAP_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_ADD_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_SUB_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_AND_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_OR_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_XOR_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_NAND_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_MIN_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_MAX_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_64; } }; @@ -1567,23 +1634,65 @@ class AtomicSDNode : public MemSDNode { const SDValue &getBasePtr() const { return getOperand(1); } const SDValue &getVal() const { return getOperand(2); } - bool isCompareAndSwap() const { return getOpcode() == ISD::ATOMIC_CMP_SWAP; } + bool isCompareAndSwap() const { + unsigned Op = getOpcode(); + return Op == ISD::ATOMIC_CMP_SWAP_8 || + Op == ISD::ATOMIC_CMP_SWAP_16 || + Op == ISD::ATOMIC_CMP_SWAP_32 || + Op == ISD::ATOMIC_CMP_SWAP_64; + } // Methods to support isa and dyn_cast static bool classof(const AtomicSDNode *) { return true; } static bool classof(const SDNode *N) { - return N->getOpcode() == ISD::ATOMIC_CMP_SWAP || - N->getOpcode() == ISD::ATOMIC_LOAD_ADD || - N->getOpcode() == ISD::ATOMIC_SWAP || - N->getOpcode() == ISD::ATOMIC_LOAD_SUB || - N->getOpcode() == ISD::ATOMIC_LOAD_AND || - N->getOpcode() == ISD::ATOMIC_LOAD_OR || - N->getOpcode() 
== ISD::ATOMIC_LOAD_XOR || - N->getOpcode() == ISD::ATOMIC_LOAD_NAND || - N->getOpcode() == ISD::ATOMIC_LOAD_MIN || - N->getOpcode() == ISD::ATOMIC_LOAD_MAX || - N->getOpcode() == ISD::ATOMIC_LOAD_UMIN || - N->getOpcode() == ISD::ATOMIC_LOAD_UMAX; + return N->getOpcode() == ISD::ATOMIC_CMP_SWAP_8 || + N->getOpcode() == ISD::ATOMIC_SWAP_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_ADD_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_SUB_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_AND_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_OR_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_XOR_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_NAND_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_MIN_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_MAX_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_8 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_8 || + N->getOpcode() == ISD::ATOMIC_CMP_SWAP_16 || + N->getOpcode() == ISD::ATOMIC_SWAP_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_ADD_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_SUB_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_AND_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_OR_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_XOR_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_NAND_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_MIN_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_MAX_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_16 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_16 || + N->getOpcode() == ISD::ATOMIC_CMP_SWAP_32 || + N->getOpcode() == ISD::ATOMIC_SWAP_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_ADD_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_SUB_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_AND_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_OR_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_XOR_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_NAND_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_MIN_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_MAX_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_32 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_32 || + N->getOpcode() == ISD::ATOMIC_CMP_SWAP_64 || + N->getOpcode() == 
ISD::ATOMIC_SWAP_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_ADD_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_SUB_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_AND_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_OR_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_XOR_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_NAND_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_MIN_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_MAX_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_64 || + N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_64; } }; diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index bb3105b..c0c153c 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -1386,7 +1386,7 @@ private: MVT TransformToType[MVT::LAST_VALUETYPE]; // Defines the capacity of the TargetLowering::OpActions table - static const int OpActionsCapacity = 176; + static const int OpActionsCapacity = 212; /// OpActions - For each operation and each value type, keep a LegalizeAction /// that indicates how instruction selection should deal with the operation. 
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index f779894..91c65e8 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -1181,7 +1181,10 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { break; } - case ISD::ATOMIC_CMP_SWAP: { + case ISD::ATOMIC_CMP_SWAP_8: + case ISD::ATOMIC_CMP_SWAP_16: + case ISD::ATOMIC_CMP_SWAP_32: + case ISD::ATOMIC_CMP_SWAP_64: { unsigned int num_operands = 4; assert(Node->getNumOperands() == num_operands && "Invalid Atomic node!"); SDValue Ops[4]; @@ -1201,17 +1204,50 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1)); return Result.getValue(Op.getResNo()); } - case ISD::ATOMIC_LOAD_ADD: - case ISD::ATOMIC_LOAD_SUB: - case ISD::ATOMIC_LOAD_AND: - case ISD::ATOMIC_LOAD_OR: - case ISD::ATOMIC_LOAD_XOR: - case ISD::ATOMIC_LOAD_NAND: - case ISD::ATOMIC_LOAD_MIN: - case ISD::ATOMIC_LOAD_MAX: - case ISD::ATOMIC_LOAD_UMIN: - case ISD::ATOMIC_LOAD_UMAX: - case ISD::ATOMIC_SWAP: { + case ISD::ATOMIC_LOAD_ADD_8: + case ISD::ATOMIC_LOAD_SUB_8: + case ISD::ATOMIC_LOAD_AND_8: + case ISD::ATOMIC_LOAD_OR_8: + case ISD::ATOMIC_LOAD_XOR_8: + case ISD::ATOMIC_LOAD_NAND_8: + case ISD::ATOMIC_LOAD_MIN_8: + case ISD::ATOMIC_LOAD_MAX_8: + case ISD::ATOMIC_LOAD_UMIN_8: + case ISD::ATOMIC_LOAD_UMAX_8: + case ISD::ATOMIC_SWAP_8: + case ISD::ATOMIC_LOAD_ADD_16: + case ISD::ATOMIC_LOAD_SUB_16: + case ISD::ATOMIC_LOAD_AND_16: + case ISD::ATOMIC_LOAD_OR_16: + case ISD::ATOMIC_LOAD_XOR_16: + case ISD::ATOMIC_LOAD_NAND_16: + case ISD::ATOMIC_LOAD_MIN_16: + case ISD::ATOMIC_LOAD_MAX_16: + case ISD::ATOMIC_LOAD_UMIN_16: + case ISD::ATOMIC_LOAD_UMAX_16: + case ISD::ATOMIC_SWAP_16: + case ISD::ATOMIC_LOAD_ADD_32: + case ISD::ATOMIC_LOAD_SUB_32: + case ISD::ATOMIC_LOAD_AND_32: + case ISD::ATOMIC_LOAD_OR_32: + case ISD::ATOMIC_LOAD_XOR_32: + case ISD::ATOMIC_LOAD_NAND_32: + case ISD::ATOMIC_LOAD_MIN_32: 
+ case ISD::ATOMIC_LOAD_MAX_32: + case ISD::ATOMIC_LOAD_UMIN_32: + case ISD::ATOMIC_LOAD_UMAX_32: + case ISD::ATOMIC_SWAP_32: + case ISD::ATOMIC_LOAD_ADD_64: + case ISD::ATOMIC_LOAD_SUB_64: + case ISD::ATOMIC_LOAD_AND_64: + case ISD::ATOMIC_LOAD_OR_64: + case ISD::ATOMIC_LOAD_XOR_64: + case ISD::ATOMIC_LOAD_NAND_64: + case ISD::ATOMIC_LOAD_MIN_64: + case ISD::ATOMIC_LOAD_MAX_64: + case ISD::ATOMIC_LOAD_UMIN_64: + case ISD::ATOMIC_LOAD_UMAX_64: + case ISD::ATOMIC_SWAP_64: { unsigned int num_operands = 3; assert(Node->getNumOperands() == num_operands && "Invalid Atomic node!"); SDValue Ops[3]; @@ -4155,7 +4191,10 @@ SDValue SelectionDAGLegalize::PromoteOp(SDValue Op) { break; } - case ISD::ATOMIC_CMP_SWAP: { + case ISD::ATOMIC_CMP_SWAP_8: + case ISD::ATOMIC_CMP_SWAP_16: + case ISD::ATOMIC_CMP_SWAP_32: + case ISD::ATOMIC_CMP_SWAP_64: { AtomicSDNode* AtomNode = cast<AtomicSDNode>(Node); Tmp2 = PromoteOp(Node->getOperand(2)); Tmp3 = PromoteOp(Node->getOperand(3)); @@ -4167,17 +4206,50 @@ SDValue SelectionDAGLegalize::PromoteOp(SDValue Op) { AddLegalizedOperand(Op.getValue(1), LegalizeOp(Result.getValue(1))); break; } - case ISD::ATOMIC_LOAD_ADD: - case ISD::ATOMIC_LOAD_SUB: - case ISD::ATOMIC_LOAD_AND: - case ISD::ATOMIC_LOAD_OR: - case ISD::ATOMIC_LOAD_XOR: - case ISD::ATOMIC_LOAD_NAND: - case ISD::ATOMIC_LOAD_MIN: - case ISD::ATOMIC_LOAD_MAX: - case ISD::ATOMIC_LOAD_UMIN: - case ISD::ATOMIC_LOAD_UMAX: - case ISD::ATOMIC_SWAP: { + case ISD::ATOMIC_LOAD_ADD_8: + case ISD::ATOMIC_LOAD_SUB_8: + case ISD::ATOMIC_LOAD_AND_8: + case ISD::ATOMIC_LOAD_OR_8: + case ISD::ATOMIC_LOAD_XOR_8: + case ISD::ATOMIC_LOAD_NAND_8: + case ISD::ATOMIC_LOAD_MIN_8: + case ISD::ATOMIC_LOAD_MAX_8: + case ISD::ATOMIC_LOAD_UMIN_8: + case ISD::ATOMIC_LOAD_UMAX_8: + case ISD::ATOMIC_SWAP_8: + case ISD::ATOMIC_LOAD_ADD_16: + case ISD::ATOMIC_LOAD_SUB_16: + case ISD::ATOMIC_LOAD_AND_16: + case ISD::ATOMIC_LOAD_OR_16: + case ISD::ATOMIC_LOAD_XOR_16: + case ISD::ATOMIC_LOAD_NAND_16: + case 
ISD::ATOMIC_LOAD_MIN_16: + case ISD::ATOMIC_LOAD_MAX_16: + case ISD::ATOMIC_LOAD_UMIN_16: + case ISD::ATOMIC_LOAD_UMAX_16: + case ISD::ATOMIC_SWAP_16: + case ISD::ATOMIC_LOAD_ADD_32: + case ISD::ATOMIC_LOAD_SUB_32: + case ISD::ATOMIC_LOAD_AND_32: + case ISD::ATOMIC_LOAD_OR_32: + case ISD::ATOMIC_LOAD_XOR_32: + case ISD::ATOMIC_LOAD_NAND_32: + case ISD::ATOMIC_LOAD_MIN_32: + case ISD::ATOMIC_LOAD_MAX_32: + case ISD::ATOMIC_LOAD_UMIN_32: + case ISD::ATOMIC_LOAD_UMAX_32: + case ISD::ATOMIC_SWAP_32: + case ISD::ATOMIC_LOAD_ADD_64: + case ISD::ATOMIC_LOAD_SUB_64: + case ISD::ATOMIC_LOAD_AND_64: + case ISD::ATOMIC_LOAD_OR_64: + case ISD::ATOMIC_LOAD_XOR_64: + case ISD::ATOMIC_LOAD_NAND_64: + case ISD::ATOMIC_LOAD_MIN_64: + case ISD::ATOMIC_LOAD_MAX_64: + case ISD::ATOMIC_LOAD_UMIN_64: + case ISD::ATOMIC_LOAD_UMAX_64: + case ISD::ATOMIC_SWAP_64: { AtomicSDNode* AtomNode = cast<AtomicSDNode>(Node); Tmp2 = PromoteOp(Node->getOperand(2)); Result = DAG.getAtomic(Node->getOpcode(), AtomNode->getChain(), @@ -6092,7 +6164,11 @@ void SelectionDAGLegalize::ExpandOp(SDValue Op, SDValue &Lo, SDValue &Hi){ break; } - case ISD::ATOMIC_CMP_SWAP: { + // FIXME: should the LOAD_BIN and SWAP atomics get here too? Probably. 
+ case ISD::ATOMIC_CMP_SWAP_8: + case ISD::ATOMIC_CMP_SWAP_16: + case ISD::ATOMIC_CMP_SWAP_32: + case ISD::ATOMIC_CMP_SWAP_64: { SDValue Tmp = TLI.LowerOperation(Op, DAG); assert(Tmp.Val && "Node must be custom expanded!"); ExpandOp(Tmp.getValue(0), Lo, Hi); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 0bd1a4d..45026dc 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -439,18 +439,54 @@ static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) { ID.AddInteger(ST->getRawFlags()); break; } - case ISD::ATOMIC_CMP_SWAP: - case ISD::ATOMIC_LOAD_ADD: - case ISD::ATOMIC_SWAP: - case ISD::ATOMIC_LOAD_SUB: - case ISD::ATOMIC_LOAD_AND: - case ISD::ATOMIC_LOAD_OR: - case ISD::ATOMIC_LOAD_XOR: - case ISD::ATOMIC_LOAD_NAND: - case ISD::ATOMIC_LOAD_MIN: - case ISD::ATOMIC_LOAD_MAX: - case ISD::ATOMIC_LOAD_UMIN: - case ISD::ATOMIC_LOAD_UMAX: { + case ISD::ATOMIC_CMP_SWAP_8: + case ISD::ATOMIC_SWAP_8: + case ISD::ATOMIC_LOAD_ADD_8: + case ISD::ATOMIC_LOAD_SUB_8: + case ISD::ATOMIC_LOAD_AND_8: + case ISD::ATOMIC_LOAD_OR_8: + case ISD::ATOMIC_LOAD_XOR_8: + case ISD::ATOMIC_LOAD_NAND_8: + case ISD::ATOMIC_LOAD_MIN_8: + case ISD::ATOMIC_LOAD_MAX_8: + case ISD::ATOMIC_LOAD_UMIN_8: + case ISD::ATOMIC_LOAD_UMAX_8: + case ISD::ATOMIC_CMP_SWAP_16: + case ISD::ATOMIC_SWAP_16: + case ISD::ATOMIC_LOAD_ADD_16: + case ISD::ATOMIC_LOAD_SUB_16: + case ISD::ATOMIC_LOAD_AND_16: + case ISD::ATOMIC_LOAD_OR_16: + case ISD::ATOMIC_LOAD_XOR_16: + case ISD::ATOMIC_LOAD_NAND_16: + case ISD::ATOMIC_LOAD_MIN_16: + case ISD::ATOMIC_LOAD_MAX_16: + case ISD::ATOMIC_LOAD_UMIN_16: + case ISD::ATOMIC_LOAD_UMAX_16: + case ISD::ATOMIC_CMP_SWAP_32: + case ISD::ATOMIC_SWAP_32: + case ISD::ATOMIC_LOAD_ADD_32: + case ISD::ATOMIC_LOAD_SUB_32: + case ISD::ATOMIC_LOAD_AND_32: + case ISD::ATOMIC_LOAD_OR_32: + case ISD::ATOMIC_LOAD_XOR_32: + case ISD::ATOMIC_LOAD_NAND_32: + case ISD::ATOMIC_LOAD_MIN_32: 
+ case ISD::ATOMIC_LOAD_MAX_32: + case ISD::ATOMIC_LOAD_UMIN_32: + case ISD::ATOMIC_LOAD_UMAX_32: + case ISD::ATOMIC_CMP_SWAP_64: + case ISD::ATOMIC_SWAP_64: + case ISD::ATOMIC_LOAD_ADD_64: + case ISD::ATOMIC_LOAD_SUB_64: + case ISD::ATOMIC_LOAD_AND_64: + case ISD::ATOMIC_LOAD_OR_64: + case ISD::ATOMIC_LOAD_XOR_64: + case ISD::ATOMIC_LOAD_NAND_64: + case ISD::ATOMIC_LOAD_MIN_64: + case ISD::ATOMIC_LOAD_MAX_64: + case ISD::ATOMIC_LOAD_UMIN_64: + case ISD::ATOMIC_LOAD_UMAX_64: { const AtomicSDNode *AT = cast<AtomicSDNode>(N); ID.AddInteger(AT->getRawFlags()); break; @@ -3149,7 +3185,10 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, const Value* PtrVal, unsigned Alignment) { - assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op"); + assert((Opcode == ISD::ATOMIC_CMP_SWAP_8 || + Opcode == ISD::ATOMIC_CMP_SWAP_16 || + Opcode == ISD::ATOMIC_CMP_SWAP_32 || + Opcode == ISD::ATOMIC_CMP_SWAP_64) && "Invalid Atomic Op"); assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); MVT VT = Cmp.getValueType(); @@ -3175,13 +3214,50 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, SDValue Chain, SDValue Ptr, SDValue Val, const Value* PtrVal, unsigned Alignment) { - assert(( Opcode == ISD::ATOMIC_LOAD_ADD || Opcode == ISD::ATOMIC_LOAD_SUB - || Opcode == ISD::ATOMIC_SWAP || Opcode == ISD::ATOMIC_LOAD_AND - || Opcode == ISD::ATOMIC_LOAD_OR || Opcode == ISD::ATOMIC_LOAD_XOR - || Opcode == ISD::ATOMIC_LOAD_NAND - || Opcode == ISD::ATOMIC_LOAD_MIN || Opcode == ISD::ATOMIC_LOAD_MAX - || Opcode == ISD::ATOMIC_LOAD_UMIN || Opcode == ISD::ATOMIC_LOAD_UMAX) - && "Invalid Atomic Op"); + assert((Opcode == ISD::ATOMIC_LOAD_ADD_8 || + Opcode == ISD::ATOMIC_LOAD_SUB_8 || + Opcode == ISD::ATOMIC_LOAD_AND_8 || + Opcode == ISD::ATOMIC_LOAD_OR_8 || + Opcode == ISD::ATOMIC_LOAD_XOR_8 || + Opcode == ISD::ATOMIC_LOAD_NAND_8 || + Opcode == ISD::ATOMIC_LOAD_MIN_8 || + Opcode == ISD::ATOMIC_LOAD_MAX_8 || + Opcode == 
ISD::ATOMIC_LOAD_UMIN_8 || + Opcode == ISD::ATOMIC_LOAD_UMAX_8 || + Opcode == ISD::ATOMIC_SWAP_8 || + Opcode == ISD::ATOMIC_LOAD_ADD_16 || + Opcode == ISD::ATOMIC_LOAD_SUB_16 || + Opcode == ISD::ATOMIC_LOAD_AND_16 || + Opcode == ISD::ATOMIC_LOAD_OR_16 || + Opcode == ISD::ATOMIC_LOAD_XOR_16 || + Opcode == ISD::ATOMIC_LOAD_NAND_16 || + Opcode == ISD::ATOMIC_LOAD_MIN_16 || + Opcode == ISD::ATOMIC_LOAD_MAX_16 || + Opcode == ISD::ATOMIC_LOAD_UMIN_16 || + Opcode == ISD::ATOMIC_LOAD_UMAX_16 || + Opcode == ISD::ATOMIC_SWAP_16 || + Opcode == ISD::ATOMIC_LOAD_ADD_32 || + Opcode == ISD::ATOMIC_LOAD_SUB_32 || + Opcode == ISD::ATOMIC_LOAD_AND_32 || + Opcode == ISD::ATOMIC_LOAD_OR_32 || + Opcode == ISD::ATOMIC_LOAD_XOR_32 || + Opcode == ISD::ATOMIC_LOAD_NAND_32 || + Opcode == ISD::ATOMIC_LOAD_MIN_32 || + Opcode == ISD::ATOMIC_LOAD_MAX_32 || + Opcode == ISD::ATOMIC_LOAD_UMIN_32 || + Opcode == ISD::ATOMIC_LOAD_UMAX_32 || + Opcode == ISD::ATOMIC_SWAP_32 || + Opcode == ISD::ATOMIC_LOAD_ADD_64 || + Opcode == ISD::ATOMIC_LOAD_SUB_64 || + Opcode == ISD::ATOMIC_LOAD_AND_64 || + Opcode == ISD::ATOMIC_LOAD_OR_64 || + Opcode == ISD::ATOMIC_LOAD_XOR_64 || + Opcode == ISD::ATOMIC_LOAD_NAND_64 || + Opcode == ISD::ATOMIC_LOAD_MIN_64 || + Opcode == ISD::ATOMIC_LOAD_MAX_64 || + Opcode == ISD::ATOMIC_LOAD_UMIN_64 || + Opcode == ISD::ATOMIC_LOAD_UMAX_64 || + Opcode == ISD::ATOMIC_SWAP_64) && "Invalid Atomic Op"); MVT VT = Val.getValueType(); @@ -4721,18 +4797,54 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const { #endif case ISD::PREFETCH: return "Prefetch"; case ISD::MEMBARRIER: return "MemBarrier"; - case ISD::ATOMIC_CMP_SWAP: return "AtomicCmpSwap"; - case ISD::ATOMIC_LOAD_ADD: return "AtomicLoadAdd"; - case ISD::ATOMIC_LOAD_SUB: return "AtomicLoadSub"; - case ISD::ATOMIC_LOAD_AND: return "AtomicLoadAnd"; - case ISD::ATOMIC_LOAD_OR: return "AtomicLoadOr"; - case ISD::ATOMIC_LOAD_XOR: return "AtomicLoadXor"; - case ISD::ATOMIC_LOAD_NAND: return "AtomicLoadNand"; - case 
ISD::ATOMIC_LOAD_MIN: return "AtomicLoadMin"; - case ISD::ATOMIC_LOAD_MAX: return "AtomicLoadMax"; - case ISD::ATOMIC_LOAD_UMIN: return "AtomicLoadUMin"; - case ISD::ATOMIC_LOAD_UMAX: return "AtomicLoadUMax"; - case ISD::ATOMIC_SWAP: return "AtomicSWAP"; + case ISD::ATOMIC_CMP_SWAP_8: return "AtomicCmpSwap8"; + case ISD::ATOMIC_SWAP_8: return "AtomicSwap8"; + case ISD::ATOMIC_LOAD_ADD_8: return "AtomicLoadAdd8"; + case ISD::ATOMIC_LOAD_SUB_8: return "AtomicLoadSub8"; + case ISD::ATOMIC_LOAD_AND_8: return "AtomicLoadAnd8"; + case ISD::ATOMIC_LOAD_OR_8: return "AtomicLoadOr8"; + case ISD::ATOMIC_LOAD_XOR_8: return "AtomicLoadXor8"; + case ISD::ATOMIC_LOAD_NAND_8: return "AtomicLoadNand8"; + case ISD::ATOMIC_LOAD_MIN_8: return "AtomicLoadMin8"; + case ISD::ATOMIC_LOAD_MAX_8: return "AtomicLoadMax8"; + case ISD::ATOMIC_LOAD_UMIN_8: return "AtomicLoadUMin8"; + case ISD::ATOMIC_LOAD_UMAX_8: return "AtomicLoadUMax8"; + case ISD::ATOMIC_CMP_SWAP_16: return "AtomicCmpSwap16"; + case ISD::ATOMIC_SWAP_16: return "AtomicSwap16"; + case ISD::ATOMIC_LOAD_ADD_16: return "AtomicLoadAdd16"; + case ISD::ATOMIC_LOAD_SUB_16: return "AtomicLoadSub16"; + case ISD::ATOMIC_LOAD_AND_16: return "AtomicLoadAnd16"; + case ISD::ATOMIC_LOAD_OR_16: return "AtomicLoadOr16"; + case ISD::ATOMIC_LOAD_XOR_16: return "AtomicLoadXor16"; + case ISD::ATOMIC_LOAD_NAND_16: return "AtomicLoadNand16"; + case ISD::ATOMIC_LOAD_MIN_16: return "AtomicLoadMin16"; + case ISD::ATOMIC_LOAD_MAX_16: return "AtomicLoadMax16"; + case ISD::ATOMIC_LOAD_UMIN_16: return "AtomicLoadUMin16"; + case ISD::ATOMIC_LOAD_UMAX_16: return "AtomicLoadUMax16"; + case ISD::ATOMIC_CMP_SWAP_32: return "AtomicCmpSwap32"; + case ISD::ATOMIC_SWAP_32: return "AtomicSwap32"; + case ISD::ATOMIC_LOAD_ADD_32: return "AtomicLoadAdd32"; + case ISD::ATOMIC_LOAD_SUB_32: return "AtomicLoadSub32"; + case ISD::ATOMIC_LOAD_AND_32: return "AtomicLoadAnd32"; + case ISD::ATOMIC_LOAD_OR_32: return "AtomicLoadOr32"; + case ISD::ATOMIC_LOAD_XOR_32: return 
"AtomicLoadXor32"; + case ISD::ATOMIC_LOAD_NAND_32: return "AtomicLoadNand32"; + case ISD::ATOMIC_LOAD_MIN_32: return "AtomicLoadMin32"; + case ISD::ATOMIC_LOAD_MAX_32: return "AtomicLoadMax32"; + case ISD::ATOMIC_LOAD_UMIN_32: return "AtomicLoadUMin32"; + case ISD::ATOMIC_LOAD_UMAX_32: return "AtomicLoadUMax32"; + case ISD::ATOMIC_CMP_SWAP_64: return "AtomicCmpSwap64"; + case ISD::ATOMIC_SWAP_64: return "AtomicSwap64"; + case ISD::ATOMIC_LOAD_ADD_64: return "AtomicLoadAdd64"; + case ISD::ATOMIC_LOAD_SUB_64: return "AtomicLoadSub64"; + case ISD::ATOMIC_LOAD_AND_64: return "AtomicLoadAnd64"; + case ISD::ATOMIC_LOAD_OR_64: return "AtomicLoadOr64"; + case ISD::ATOMIC_LOAD_XOR_64: return "AtomicLoadXor64"; + case ISD::ATOMIC_LOAD_NAND_64: return "AtomicLoadNand64"; + case ISD::ATOMIC_LOAD_MIN_64: return "AtomicLoadMin64"; + case ISD::ATOMIC_LOAD_MAX_64: return "AtomicLoadMax64"; + case ISD::ATOMIC_LOAD_UMIN_64: return "AtomicLoadUMin64"; + case ISD::ATOMIC_LOAD_UMAX_64: return "AtomicLoadUMax64"; case ISD::PCMARKER: return "PCMarker"; case ISD::READCYCLECOUNTER: return "ReadCycleCounter"; case ISD::SRCVALUE: return "SrcValue"; diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index e0ecda4..83fe4f5 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -3664,37 +3664,198 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { } case Intrinsic::atomic_cmp_swap: { SDValue Root = getRoot(); - SDValue L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, Root, - getValue(I.getOperand(1)), - getValue(I.getOperand(2)), - getValue(I.getOperand(3)), - I.getOperand(1)); + SDValue L; + switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { + case MVT::i8: + L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_8, Root, + getValue(I.getOperand(1)), + getValue(I.getOperand(2)), + getValue(I.getOperand(3)), + I.getOperand(1)); + break; + case MVT::i16: + L = 
DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_16, Root, + getValue(I.getOperand(1)), + getValue(I.getOperand(2)), + getValue(I.getOperand(3)), + I.getOperand(1)); + break; + case MVT::i32: + L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_32, Root, + getValue(I.getOperand(1)), + getValue(I.getOperand(2)), + getValue(I.getOperand(3)), + I.getOperand(1)); + break; + case MVT::i64: + L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_64, Root, + getValue(I.getOperand(1)), + getValue(I.getOperand(2)), + getValue(I.getOperand(3)), + I.getOperand(1)); + break; + default: + assert(0 && "Invalid atomic type"); + abort(); + } setValue(&I, L); DAG.setRoot(L.getValue(1)); return 0; } case Intrinsic::atomic_load_add: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD); + switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { + case MVT::i8: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_8); + case MVT::i16: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_16); + case MVT::i32: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_32); + case MVT::i64: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_64); + default: + assert(0 && "Invalid atomic type"); + abort(); + } case Intrinsic::atomic_load_sub: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB); - case Intrinsic::atomic_load_and: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND); + switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { + case MVT::i8: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_8); + case MVT::i16: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_16); + case MVT::i32: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_32); + case MVT::i64: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_64); + default: + assert(0 && "Invalid atomic type"); + abort(); + } case Intrinsic::atomic_load_or: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR); + switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { + case MVT::i8: + return 
implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_8); + case MVT::i16: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_16); + case MVT::i32: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_32); + case MVT::i64: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_64); + default: + assert(0 && "Invalid atomic type"); + abort(); + } case Intrinsic::atomic_load_xor: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR); + switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { + case MVT::i8: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_8); + case MVT::i16: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_16); + case MVT::i32: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_32); + case MVT::i64: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_64); + default: + assert(0 && "Invalid atomic type"); + abort(); + } + case Intrinsic::atomic_load_and: + switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { + case MVT::i8: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_8); + case MVT::i16: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_16); + case MVT::i32: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_32); + case MVT::i64: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_64); + default: + assert(0 && "Invalid atomic type"); + abort(); + } case Intrinsic::atomic_load_nand: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND); - case Intrinsic::atomic_load_min: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN); + switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { + case MVT::i8: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_8); + case MVT::i16: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_16); + case MVT::i32: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_32); + case MVT::i64: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_64); + default: + assert(0 && "Invalid atomic type"); + abort(); + } case 
Intrinsic::atomic_load_max: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX); + switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { + case MVT::i8: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_8); + case MVT::i16: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_16); + case MVT::i32: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_32); + case MVT::i64: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_64); + default: + assert(0 && "Invalid atomic type"); + abort(); + } + case Intrinsic::atomic_load_min: + switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { + case MVT::i8: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_8); + case MVT::i16: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_16); + case MVT::i32: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_32); + case MVT::i64: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_64); + default: + assert(0 && "Invalid atomic type"); + abort(); + } case Intrinsic::atomic_load_umin: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN); + switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { + case MVT::i8: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_8); + case MVT::i16: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_16); + case MVT::i32: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_32); + case MVT::i64: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_64); + default: + assert(0 && "Invalid atomic type"); + abort(); + } case Intrinsic::atomic_load_umax: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX); + switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { + case MVT::i8: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_8); + case MVT::i16: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_16); + case MVT::i32: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_32); + case MVT::i64: + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_64); 
+ default: + assert(0 && "Invalid atomic type"); + abort(); + } case Intrinsic::atomic_swap: - return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP); + switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { + case MVT::i8: + return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_8); + case MVT::i16: + return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_16); + case MVT::i32: + return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_32); + case MVT::i64: + return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_64); + default: + assert(0 && "Invalid atomic type"); + abort(); + } } } diff --git a/lib/Target/TargetSelectionDAG.td b/lib/Target/TargetSelectionDAG.td index e85589f..230c956 100644 --- a/lib/Target/TargetSelectionDAG.td +++ b/lib/Target/TargetSelectionDAG.td @@ -358,30 +358,101 @@ def prefetch : SDNode<"ISD::PREFETCH" , STDPrefetch, def membarrier : SDNode<"ISD::MEMBARRIER" , STDMemBarrier, [SDNPHasChain, SDNPSideEffect]>; -// Do not use atomic_* directly, use atomic_*_size (see below) -def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3, +def atomic_cmp_swap_8 : SDNode<"ISD::ATOMIC_CMP_SWAP_8" , STDAtomic3, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2, +def atomic_load_add_8 : SDNode<"ISD::ATOMIC_LOAD_ADD_8" , STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2, +def atomic_swap_8 : SDNode<"ISD::ATOMIC_SWAP_8", STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2, +def atomic_load_sub_8 : SDNode<"ISD::ATOMIC_LOAD_SUB_8" , STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2, +def atomic_load_and_8 : SDNode<"ISD::ATOMIC_LOAD_AND_8" , STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_or : 
SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2, +def atomic_load_or_8 : SDNode<"ISD::ATOMIC_LOAD_OR_8" , STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , STDAtomic2, +def atomic_load_xor_8 : SDNode<"ISD::ATOMIC_LOAD_XOR_8" , STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", STDAtomic2, +def atomic_load_nand_8: SDNode<"ISD::ATOMIC_LOAD_NAND_8", STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", STDAtomic2, +def atomic_load_min_8 : SDNode<"ISD::ATOMIC_LOAD_MIN_8", STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", STDAtomic2, +def atomic_load_max_8 : SDNode<"ISD::ATOMIC_LOAD_MAX_8", STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", STDAtomic2, +def atomic_load_umin_8 : SDNode<"ISD::ATOMIC_LOAD_UMIN_8", STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", STDAtomic2, +def atomic_load_umax_8 : SDNode<"ISD::ATOMIC_LOAD_UMAX_8", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_cmp_swap_16 : SDNode<"ISD::ATOMIC_CMP_SWAP_16" , STDAtomic3, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_add_16 : SDNode<"ISD::ATOMIC_LOAD_ADD_16" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_swap_16 : SDNode<"ISD::ATOMIC_SWAP_16", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_sub_16 : SDNode<"ISD::ATOMIC_LOAD_SUB_16" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_and_16 : SDNode<"ISD::ATOMIC_LOAD_AND_16" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, 
SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_or_16 : SDNode<"ISD::ATOMIC_LOAD_OR_16" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_xor_16 : SDNode<"ISD::ATOMIC_LOAD_XOR_16" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_nand_16: SDNode<"ISD::ATOMIC_LOAD_NAND_16", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_min_16 : SDNode<"ISD::ATOMIC_LOAD_MIN_16", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_max_16 : SDNode<"ISD::ATOMIC_LOAD_MAX_16", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_umin_16 : SDNode<"ISD::ATOMIC_LOAD_UMIN_16", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_umax_16 : SDNode<"ISD::ATOMIC_LOAD_UMAX_16", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_cmp_swap_32 : SDNode<"ISD::ATOMIC_CMP_SWAP_32" , STDAtomic3, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_add_32 : SDNode<"ISD::ATOMIC_LOAD_ADD_32" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_swap_32 : SDNode<"ISD::ATOMIC_SWAP_32", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_sub_32 : SDNode<"ISD::ATOMIC_LOAD_SUB_32" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_and_32 : SDNode<"ISD::ATOMIC_LOAD_AND_32" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_or_32 : SDNode<"ISD::ATOMIC_LOAD_OR_32" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_xor_32 : SDNode<"ISD::ATOMIC_LOAD_XOR_32" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_nand_32: SDNode<"ISD::ATOMIC_LOAD_NAND_32", STDAtomic2, + [SDNPHasChain, 
SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_min_32 : SDNode<"ISD::ATOMIC_LOAD_MIN_32", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_max_32 : SDNode<"ISD::ATOMIC_LOAD_MAX_32", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_umin_32 : SDNode<"ISD::ATOMIC_LOAD_UMIN_32", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_umax_32 : SDNode<"ISD::ATOMIC_LOAD_UMAX_32", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_cmp_swap_64 : SDNode<"ISD::ATOMIC_CMP_SWAP_64" , STDAtomic3, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_add_64 : SDNode<"ISD::ATOMIC_LOAD_ADD_64" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_swap_64 : SDNode<"ISD::ATOMIC_SWAP_64", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_sub_64 : SDNode<"ISD::ATOMIC_LOAD_SUB_64" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_and_64 : SDNode<"ISD::ATOMIC_LOAD_AND_64" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_or_64 : SDNode<"ISD::ATOMIC_LOAD_OR_64" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_xor_64 : SDNode<"ISD::ATOMIC_LOAD_XOR_64" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_nand_64: SDNode<"ISD::ATOMIC_LOAD_NAND_64", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_min_64 : SDNode<"ISD::ATOMIC_LOAD_MIN_64", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_max_64 : SDNode<"ISD::ATOMIC_LOAD_MAX_64", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_umin_64 : SDNode<"ISD::ATOMIC_LOAD_UMIN_64", STDAtomic2, + 
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_umax_64 : SDNode<"ISD::ATOMIC_LOAD_UMAX_64", STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; // Do not use ld, st directly. Use load, extload, sextload, zextload, store, @@ -724,177 +795,6 @@ def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset), ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32; }]>; -// Atomic patterns -def atomic_cmp_swap_8 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp), - (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i8; -}]>; -def atomic_cmp_swap_16 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp), - (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i16; -}]>; -def atomic_cmp_swap_32 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp), - (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i32; -}]>; -def atomic_cmp_swap_64 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp), - (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i64; -}]>; - -def atomic_load_add_8 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_add node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i8; -}]>; -def atomic_load_add_16 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_add node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i16; -}]>; -def atomic_load_add_32 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_add node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i32; -}]>; -def atomic_load_add_64 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_add 
node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i64; -}]>; - -def atomic_load_sub_8 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_sub node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i8; -}]>; -def atomic_load_sub_16 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_sub node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i16; -}]>; -def atomic_load_sub_32 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_sub node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i32; -}]>; -def atomic_load_sub_64 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_sub node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i64; -}]>; - -def atomic_load_and_8 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_and node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i8; -}]>; -def atomic_load_and_16 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_and node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i16; -}]>; -def atomic_load_and_32 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_and node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i32; -}]>; -def atomic_load_and_64 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_and node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i64; -}]>; - -def atomic_load_or_8 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_or node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i8; -}]>; -def atomic_load_or_16 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_or node:$ptr, node:$inc), [{ - 
AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i16; -}]>; -def atomic_load_or_32 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_or node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i32; -}]>; -def atomic_load_or_64 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_or node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i64; -}]>; - -def atomic_load_xor_8 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_xor node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i8; -}]>; -def atomic_load_xor_16 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_xor node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i16; -}]>; -def atomic_load_xor_32 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_xor node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i32; -}]>; -def atomic_load_xor_64 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_xor node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i64; -}]>; - -def atomic_load_nand_8 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_nand node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i8; -}]>; -def atomic_load_nand_16 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_nand node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i16; -}]>; -def atomic_load_nand_32 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_nand node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i32; -}]>; -def atomic_load_nand_64 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_load_nand node:$ptr, node:$inc), [{ - AtomicSDNode* V = 
cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i64; -}]>; - -def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_swap node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i8; -}]>; -def atomic_swap_16 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_swap node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i16; -}]>; -def atomic_swap_32 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_swap node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i32; -}]>; -def atomic_swap_64 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_swap node:$ptr, node:$inc), [{ - AtomicSDNode* V = cast<AtomicSDNode>(N); - return V->getValueType(0) == MVT::i64; -}]>; - - - // setcc convenience fragments. def setoeq : PatFrag<(ops node:$lhs, node:$rhs), (setcc node:$lhs, node:$rhs, SETOEQ)>; diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 9cc1bed..3bc5592 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -293,15 +293,15 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand); // Expand certain atomics - setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i8, Custom); - setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i16, Custom); - setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP_8 , MVT::i8, Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP_16, MVT::i16, Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP_32, MVT::i32, Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP_64, MVT::i64, Custom); - setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i8, Expand); - setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i16, Expand); - setOperationAction(ISD::ATOMIC_LOAD_SUB , 
MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i64, Expand); + setOperationAction(ISD::ATOMIC_LOAD_SUB_8, MVT::i8, Expand); + setOperationAction(ISD::ATOMIC_LOAD_SUB_16, MVT::i16, Expand); + setOperationAction(ISD::ATOMIC_LOAD_SUB_32, MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_LOAD_SUB_64, MVT::i64, Expand); // Use the default ISD::DBG_STOPPOINT, ISD::DECLARE expansion. setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand); @@ -5914,8 +5914,11 @@ SDNode* X86TargetLowering::ExpandATOMIC_LOAD_SUB(SDNode* Op, SelectionDAG &DAG) MVT T = Op->getValueType(0); SDValue negOp = DAG.getNode(ISD::SUB, T, DAG.getConstant(0, T), Op->getOperand(2)); - return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, Op->getOperand(0), - Op->getOperand(1), negOp, + return DAG.getAtomic((T==MVT::i8 ? ISD::ATOMIC_LOAD_ADD_8: + T==MVT::i16 ? ISD::ATOMIC_LOAD_ADD_16: + T==MVT::i32 ? ISD::ATOMIC_LOAD_ADD_32: + T==MVT::i64 ? ISD::ATOMIC_LOAD_ADD_64: 0), + Op->getOperand(0), Op->getOperand(1), negOp, cast<AtomicSDNode>(Op)->getSrcValue(), cast<AtomicSDNode>(Op)->getAlignment()).Val; } @@ -5925,7 +5928,10 @@ SDNode* X86TargetLowering::ExpandATOMIC_LOAD_SUB(SDNode* Op, SelectionDAG &DAG) SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { switch (Op.getOpcode()) { default: assert(0 && "Should not custom lower this!"); - case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG); + case ISD::ATOMIC_CMP_SWAP_8: return LowerCMP_SWAP(Op,DAG); + case ISD::ATOMIC_CMP_SWAP_16: return LowerCMP_SWAP(Op,DAG); + case ISD::ATOMIC_CMP_SWAP_32: return LowerCMP_SWAP(Op,DAG); + case ISD::ATOMIC_CMP_SWAP_64: return LowerCMP_SWAP(Op,DAG); case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); @@ -5979,8 +5985,11 @@ SDNode *X86TargetLowering::ReplaceNodeResults(SDNode *N, SelectionDAG &DAG) { default: assert(0 && "Should not custom lower 
this!"); case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG); case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG); - case ISD::ATOMIC_CMP_SWAP: return ExpandATOMIC_CMP_SWAP(N, DAG); - case ISD::ATOMIC_LOAD_SUB: return ExpandATOMIC_LOAD_SUB(N,DAG); + case ISD::ATOMIC_CMP_SWAP_64: return ExpandATOMIC_CMP_SWAP(N, DAG); + case ISD::ATOMIC_LOAD_SUB_8: return ExpandATOMIC_LOAD_SUB(N,DAG); + case ISD::ATOMIC_LOAD_SUB_16: return ExpandATOMIC_LOAD_SUB(N,DAG); + case ISD::ATOMIC_LOAD_SUB_32: return ExpandATOMIC_LOAD_SUB(N,DAG); + case ISD::ATOMIC_LOAD_SUB_64: return ExpandATOMIC_LOAD_SUB(N,DAG); } } diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td index 93cd0c4..239ae97 100644 --- a/lib/Target/X86/X86Instr64bit.td +++ b/lib/Target/X86/X86Instr64bit.td @@ -1153,28 +1153,28 @@ let Constraints = "$val = $dst", Defs = [EFLAGS], usesCustomDAGSchedInserter = 1 in { def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), "#ATOMAND64 PSUEDO!", - [(set GR64:$dst, (atomic_load_and addr:$ptr, GR64:$val))]>; + [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>; def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), "#ATOMOR64 PSUEDO!", - [(set GR64:$dst, (atomic_load_or addr:$ptr, GR64:$val))]>; + [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>; def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), "#ATOMXOR64 PSUEDO!", - [(set GR64:$dst, (atomic_load_xor addr:$ptr, GR64:$val))]>; + [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>; def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), "#ATOMNAND64 PSUEDO!", - [(set GR64:$dst, (atomic_load_nand addr:$ptr, GR64:$val))]>; + [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>; def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val), "#ATOMMIN64 PSUEDO!", - [(set GR64:$dst, (atomic_load_min addr:$ptr, GR64:$val))]>; + [(set GR64:$dst, 
(atomic_load_min_64 addr:$ptr, GR64:$val))]>; def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), "#ATOMMAX64 PSUEDO!", - [(set GR64:$dst, (atomic_load_max addr:$ptr, GR64:$val))]>; + [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>; def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), "#ATOMUMIN64 PSUEDO!", - [(set GR64:$dst, (atomic_load_umin addr:$ptr, GR64:$val))]>; + [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>; def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), "#ATOMUMAX64 PSUEDO!", - [(set GR64:$dst, (atomic_load_umax addr:$ptr, GR64:$val))]>; + [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>; } //===----------------------------------------------------------------------===// diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index e55edce..ee84fc1 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -2637,66 +2637,66 @@ let Constraints = "$val = $dst", Defs = [EFLAGS], usesCustomDAGSchedInserter = 1 in { def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), "#ATOMAND32 PSUEDO!", - [(set GR32:$dst, (atomic_load_and addr:$ptr, GR32:$val))]>; + [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>; def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), "#ATOMOR32 PSUEDO!", - [(set GR32:$dst, (atomic_load_or addr:$ptr, GR32:$val))]>; + [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>; def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), "#ATOMXOR32 PSUEDO!", - [(set GR32:$dst, (atomic_load_xor addr:$ptr, GR32:$val))]>; + [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>; def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), "#ATOMNAND32 PSUEDO!", - [(set GR32:$dst, (atomic_load_nand addr:$ptr, GR32:$val))]>; + [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>; 
def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val), "#ATOMMIN32 PSUEDO!", - [(set GR32:$dst, (atomic_load_min addr:$ptr, GR32:$val))]>; + [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>; def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), "#ATOMMAX32 PSUEDO!", - [(set GR32:$dst, (atomic_load_max addr:$ptr, GR32:$val))]>; + [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>; def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), "#ATOMUMIN32 PSUEDO!", - [(set GR32:$dst, (atomic_load_umin addr:$ptr, GR32:$val))]>; + [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>; def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), "#ATOMUMAX32 PSUEDO!", - [(set GR32:$dst, (atomic_load_umax addr:$ptr, GR32:$val))]>; + [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>; def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val), "#ATOMAND16 PSUEDO!", - [(set GR16:$dst, (atomic_load_and addr:$ptr, GR16:$val))]>; + [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>; def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val), "#ATOMOR16 PSUEDO!", - [(set GR16:$dst, (atomic_load_or addr:$ptr, GR16:$val))]>; + [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>; def ATOMXOR16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val), "#ATOMXOR16 PSUEDO!", - [(set GR16:$dst, (atomic_load_xor addr:$ptr, GR16:$val))]>; + [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>; def ATOMNAND16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val), "#ATOMNAND16 PSUEDO!", - [(set GR16:$dst, (atomic_load_nand addr:$ptr, GR16:$val))]>; + [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>; def ATOMMIN16: I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val), "#ATOMMIN16 PSUEDO!", - [(set GR16:$dst, (atomic_load_min addr:$ptr, GR16:$val))]>; + [(set GR16:$dst, 
(atomic_load_min_16 addr:$ptr, GR16:$val))]>; def ATOMMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val), "#ATOMMAX16 PSUEDO!", - [(set GR16:$dst, (atomic_load_max addr:$ptr, GR16:$val))]>; + [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>; def ATOMUMIN16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val), "#ATOMUMIN16 PSUEDO!", - [(set GR16:$dst, (atomic_load_umin addr:$ptr, GR16:$val))]>; + [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>; def ATOMUMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val), "#ATOMUMAX16 PSUEDO!", - [(set GR16:$dst, (atomic_load_umax addr:$ptr, GR16:$val))]>; + [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>; def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val), "#ATOMAND8 PSUEDO!", - [(set GR8:$dst, (atomic_load_and addr:$ptr, GR8:$val))]>; + [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>; def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val), "#ATOMOR8 PSUEDO!", - [(set GR8:$dst, (atomic_load_or addr:$ptr, GR8:$val))]>; + [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>; def ATOMXOR8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val), "#ATOMXOR8 PSUEDO!", - [(set GR8:$dst, (atomic_load_xor addr:$ptr, GR8:$val))]>; + [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>; def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val), "#ATOMNAND8 PSUEDO!", - [(set GR8:$dst, (atomic_load_nand addr:$ptr, GR8:$val))]>; + [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>; } //===----------------------------------------------------------------------===// |