path: root/lib/Target/X86
author	Dale Johannesen <dalej@apple.com>	2008-08-28 02:44:49 +0000
committer	Dale Johannesen <dalej@apple.com>	2008-08-28 02:44:49 +0000
commit	e00a8a2a2e11a37fd1ddf2504bd22d225d0994d0 (patch)
tree	484cf9b89070b672dd21616a49c0f15109bd51b8 /lib/Target/X86
parent	f2c785edf0d7ceb4491333146e289fdbbba1dddf (diff)
Split the ATOMIC NodeTypes to include the size, e.g.
ATOMIC_LOAD_ADD_{8,16,32,64} instead of ATOMIC_LOAD_ADD. Increased the
hardcoded constant OpActionsCapacity to match. Large but boring; no
functional change.

This is to support partial-word atomics on ppc; i8 is not a valid type
there, so by the time we get to lowering, the ATOMIC_LOAD nodes look the
same whether the type was i8 or i32. The information can be added to the
AtomicSDNode, but that is the largest SDNode; I don't fully understand the
SDNode allocation, but it is sensitive to the largest node size, so
increasing that must be bad. This is the alternative.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55457 91177308-0d34-0410-b5e6-96231b3b80d8
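For illustration only, here is a minimal standalone C++ sketch of the opcode-per-width scheme this patch adopts. The enum values and the getAtomicLoadAddOpcode helper are hypothetical stand-ins for the real ISD node types, not the LLVM API; they just show how the operation width can be recovered from the opcode itself rather than from the (possibly promoted) value type, mirroring the ternary chain in the ExpandATOMIC_LOAD_SUB hunk below.

#include <cassert>
#include <cstdio>

// Hypothetical stand-ins for the size-suffixed ISD opcodes added by this patch.
enum SizedAtomicOpcode {
  ATOMIC_LOAD_ADD_8,
  ATOMIC_LOAD_ADD_16,
  ATOMIC_LOAD_ADD_32,
  ATOMIC_LOAD_ADD_64
};

// Map an operation width in bits to the matching size-suffixed opcode.
static SizedAtomicOpcode getAtomicLoadAddOpcode(unsigned Bits) {
  switch (Bits) {
  case 8:  return ATOMIC_LOAD_ADD_8;
  case 16: return ATOMIC_LOAD_ADD_16;
  case 32: return ATOMIC_LOAD_ADD_32;
  case 64: return ATOMIC_LOAD_ADD_64;
  default: assert(0 && "unsupported atomic width"); return ATOMIC_LOAD_ADD_32;
  }
}

int main() {
  // Once the width is encoded in the opcode, an i8 atomic on a target that
  // promotes i8 to a wider type is still distinguishable from a real i32 one.
  std::printf("i8 add opcode:  %d\n", getAtomicLoadAddOpcode(8));
  std::printf("i32 add opcode: %d\n", getAtomicLoadAddOpcode(32));
  return 0;
}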
Diffstat (limited to 'lib/Target/X86')
-rw-r--r--	lib/Target/X86/X86ISelLowering.cpp	35
-rw-r--r--	lib/Target/X86/X86Instr64bit.td	16
-rw-r--r--	lib/Target/X86/X86InstrInfo.td	40
3 files changed, 50 insertions(+), 41 deletions(-)
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 9cc1bed..3bc5592 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -293,15 +293,15 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand);
// Expand certain atomics
- setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i8, Custom);
- setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i16, Custom);
- setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32, Custom);
- setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP_8 , MVT::i8, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP_16, MVT::i16, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP_32, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP_64, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i8, Expand);
- setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i16, Expand);
- setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i32, Expand);
- setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i64, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB_8, MVT::i8, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB_16, MVT::i16, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB_32, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB_64, MVT::i64, Expand);
// Use the default ISD::DBG_STOPPOINT, ISD::DECLARE expansion.
setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
@@ -5914,8 +5914,11 @@ SDNode* X86TargetLowering::ExpandATOMIC_LOAD_SUB(SDNode* Op, SelectionDAG &DAG)
MVT T = Op->getValueType(0);
SDValue negOp = DAG.getNode(ISD::SUB, T,
DAG.getConstant(0, T), Op->getOperand(2));
- return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, Op->getOperand(0),
- Op->getOperand(1), negOp,
+ return DAG.getAtomic((T==MVT::i8 ? ISD::ATOMIC_LOAD_ADD_8:
+ T==MVT::i16 ? ISD::ATOMIC_LOAD_ADD_16:
+ T==MVT::i32 ? ISD::ATOMIC_LOAD_ADD_32:
+ T==MVT::i64 ? ISD::ATOMIC_LOAD_ADD_64: 0),
+ Op->getOperand(0), Op->getOperand(1), negOp,
cast<AtomicSDNode>(Op)->getSrcValue(),
cast<AtomicSDNode>(Op)->getAlignment()).Val;
}
@@ -5925,7 +5928,10 @@ SDNode* X86TargetLowering::ExpandATOMIC_LOAD_SUB(SDNode* Op, SelectionDAG &DAG)
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) {
default: assert(0 && "Should not custom lower this!");
- case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG);
+ case ISD::ATOMIC_CMP_SWAP_8: return LowerCMP_SWAP(Op,DAG);
+ case ISD::ATOMIC_CMP_SWAP_16: return LowerCMP_SWAP(Op,DAG);
+ case ISD::ATOMIC_CMP_SWAP_32: return LowerCMP_SWAP(Op,DAG);
+ case ISD::ATOMIC_CMP_SWAP_64: return LowerCMP_SWAP(Op,DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
@@ -5979,8 +5985,11 @@ SDNode *X86TargetLowering::ReplaceNodeResults(SDNode *N, SelectionDAG &DAG) {
default: assert(0 && "Should not custom lower this!");
case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG);
case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG);
- case ISD::ATOMIC_CMP_SWAP: return ExpandATOMIC_CMP_SWAP(N, DAG);
- case ISD::ATOMIC_LOAD_SUB: return ExpandATOMIC_LOAD_SUB(N,DAG);
+ case ISD::ATOMIC_CMP_SWAP_64: return ExpandATOMIC_CMP_SWAP(N, DAG);
+ case ISD::ATOMIC_LOAD_SUB_8: return ExpandATOMIC_LOAD_SUB(N,DAG);
+ case ISD::ATOMIC_LOAD_SUB_16: return ExpandATOMIC_LOAD_SUB(N,DAG);
+ case ISD::ATOMIC_LOAD_SUB_32: return ExpandATOMIC_LOAD_SUB(N,DAG);
+ case ISD::ATOMIC_LOAD_SUB_64: return ExpandATOMIC_LOAD_SUB(N,DAG);
}
}
diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td
index 93cd0c4..239ae97 100644
--- a/lib/Target/X86/X86Instr64bit.td
+++ b/lib/Target/X86/X86Instr64bit.td
@@ -1153,28 +1153,28 @@ let Constraints = "$val = $dst", Defs = [EFLAGS],
usesCustomDAGSchedInserter = 1 in {
def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
"#ATOMAND64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_and addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
"#ATOMOR64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_or addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
"#ATOMXOR64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_xor addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
"#ATOMNAND64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_nand addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
"#ATOMMIN64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_min addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
"#ATOMMAX64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_max addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
"#ATOMUMIN64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_umin addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
"#ATOMUMAX64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_umax addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index e55edce..ee84fc1 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -2637,66 +2637,66 @@ let Constraints = "$val = $dst", Defs = [EFLAGS],
usesCustomDAGSchedInserter = 1 in {
def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMAND32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_and addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>;
def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMOR32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_or addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>;
def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMXOR32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_xor addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>;
def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMNAND32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_nand addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>;
def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
"#ATOMMIN32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_min addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>;
def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMMAX32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_max addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>;
def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMUMIN32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_umin addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>;
def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMUMAX32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_umax addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>;
def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMAND16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_and addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>;
def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMOR16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_or addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>;
def ATOMXOR16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMXOR16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_xor addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>;
def ATOMNAND16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMNAND16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_nand addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>;
def ATOMMIN16: I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
"#ATOMMIN16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_min addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>;
def ATOMMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMMAX16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_max addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>;
def ATOMUMIN16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMUMIN16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_umin addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>;
def ATOMUMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMUMAX16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_umax addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>;
def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
"#ATOMAND8 PSUEDO!",
- [(set GR8:$dst, (atomic_load_and addr:$ptr, GR8:$val))]>;
+ [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>;
def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
"#ATOMOR8 PSUEDO!",
- [(set GR8:$dst, (atomic_load_or addr:$ptr, GR8:$val))]>;
+ [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>;
def ATOMXOR8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
"#ATOMXOR8 PSUEDO!",
- [(set GR8:$dst, (atomic_load_xor addr:$ptr, GR8:$val))]>;
+ [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>;
def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
"#ATOMNAND8 PSUEDO!",
- [(set GR8:$dst, (atomic_load_nand addr:$ptr, GR8:$val))]>;
+ [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>;
}
//===----------------------------------------------------------------------===//