path: root/lib/Target
author     Chris Lattner <sabre@nondot.org>    2007-11-24 07:07:01 +0000
committer  Chris Lattner <sabre@nondot.org>    2007-11-24 07:07:01 +0000
commit     dfb947d423f6355537d1a545fb8fa4fbc6dad230 (patch)
tree       b2375fe38bafc33f9fa95eeeb37e92789700b941 /lib/Target
parent     358670b9b52b7a074106a48e57df7f81cc8e803e (diff)
Several changes:
1) Change the interface to TargetLowering::ExpandOperationResult to take and return
   entire NODES that need a result expanded, not just the value.  This allows us to
   handle things like READCYCLECOUNTER, which returns two values.
2) Implement (extremely limited) support in LegalizeDAG::ExpandOp for MERGE_VALUES.
3) Reimplement custom lowering in LegalizeDAGTypes in terms of the new
   ExpandOperationResult.  This makes the result simpler and fully general.
4) Implement (fully general) expand support for MERGE_VALUES in LegalizeDAGTypes.
5) Implement ExpandOperationResult support for ARM f64->i64 bitconvert and ARM i64
   shifts, allowing them to work with LegalizeDAGTypes.
6) Implement ExpandOperationResult support for X86 READCYCLECOUNTER and FP_TO_SINT,
   allowing them to work with LegalizeDAGTypes.

LegalizeDAGTypes now passes several more X86 codegen tests when enabled and when
type legalization in LegalizeDAG is ifdef'd out.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@44300 91177308-0d34-0410-b5e6-96231b3b80d8
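
For orientation before the diff: a minimal sketch of the shape of the new hook, modeled
on the ARM and X86 implementations added below.  This is illustrative only, not part of
the patch; MyTargetLowering, ISD::SOME_OP and ExpandSOME_OP are hypothetical
placeholders, and it assumes the 2007-era SelectionDAG API (SDNode, SDOperand,
SelectionDAG, TargetLowering) used throughout this commit.

    // Sketch only -- not part of the patch.
    static SDNode *ExpandSOME_OP(SDNode *N, SelectionDAG &DAG);   // hypothetical helper

    class MyTargetLowering : public TargetLowering {              // hypothetical target
    public:
      explicit MyTargetLowering(TargetMachine &TM) : TargetLowering(TM) {}

      // The type legalizer hands this hook a node whose result type is illegal
      // and must be expanded.  The target returns a replacement node, or null
      // to decline and fall back to generic expansion (as the ARM shift helper
      // below does when the shift amount is not 1).
      virtual SDNode *ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
        switch (N->getOpcode()) {
        default: assert(0 && "Don't know how to custom expand this!"); abort();
        case ISD::SOME_OP:                                        // hypothetical opcode
          return ExpandSOME_OP(N, DAG);
        }
      }
    };

In the patch itself, LowerOperation also keeps temporary BIT_CONVERT/SRL/SRA (ARM) and
READCYCLECOUNTER (X86) cases, marked FIXME, that forward to the same expand helpers
until LegalizeDAGTypes lands.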
Diffstat (limited to 'lib/Target')
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp | 130
-rw-r--r--  lib/Target/ARM/ARMISelLowering.h   |   2
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp | 109
-rw-r--r--  lib/Target/X86/X86ISelLowering.h   |  12
4 files changed, 152 insertions(+), 101 deletions(-)
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index d1cd8c0..608cc4c 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -949,10 +949,8 @@ static SDOperand LowerFORMAL_ARGUMENT(SDOperand Op, SelectionDAG &DAG,
vRegs[NumGPRs+1] = VReg;
SDOperand ArgValue2 = DAG.getCopyFromReg(Root, VReg, MVT::i32);
- if (ObjectVT == MVT::i64)
- ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
- else
- ArgValue = DAG.getNode(ARMISD::FMDRR, MVT::f64, ArgValue, ArgValue2);
+ assert(ObjectVT != MVT::i64 && "i64 should already be lowered");
+ ArgValue = DAG.getNode(ARMISD::FMDRR, MVT::f64, ArgValue, ArgValue2);
}
NumGPRs += ObjGPRs;
@@ -966,12 +964,9 @@ static SDOperand LowerFORMAL_ARGUMENT(SDOperand Op, SelectionDAG &DAG,
if (ObjGPRs == 0)
ArgValue = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0);
else {
- SDOperand ArgValue2 =
- DAG.getLoad(MVT::i32, Root, FIN, NULL, 0);
- if (ObjectVT == MVT::i64)
- ArgValue= DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
- else
- ArgValue= DAG.getNode(ARMISD::FMDRR, MVT::f64, ArgValue, ArgValue2);
+ SDOperand ArgValue2 = DAG.getLoad(MVT::i32, Root, FIN, NULL, 0);
+ assert(ObjectVT != MVT::i64 && "i64 should already be lowered");
+ ArgValue = DAG.getNode(ARMISD::FMDRR, MVT::f64, ArgValue, ArgValue2);
}
} else {
// Don't emit a dead load.
@@ -1256,51 +1251,6 @@ static SDOperand LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) {
return DAG.getNode(ARMISD::CNEG, VT, AbsVal, AbsVal, ARMCC, CCR, Cmp);
}
-static SDOperand LowerBIT_CONVERT(SDOperand Op, SelectionDAG &DAG) {
- // Turn f64->i64 into FMRRD.
- assert(Op.getValueType() == MVT::i64 &&
- Op.getOperand(0).getValueType() == MVT::f64);
-
- Op = Op.getOperand(0);
- SDOperand Cvt = DAG.getNode(ARMISD::FMRRD, DAG.getVTList(MVT::i32, MVT::i32),
- &Op, 1);
-
- // Merge the pieces into a single i64 value.
- return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Cvt, Cvt.getValue(1));
-}
-
-static SDOperand LowerSRx(SDOperand Op, SelectionDAG &DAG,
- const ARMSubtarget *ST) {
- assert(Op.getValueType() == MVT::i64 &&
- (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) &&
- "Unknown shift to lower!");
-
- // We only lower SRA, SRL of 1 here, all others use generic lowering.
- if (!isa<ConstantSDNode>(Op.getOperand(1)) ||
- cast<ConstantSDNode>(Op.getOperand(1))->getValue() != 1)
- return SDOperand();
-
- // If we are in thumb mode, we don't have RRX.
- if (ST->isThumb()) return SDOperand();
-
- // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
- SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
- DAG.getConstant(0, MVT::i32));
- SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
- DAG.getConstant(1, MVT::i32));
-
- // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
- // captures the result into a carry flag.
- unsigned Opc = Op.getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
- Hi = DAG.getNode(Opc, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1);
-
- // The low part is an ARMISD::RRX operand, which shifts the carry in.
- Lo = DAG.getNode(ARMISD::RRX, MVT::i32, Lo, Hi.getValue(1));
-
- // Merge the pieces into a single i64 value.
- return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
-}
-
SDOperand ARMTargetLowering::LowerMEMCPYInline(SDOperand Chain,
SDOperand Dest,
SDOperand Source,
@@ -1396,6 +1346,51 @@ SDOperand ARMTargetLowering::LowerMEMCPYInline(SDOperand Chain,
return DAG.getNode(ISD::TokenFactor, MVT::Other, &TFOps[0], i);
}
+static SDNode *ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
+ // Turn f64->i64 into FMRRD.
+ assert(N->getValueType(0) == MVT::i64 &&
+ N->getOperand(0).getValueType() == MVT::f64);
+
+ SDOperand Op = N->getOperand(0);
+ SDOperand Cvt = DAG.getNode(ARMISD::FMRRD, DAG.getVTList(MVT::i32, MVT::i32),
+ &Op, 1);
+
+ // Merge the pieces into a single i64 value.
+ return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Cvt, Cvt.getValue(1)).Val;
+}
+
+static SDNode *ExpandSRx(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST) {
+ assert(N->getValueType(0) == MVT::i64 &&
+ (N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
+ "Unknown shift to lower!");
+
+ // We only lower SRA, SRL of 1 here, all others use generic lowering.
+ if (!isa<ConstantSDNode>(N->getOperand(1)) ||
+ cast<ConstantSDNode>(N->getOperand(1))->getValue() != 1)
+ return 0;
+
+ // If we are in thumb mode, we don't have RRX.
+ if (ST->isThumb()) return 0;
+
+ // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
+ SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, N->getOperand(0),
+ DAG.getConstant(0, MVT::i32));
+ SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, N->getOperand(0),
+ DAG.getConstant(1, MVT::i32));
+
+ // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
+ // captures the result into a carry flag.
+ unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
+ Hi = DAG.getNode(Opc, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1);
+
+ // The low part is an ARMISD::RRX operand, which shifts the carry in.
+ Lo = DAG.getNode(ARMISD::RRX, MVT::i32, Lo, Hi.getValue(1));
+
+ // Merge the pieces into a single i64 value.
+ return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi).Val;
+}
+
+
SDOperand ARMTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) {
default: assert(0 && "Don't know how to custom lower this!"); abort();
@@ -1415,20 +1410,35 @@ SDOperand ARMTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
- case ISD::BIT_CONVERT: return LowerBIT_CONVERT(Op, DAG);
- case ISD::SRL:
- case ISD::SRA: return LowerSRx(Op, DAG, Subtarget);
- case ISD::FORMAL_ARGUMENTS:
- return LowerFORMAL_ARGUMENTS(Op, DAG);
+ case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
case ISD::RETURNADDR: break;
case ISD::FRAMEADDR: break;
case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
case ISD::MEMCPY: return LowerMEMCPY(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+
+
+ // FIXME: Remove these when LegalizeDAGTypes lands.
+ case ISD::BIT_CONVERT: return SDOperand(ExpandBIT_CONVERT(Op.Val, DAG), 0);
+ case ISD::SRL:
+ case ISD::SRA: return SDOperand(ExpandSRx(Op.Val, DAG,Subtarget),0);
}
return SDOperand();
}
+
+/// ExpandOperationResult - Provide custom lowering hooks for expanding
+/// operations.
+SDNode *ARMTargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
+ switch (N->getOpcode()) {
+ default: assert(0 && "Don't know how to custom expand this!"); abort();
+ case ISD::BIT_CONVERT: return ExpandBIT_CONVERT(N, DAG);
+ case ISD::SRL:
+ case ISD::SRA: return ExpandSRx(N, DAG, Subtarget);
+ }
+}
+
+
//===----------------------------------------------------------------------===//
// ARM Scheduler Hooks
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index e6fb945..93971c5 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -76,6 +76,8 @@ namespace llvm {
explicit ARMTargetLowering(TargetMachine &TM);
virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
+ virtual SDNode *ExpandOperationResult(SDNode *N, SelectionDAG &DAG);
+
virtual const char *getTargetNodeName(unsigned Opcode) const;
virtual MachineBasicBlock *InsertAtEndOfBasicBlock(MachineInstr *MI,
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 9a86561..0482dad 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1258,7 +1258,7 @@ X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
SDOperand SizeNode = DAG.getConstant(Size, MVT::i32);
- SDOperand AlwaysInline = DAG.getConstant(1, MVT::i1);
+ SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);
return DAG.getMemcpy(Chain, PtrOff, Arg, SizeNode, AlignNode,
AlwaysInline);
@@ -3918,22 +3918,22 @@ SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
return Result;
}
-SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
+std::pair<SDOperand,SDOperand> X86TargetLowering::
+FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) {
assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
"Unknown FP_TO_SINT to lower!");
- SDOperand Result;
// These are really Legal.
if (Op.getValueType() == MVT::i32 &&
X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32)
- return Result;
+ return std::make_pair(SDOperand(), SDOperand());
if (Op.getValueType() == MVT::i32 &&
X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64)
- return Result;
+ return std::make_pair(SDOperand(), SDOperand());
if (Subtarget->is64Bit() &&
Op.getValueType() == MVT::i64 &&
Op.getOperand(0).getValueType() != MVT::f80)
- return Result;
+ return std::make_pair(SDOperand(), SDOperand());
// We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
// stack slot.
@@ -3943,10 +3943,10 @@ SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
unsigned Opc;
switch (Op.getValueType()) {
- default: assert(0 && "Invalid FP_TO_SINT to lower!");
- case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
- case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
- case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
+ default: assert(0 && "Invalid FP_TO_SINT to lower!");
+ case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
+ case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
+ case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
}
SDOperand Chain = DAG.getEntryNode();
@@ -3969,20 +3969,33 @@ SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
SDOperand Ops[] = { Chain, Value, StackSlot };
SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3);
- // Load the result. If this is an i64 load on an x86-32 host, expand the
- // load.
- if (Op.getValueType() != MVT::i64 || Subtarget->is64Bit())
- return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0);
-
- SDOperand Lo = DAG.getLoad(MVT::i32, FIST, StackSlot, NULL, 0);
- StackSlot = DAG.getNode(ISD::ADD, StackSlot.getValueType(), StackSlot,
- DAG.getConstant(StackSlot.getValueType(), 4));
- SDOperand Hi = DAG.getLoad(MVT::i32, FIST, StackSlot, NULL, 0);
-
+ return std::make_pair(FIST, StackSlot);
+}
+
+SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
+ assert((Op.getValueType() != MVT::i64 || Subtarget->is64Bit()) &&
+ "This FP_TO_SINT must be expanded!");
+
+ std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG);
+ SDOperand FIST = Vals.first, StackSlot = Vals.second;
+ if (FIST.Val == 0) return SDOperand();
- return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
+ // Load the result.
+ return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0);
}
+SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) {
+ std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG);
+ SDOperand FIST = Vals.first, StackSlot = Vals.second;
+ if (FIST.Val == 0) return 0;
+
+ // Return an i64 load from the stack slot.
+ SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0);
+
+ // Use a MERGE_VALUES node to drop the chain result value.
+ return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val;
+}
+
SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
MVT::ValueType VT = Op.getValueType();
MVT::ValueType EltVT = VT;
@@ -4587,32 +4600,36 @@ SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain,
return Chain;
}
-SDOperand
-X86TargetLowering::LowerREADCYCLCECOUNTER(SDOperand Op, SelectionDAG &DAG) {
+/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain
+SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
- SDOperand TheOp = Op.getOperand(0);
- SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheOp, 1);
+ SDOperand TheChain = N->getOperand(0);
+ SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1);
if (Subtarget->is64Bit()) {
- SDOperand Copy1 =
- DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1));
- SDOperand Copy2 = DAG.getCopyFromReg(Copy1.getValue(1), X86::RDX,
- MVT::i64, Copy1.getValue(2));
- SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, Copy2,
+ SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1));
+ SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX,
+ MVT::i64, rax.getValue(2));
+ SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx,
DAG.getConstant(32, MVT::i8));
SDOperand Ops[] = {
- DAG.getNode(ISD::OR, MVT::i64, Copy1, Tmp), Copy2.getValue(1)
+ DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1)
};
Tys = DAG.getVTList(MVT::i64, MVT::Other);
- return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2);
+ return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val;
}
- SDOperand Copy1 = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1));
- SDOperand Copy2 = DAG.getCopyFromReg(Copy1.getValue(1), X86::EDX,
- MVT::i32, Copy1.getValue(2));
- SDOperand Ops[] = { Copy1, Copy2, Copy2.getValue(1) };
- Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
- return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 3);
+ SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1));
+ SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX,
+ MVT::i32, eax.getValue(2));
+ // Use a buildpair to merge the two 32-bit values into a 64-bit one.
+ SDOperand Ops[] = { eax, edx };
+ Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2);
+
+ // Use a MERGE_VALUES to return the value and chain.
+ Ops[1] = edx.getValue(1);
+ Tys = DAG.getVTList(MVT::i64, MVT::Other);
+ return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val;
}
SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) {
@@ -5032,7 +5049,6 @@ SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
case ISD::MEMSET: return LowerMEMSET(Op, DAG);
case ISD::MEMCPY: return LowerMEMCPY(Op, DAG);
- case ISD::READCYCLECOUNTER: return LowerREADCYCLCECOUNTER(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG);
case ISD::VACOPY: return LowerVACOPY(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
@@ -5044,8 +5060,21 @@ SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG);
case ISD::FLT_ROUNDS: return LowerFLT_ROUNDS(Op, DAG);
+
+
+ // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands.
+ case ISD::READCYCLECOUNTER:
+ return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0);
+ }
+}
+
+/// ExpandOperation - Provide custom lowering hooks for expanding operations.
+SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
+ switch (N->getOpcode()) {
+ default: assert(0 && "Should not custom lower this!");
+ case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG);
+ case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG);
}
- return SDOperand();
}
const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index ac16ff4..5fe49f8 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -321,6 +321,12 @@ namespace llvm {
///
virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
+ /// ExpandOperation - Custom lower the specified operation, splitting the
+ /// value into two pieces.
+ ///
+ virtual SDNode *ExpandOperationResult(SDNode *N, SelectionDAG &DAG);
+
+
virtual SDOperand PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
virtual MachineBasicBlock *InsertAtEndOfBasicBlock(MachineInstr *MI,
@@ -444,6 +450,9 @@ namespace llvm {
SDOperand LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC);
+ std::pair<SDOperand,SDOperand> FP_TO_SINTHelper(SDOperand Op,
+ SelectionDAG &DAG);
+
SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG);
@@ -471,7 +480,6 @@ namespace llvm {
SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG);
- SDOperand LowerREADCYCLCECOUNTER(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerVACOPY(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG);
@@ -481,6 +489,8 @@ namespace llvm {
SDOperand LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerTRAMPOLINE(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerFLT_ROUNDS(SDOperand Op, SelectionDAG &DAG);
+ SDNode *ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG);
+ SDNode *ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG);
};
}