author    | Duncan Sands <baldrick@free.fr> | 2008-06-06 12:08:01 +0000
committer | Duncan Sands <baldrick@free.fr> | 2008-06-06 12:08:01 +0000
commit    | 92c439168b552f73b1459d8ce1e31975cdca6d2a (patch)
tree      | 318323f012863299f9ae063e79a47985c2e8dc4b /lib/Target
parent    | 533604e367c255a7ad43d1857231db7abf360581 (diff)
Wrap MVT::ValueType in a struct to get type safety
and better control over the abstraction. Rename the
type to MVT. To update out-of-tree patches, the main
changes are to rename MVT::ValueType to MVT and to
rewrite expressions like MVT::getSizeInBits(VT) as
VT.getSizeInBits(). Use VT.getSimpleVT() to extract
an MVT::SimpleValueType for use in switch statements
(you will get an assert failure if VT is an extended
value type; these shouldn't exist after type
legalization). A brief before/after sketch follows
the diffstat below.
This results in a small speedup of codegen and no
new test-suite failures (x86-64 Linux).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@52044 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target')
30 files changed, 711 insertions, 709 deletions
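For out-of-tree code, the rewrite is mechanical. The sketch below shows the old and new forms side by side; it is a minimal illustration assuming the post-commit ValueTypes.h API, and describeVT is a hypothetical call site, not an LLVM function:

  // Minimal sketch of the MVT::ValueType -> MVT migration.
  // Assumes the post-commit llvm/CodeGen/ValueTypes.h API;
  // describeVT exists only for illustration.
  #include "llvm/CodeGen/ValueTypes.h"
  #include <string>
  using namespace llvm;

  static std::string describeVT(MVT VT) {    // was: MVT::ValueType VT
    if (VT.isVector())                       // was: MVT::isVector(VT)
      return "vector of " + VT.getVectorElementType().getMVTString();

    unsigned Bits = VT.getSizeInBits();      // was: MVT::getSizeInBits(VT)

    // Switches now go through the underlying enum; getSimpleVT()
    // asserts if VT is an extended type (none should survive
    // type legalization).
    switch (VT.getSimpleVT()) {              // was: switch (VT)
    case MVT::i32:
    case MVT::i64:
      return "integer, " + std::to_string(Bits) + " bits";
    case MVT::f32:
    case MVT::f64:
      return "float, " + std::to_string(Bits) + " bits";
    default:                                 // was: MVT::getValueTypeString(VT)
      return VT.getMVTString();
    }
  }

Loops that iterate over value types as raw unsigneds, as in the CellSPU changes below, now construct the MVT explicitly, e.g. MVT VT = (MVT::SimpleValueType)sctype.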
diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index 5029a69..eaa16fc 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -660,7 +660,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { case ISD::LOAD: { LoadSDNode *LD = cast<LoadSDNode>(Op); ISD::MemIndexedMode AM = LD->getAddressingMode(); - MVT::ValueType LoadedVT = LD->getMemoryVT(); + MVT LoadedVT = LD->getMemoryVT(); if (AM != ISD::UNINDEXED) { SDOperand Offset, AMOpc; bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC); @@ -741,7 +741,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { } case ARMISD::CMOV: { bool isThumb = Subtarget->isThumb(); - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); SDOperand N0 = Op.getOperand(0); SDOperand N1 = Op.getOperand(1); SDOperand N2 = Op.getOperand(2); @@ -805,7 +805,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { cast<ConstantSDNode>(N2)->getValue()), MVT::i32); SDOperand Ops[] = { N0, N1, Tmp2, N3, InFlag }; unsigned Opc = 0; - switch (VT) { + switch (VT.getSimpleVT()) { default: assert(false && "Illegal conditional move type!"); break; case MVT::i32: @@ -821,7 +821,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { return CurDAG->SelectNodeTo(Op.Val, Opc, VT, Ops, 5); } case ARMISD::CNEG: { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); SDOperand N0 = Op.getOperand(0); SDOperand N1 = Op.getOperand(1); SDOperand N2 = Op.getOperand(2); @@ -837,7 +837,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { cast<ConstantSDNode>(N2)->getValue()), MVT::i32); SDOperand Ops[] = { N0, N1, Tmp2, N3, InFlag }; unsigned Opc = 0; - switch (VT) { + switch (VT.getSimpleVT()) { default: assert(false && "Illegal conditional move type!"); break; case MVT::f32: diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index dc76b7b..cfb98cb 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -363,7 +363,7 @@ static bool FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, } static void -HowToPassArgument(MVT::ValueType ObjectVT, unsigned NumGPRs, +HowToPassArgument(MVT ObjectVT, unsigned NumGPRs, unsigned StackOffset, unsigned &NeededGPRs, unsigned &NeededStackSize, unsigned &GPRPad, unsigned &StackPad, ISD::ArgFlagsTy Flags) { @@ -375,7 +375,7 @@ HowToPassArgument(MVT::ValueType ObjectVT, unsigned NumGPRs, GPRPad = NumGPRs % ((align + 3)/4); StackPad = StackOffset % align; unsigned firstGPR = NumGPRs + GPRPad; - switch (ObjectVT) { + switch (ObjectVT.getSimpleVT()) { default: assert(0 && "Unhandled argument type!"); case MVT::i32: case MVT::f32: @@ -400,7 +400,7 @@ HowToPassArgument(MVT::ValueType ObjectVT, unsigned NumGPRs, /// ARMISD:CALL <- callseq_end chain. Also add input and output parameter /// nodes. 
SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType RetVT= Op.Val->getValueType(0); + MVT RetVT= Op.Val->getValueType(0); SDOperand Chain = Op.getOperand(0); unsigned CallConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); assert((CallConv == CallingConv::C || @@ -419,7 +419,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { unsigned ObjGPRs; unsigned StackPad; unsigned GPRPad; - MVT::ValueType ObjectVT = Op.getOperand(5+2*i).getValueType(); + MVT ObjectVT = Op.getOperand(5+2*i).getValueType(); ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags(); HowToPassArgument(ObjectVT, NumGPRs, NumBytes, ObjGPRs, ObjSize, @@ -446,7 +446,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { SDOperand Arg = Op.getOperand(5+2*i); ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags(); - MVT::ValueType ArgVT = Arg.getValueType(); + MVT ArgVT = Arg.getValueType(); unsigned ObjSize; unsigned ObjGPRs; @@ -457,7 +457,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { NumGPRs += GPRPad; ArgOffset += StackPad; if (ObjGPRs > 0) { - switch (ArgVT) { + switch (ArgVT.getSimpleVT()) { default: assert(0 && "Unexpected ValueType for argument!"); case MVT::i32: RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Arg)); @@ -587,7 +587,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { InFlag = Chain.getValue(1); } - std::vector<MVT::ValueType> NodeTys; + std::vector<MVT> NodeTys; NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. @@ -617,7 +617,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { NodeTys.clear(); // If the call has results, copy the values out of the ret val registers. - switch (RetVT) { + switch (RetVT.getSimpleVT()) { default: assert(0 && "Unexpected ret value!"); case MVT::Other: break; @@ -708,7 +708,7 @@ static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { // be used to form addressing mode. These wrapped nodes will be selected // into MOVi. static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); SDOperand Res; if (CP->isMachineConstantPoolEntry()) @@ -724,7 +724,7 @@ static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { SDOperand ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) { - MVT::ValueType PtrVT = getPointerTy(); + MVT PtrVT = getPointerTy(); unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, ARMCP::CPValue, @@ -758,7 +758,7 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, GlobalValue *GV = GA->getGlobal(); SDOperand Offset; SDOperand Chain = DAG.getEntryNode(); - MVT::ValueType PtrVT = getPointerTy(); + MVT PtrVT = getPointerTy(); // Get the Thread Pointer SDOperand ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, PtrVT); @@ -807,7 +807,7 @@ ARMTargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { SDOperand ARMTargetLowering::LowerGlobalAddressELF(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = getPointerTy(); + MVT PtrVT = getPointerTy(); GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); Reloc::Model RelocM = getTargetMachine().getRelocationModel(); if (RelocM == Reloc::PIC_) { @@ -840,7 +840,7 @@ static bool GVIsIndirectSymbol(GlobalValue *GV, Reloc::Model RelocM) { SDOperand ARMTargetLowering::LowerGlobalAddressDarwin(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = getPointerTy(); + MVT PtrVT = getPointerTy(); GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); Reloc::Model RelocM = getTargetMachine().getRelocationModel(); bool IsIndirect = GVIsIndirectSymbol(GV, RelocM); @@ -875,7 +875,7 @@ SDOperand ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDOperand Op, SelectionDAG &DAG){ assert(Subtarget->isTargetELF() && "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); - MVT::ValueType PtrVT = getPointerTy(); + MVT PtrVT = getPointerTy(); unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; ARMConstantPoolValue *CPV = new ARMConstantPoolValue("_GLOBAL_OFFSET_TABLE_", ARMPCLabelIndex, @@ -888,7 +888,7 @@ SDOperand ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDOperand Op, } static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); switch (IntNo) { default: return SDOperand(); // Don't custom lower most intrinsics. @@ -901,7 +901,7 @@ static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG, unsigned VarArgsFrameIndex) { // vastart just stores the address of the VarArgsFrameIndex slot into the // memory location argument. - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0); @@ -911,7 +911,7 @@ static SDOperand LowerFORMAL_ARGUMENT(SDOperand Op, SelectionDAG &DAG, unsigned ArgNo, unsigned &NumGPRs, unsigned &ArgOffset) { MachineFunction &MF = DAG.getMachineFunction(); - MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); + MVT ObjectVT = Op.getValue(ArgNo).getValueType(); SDOperand Root = Op.getOperand(0); std::vector<SDOperand> ArgValues; MachineRegisterInfo &RegInfo = MF.getRegInfo(); @@ -1025,7 +1025,7 @@ ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { ArgValues.push_back(Root); // Return the new list of results. 
- std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(), + std::vector<MVT> RetVT(Op.Val->value_begin(), Op.Val->value_end()); return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size()); } @@ -1123,7 +1123,7 @@ static SDOperand getVFPCmp(SDOperand LHS, SDOperand RHS, SelectionDAG &DAG) { static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG, const ARMSubtarget *ST) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); SDOperand LHS = Op.getOperand(0); SDOperand RHS = Op.getOperand(1); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); @@ -1195,7 +1195,7 @@ SDOperand ARMTargetLowering::LowerBR_JT(SDOperand Op, SelectionDAG &DAG) { SDOperand Table = Op.getOperand(1); SDOperand Index = Op.getOperand(2); - MVT::ValueType PTy = getPointerTy(); + MVT PTy = getPointerTy(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); SDOperand UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); @@ -1204,7 +1204,7 @@ SDOperand ARMTargetLowering::LowerBR_JT(SDOperand Op, SelectionDAG &DAG) { Index = DAG.getNode(ISD::MUL, PTy, Index, DAG.getConstant(4, PTy)); SDOperand Addr = DAG.getNode(ISD::ADD, PTy, Index, Table); bool isPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_; - Addr = DAG.getLoad(isPIC ? (MVT::ValueType)MVT::i32 : PTy, + Addr = DAG.getLoad(isPIC ? (MVT)MVT::i32 : PTy, Chain, Addr, NULL, 0); Chain = Addr.getValue(1); if (isPIC) @@ -1220,7 +1220,7 @@ static SDOperand LowerFP_TO_INT(SDOperand Op, SelectionDAG &DAG) { } static SDOperand LowerINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); unsigned Opc = Op.getOpcode() == ISD::SINT_TO_FP ? ARMISD::SITOF : ARMISD::UITOF; @@ -1232,8 +1232,8 @@ static SDOperand LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { // Implement fcopysign with a fabs and a conditional fneg. SDOperand Tmp0 = Op.getOperand(0); SDOperand Tmp1 = Op.getOperand(1); - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType SrcVT = Tmp1.getValueType(); + MVT VT = Op.getValueType(); + MVT SrcVT = Tmp1.getValueType(); SDOperand AbsVal = DAG.getNode(ISD::FABS, VT, Tmp0); SDOperand Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG); SDOperand ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32); @@ -1265,7 +1265,7 @@ ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, unsigned BytesLeft = SizeVal & 3; unsigned NumMemOps = SizeVal >> 2; unsigned EmittedNumMemOps = 0; - MVT::ValueType VT = MVT::i32; + MVT VT = MVT::i32; unsigned VTSize = 4; unsigned i = 0; const unsigned MAX_LOADS_IN_LDM = 6; @@ -1536,7 +1536,7 @@ SDOperand ARMTargetLowering::PerformDAGCombine(SDNode *N, /// isLegalAddressImmediate - Return true if the integer value can be used /// as the offset of the target addressing mode for load / store of the /// given type. 
-static bool isLegalAddressImmediate(int64_t V, MVT::ValueType VT, +static bool isLegalAddressImmediate(int64_t V, MVT VT, const ARMSubtarget *Subtarget) { if (V == 0) return true; @@ -1546,7 +1546,7 @@ static bool isLegalAddressImmediate(int64_t V, MVT::ValueType VT, return false; unsigned Scale = 1; - switch (VT) { + switch (VT.getSimpleVT()) { default: return false; case MVT::i1: case MVT::i8: @@ -1570,7 +1570,7 @@ static bool isLegalAddressImmediate(int64_t V, MVT::ValueType VT, if (V < 0) V = - V; - switch (VT) { + switch (VT.getSimpleVT()) { default: return false; case MVT::i1: case MVT::i8: @@ -1615,7 +1615,7 @@ bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, return false; int Scale = AM.Scale; - switch (getValueType(Ty)) { + switch (getValueType(Ty).getSimpleVT()) { default: return false; case MVT::i1: case MVT::i8: @@ -1650,7 +1650,7 @@ bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, } -static bool getIndexedAddressParts(SDNode *Ptr, MVT::ValueType VT, +static bool getIndexedAddressParts(SDNode *Ptr, MVT VT, bool isSEXTLoad, SDOperand &Base, SDOperand &Offset, bool &isInc, SelectionDAG &DAG) { @@ -1717,7 +1717,7 @@ ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, if (Subtarget->isThumb()) return false; - MVT::ValueType VT; + MVT VT; SDOperand Ptr; bool isSEXTLoad = false; if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { @@ -1751,7 +1751,7 @@ bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, if (Subtarget->isThumb()) return false; - MVT::ValueType VT; + MVT VT; SDOperand Ptr; bool isSEXTLoad = false; if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { @@ -1816,7 +1816,7 @@ ARMTargetLowering::getConstraintType(const std::string &Constraint) const { std::pair<unsigned, const TargetRegisterClass*> ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const { + MVT VT) const { if (Constraint.size() == 1) { // GCC RS6000 Constraint Letters switch (Constraint[0]) { @@ -1838,7 +1838,7 @@ ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, std::vector<unsigned> ARMTargetLowering:: getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const { + MVT VT) const { if (Constraint.size() != 1) return std::vector<unsigned>(); diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h index ce6d5fe..8e5a8b3 100644 --- a/lib/Target/ARM/ARMISelLowering.h +++ b/lib/Target/ARM/ARMISelLowering.h @@ -114,10 +114,10 @@ namespace llvm { ConstraintType getConstraintType(const std::string &Constraint) const; std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; std::vector<unsigned> getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; virtual const ARMSubtarget* getSubtarget() { return Subtarget; diff --git a/lib/Target/Alpha/AlphaISelDAGToDAG.cpp b/lib/Target/Alpha/AlphaISelDAGToDAG.cpp index 303c5aa..c7eefcc 100644 --- a/lib/Target/Alpha/AlphaISelDAGToDAG.cpp +++ b/lib/Target/Alpha/AlphaISelDAGToDAG.cpp @@ -334,7 +334,7 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { case ISD::TargetConstantFP: { ConstantFPSDNode *CN = cast<ConstantFPSDNode>(N); bool isDouble = N->getValueType(0) == MVT::f64; - MVT::ValueType T = isDouble ? MVT::f64 : MVT::f32; + MVT T = isDouble ? 
MVT::f64 : MVT::f32; if (CN->getValueAPF().isPosZero()) { return CurDAG->SelectNodeTo(N, isDouble ? Alpha::CPYST : Alpha::CPYSS, T, CurDAG->getRegister(Alpha::F31, T), @@ -350,7 +350,7 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { } case ISD::SETCC: - if (MVT::isFloatingPoint(N->getOperand(0).Val->getValueType(0))) { + if (N->getOperand(0).Val->getValueType(0).isFloatingPoint()) { ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); unsigned Opc = Alpha::WTF; @@ -404,9 +404,9 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { break; case ISD::SELECT: - if (MVT::isFloatingPoint(N->getValueType(0)) && + if (N->getValueType(0).isFloatingPoint() && (N->getOperand(0).getOpcode() != ISD::SETCC || - !MVT::isFloatingPoint(N->getOperand(0).getOperand(1).getValueType()))) { + !N->getOperand(0).getOperand(1).getValueType().isFloatingPoint())) { //This should be the condition not covered by the Patterns //FIXME: Don't have SelectCode die, but rather return something testable // so that things like this can be caught in fall though code @@ -472,7 +472,7 @@ void AlphaDAGToDAGISel::SelectCALL(SDOperand Op) { AddToISelQueue(Chain); std::vector<SDOperand> CallOperands; - std::vector<MVT::ValueType> TypeOperands; + std::vector<MVT> TypeOperands; //grab the arguments for(int i = 2, e = N->getNumOperands(); i < e; ++i) { @@ -489,7 +489,7 @@ void AlphaDAGToDAGISel::SelectCALL(SDOperand Op) { for (int i = 6; i < count; ++i) { unsigned Opc = Alpha::WTF; - if (MVT::isInteger(TypeOperands[i])) { + if (TypeOperands[i].isInteger()) { Opc = Alpha::STQ; } else if (TypeOperands[i] == MVT::f32) { Opc = Alpha::STS; @@ -504,7 +504,7 @@ void AlphaDAGToDAGISel::SelectCALL(SDOperand Op) { Chain = SDOperand(CurDAG->getTargetNode(Opc, MVT::Other, Ops, 4), 0); } for (int i = 0; i < std::min(6, count); ++i) { - if (MVT::isInteger(TypeOperands[i])) { + if (TypeOperands[i].isInteger()) { Chain = CurDAG->getCopyToReg(Chain, args_int[i], CallOperands[i], InFlag); InFlag = Chain.getValue(1); } else if (TypeOperands[i] == MVT::f32 || TypeOperands[i] == MVT::f64) { @@ -533,7 +533,7 @@ void AlphaDAGToDAGISel::SelectCALL(SDOperand Op) { std::vector<SDOperand> CallResults; - switch (N->getValueType(0)) { + switch (N->getValueType(0).getSimpleVT()) { default: assert(0 && "Unexpected ret value!"); case MVT::Other: break; case MVT::i64: diff --git a/lib/Target/Alpha/AlphaISelLowering.cpp b/lib/Target/Alpha/AlphaISelLowering.cpp index 91b1180..494edda 100644 --- a/lib/Target/Alpha/AlphaISelLowering.cpp +++ b/lib/Target/Alpha/AlphaISelLowering.cpp @@ -145,8 +145,7 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM) : TargetLowering(TM) computeRegisterProperties(); } -MVT::ValueType -AlphaTargetLowering::getSetCCResultType(const SDOperand &) const { +MVT AlphaTargetLowering::getSetCCResultType(const SDOperand &) const { return MVT::i64; } @@ -169,7 +168,7 @@ const char *AlphaTargetLowering::getTargetNodeName(unsigned Opcode) const { } static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); SDOperand Zero = DAG.getConstant(0, PtrVT); @@ -217,14 +216,13 @@ static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { SDOperand argt; - MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); + MVT ObjectVT = 
Op.getValue(ArgNo).getValueType(); SDOperand ArgVal; if (ArgNo < 6) { - switch (ObjectVT) { + switch (ObjectVT.getSimpleVT()) { default: - cerr << "Unknown Type " << ObjectVT << "\n"; - abort(); + assert(false && "Invalid value type!"); case MVT::f64: args_float[ArgNo] = AddLiveIn(MF, args_float[ArgNo], &Alpha::F8RCRegClass); @@ -282,7 +280,7 @@ static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, ArgValues.push_back(Root); // Return the new list of results. - std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(), + std::vector<MVT> RetVT(Op.Val->value_begin(), Op.Val->value_end()); return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size()); } @@ -300,12 +298,12 @@ static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { break; //return SDOperand(); // ret void is legal case 3: { - MVT::ValueType ArgVT = Op.getOperand(1).getValueType(); + MVT ArgVT = Op.getOperand(1).getValueType(); unsigned ArgReg; - if (MVT::isInteger(ArgVT)) + if (ArgVT.isInteger()) ArgReg = Alpha::R0; else { - assert(MVT::isFloatingPoint(ArgVT)); + assert(ArgVT.isFloatingPoint()); ArgReg = Alpha::F0; } Copy = DAG.getCopyToReg(Copy, ArgReg, Op.getOperand(1), Copy.getValue(1)); @@ -332,7 +330,7 @@ AlphaTargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, std::vector<SDOperand> args_to_use; for (unsigned i = 0, e = Args.size(); i != e; ++i) { - switch (getValueType(Args[i].Ty)) { + switch (getValueType(Args[i].Ty).getSimpleVT()) { default: assert(0 && "Unexpected ValueType for argument!"); case MVT::i1: case MVT::i8: @@ -355,9 +353,9 @@ AlphaTargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, args_to_use.push_back(Args[i].Node); } - std::vector<MVT::ValueType> RetVals; - MVT::ValueType RetTyVT = getValueType(RetTy); - MVT::ValueType ActualRetTyVT = RetTyVT; + std::vector<MVT> RetVals; + MVT RetTyVT = getValueType(RetTy); + MVT ActualRetTyVT = RetTyVT; if (RetTyVT >= MVT::i1 && RetTyVT <= MVT::i32) ActualRetTyVT = MVT::i64; @@ -407,17 +405,17 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::JumpTable: return LowerJumpTable(Op, DAG); case ISD::SINT_TO_FP: { - assert(MVT::i64 == Op.getOperand(0).getValueType() && + assert(Op.getOperand(0).getValueType() == MVT::i64 && "Unhandled SINT_TO_FP type in custom expander!"); SDOperand LD; - bool isDouble = MVT::f64 == Op.getValueType(); + bool isDouble = Op.getValueType() == MVT::f64; LD = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0)); SDOperand FP = DAG.getNode(isDouble?AlphaISD::CVTQT_:AlphaISD::CVTQS_, isDouble?MVT::f64:MVT::f32, LD); return FP; } case ISD::FP_TO_SINT: { - bool isDouble = MVT::f64 == Op.getOperand(0).getValueType(); + bool isDouble = Op.getOperand(0).getValueType() == MVT::f64; SDOperand src = Op.getOperand(0); if (!isDouble) //Promote @@ -465,7 +463,7 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::SREM: //Expand only on constant case if (Op.getOperand(1).getOpcode() == ISD::Constant) { - MVT::ValueType VT = Op.Val->getValueType(0); + MVT VT = Op.Val->getValueType(0); SDOperand Tmp1 = Op.Val->getOpcode() == ISD::UREM ? BuildUDIV(Op.Val, DAG, NULL) : BuildSDIV(Op.Val, DAG, NULL); @@ -476,7 +474,7 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { //fall through case ISD::SDIV: case ISD::UDIV: - if (MVT::isInteger(Op.getValueType())) { + if (Op.getValueType().isInteger()) { if (Op.getOperand(1).getOpcode() == ISD::Constant) return Op.getOpcode() == ISD::SDIV ? 
BuildSDIV(Op.Val, DAG, NULL) : BuildUDIV(Op.Val, DAG, NULL); @@ -505,7 +503,7 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { SDOperand Offset = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, Base.getValue(1), Tmp, NULL, 0, MVT::i32); SDOperand DataPtr = DAG.getNode(ISD::ADD, MVT::i64, Base, Offset); - if (MVT::isFloatingPoint(Op.getValueType())) + if (Op.getValueType().isFloatingPoint()) { //if fp && Offset < 6*8, then subtract 6*8 from DataPtr SDOperand FPDataPtr = DAG.getNode(ISD::SUB, MVT::i64, DataPtr, @@ -596,7 +594,7 @@ AlphaTargetLowering::getConstraintType(const std::string &Constraint) const { std::vector<unsigned> AlphaTargetLowering:: getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const { + MVT VT) const { if (Constraint.size() == 1) { switch (Constraint[0]) { default: break; // Unknown constriant letter diff --git a/lib/Target/Alpha/AlphaISelLowering.h b/lib/Target/Alpha/AlphaISelLowering.h index 8738d02..f88437e 100644 --- a/lib/Target/Alpha/AlphaISelLowering.h +++ b/lib/Target/Alpha/AlphaISelLowering.h @@ -67,7 +67,7 @@ namespace llvm { explicit AlphaTargetLowering(TargetMachine &TM); /// getSetCCResultType - Get the SETCC result ValueType - virtual MVT::ValueType getSetCCResultType(const SDOperand &) const; + virtual MVT getSetCCResultType(const SDOperand &) const; /// LowerOperation - Provide custom lowering hooks for some operations. /// @@ -88,7 +88,7 @@ namespace llvm { std::vector<unsigned> getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; bool hasITOF() { return useITOF; } diff --git a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp index b491b13..c181f3c 100644 --- a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp +++ b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp @@ -110,7 +110,7 @@ namespace { bool isIntS16Immediate(ConstantSDNode *CN, short &Imm) { - MVT::ValueType vt = CN->getValueType(0); + MVT vt = CN->getValueType(0); Imm = (short) CN->getValue(); if (vt >= MVT::i1 && vt <= MVT::i16) { return true; @@ -139,7 +139,7 @@ namespace { static bool isFPS16Immediate(ConstantFPSDNode *FPN, short &Imm) { - MVT::ValueType vt = FPN->getValueType(0); + MVT vt = FPN->getValueType(0); if (vt == MVT::f32) { int val = FloatToBits(FPN->getValueAPF().convertToFloat()); int sval = (int) ((val << 16) >> 16); @@ -161,10 +161,10 @@ namespace { } //===------------------------------------------------------------------===// - //! MVT::ValueType to "useful stuff" mapping structure: + //! MVT to "useful stuff" mapping structure: struct valtype_map_s { - MVT::ValueType VT; + MVT VT; unsigned ldresult_ins; /// LDRESULT instruction (0 = undefined) bool ldresult_imm; /// LDRESULT instruction requires immediate? 
int prefslot_byte; /// Byte offset of the "preferred" slot @@ -189,7 +189,7 @@ namespace { const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]); - const valtype_map_s *getValueTypeMapEntry(MVT::ValueType VT) + const valtype_map_s *getValueTypeMapEntry(MVT VT) { const valtype_map_s *retval = 0; for (size_t i = 0; i < n_valtype_map; ++i) { @@ -203,7 +203,7 @@ namespace { #ifndef NDEBUG if (retval == 0) { cerr << "SPUISelDAGToDAG.cpp: getValueTypeMapEntry returns NULL for " - << MVT::getValueTypeString(VT) + << VT.getMVTString() << "\n"; abort(); } @@ -364,7 +364,7 @@ bool SPUDAGToDAGISel::SelectAFormAddr(SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Index) { // These match the addr256k operand type: - MVT::ValueType OffsVT = MVT::i16; + MVT OffsVT = MVT::i16; SDOperand Zero = CurDAG->getTargetConstant(0, OffsVT); switch (N.getOpcode()) { @@ -446,7 +446,7 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDOperand Op, SDOperand N, SDOperand &Bas SDOperand &Index, int minOffset, int maxOffset) { unsigned Opc = N.getOpcode(); - unsigned PtrTy = SPUtli.getPointerTy(); + MVT PtrTy = SPUtli.getPointerTy(); if (Opc == ISD::FrameIndex) { // Stack frame index must be less than 512 (divided by 16): @@ -587,7 +587,7 @@ SPUDAGToDAGISel::Select(SDOperand Op) { unsigned Opc = N->getOpcode(); int n_ops = -1; unsigned NewOpc; - MVT::ValueType OpVT = Op.getValueType(); + MVT OpVT = Op.getValueType(); SDOperand Ops[8]; if (Opc >= ISD::BUILTIN_OP_END && Opc < SPUISD::FIRST_NUMBER) { @@ -596,7 +596,7 @@ SPUDAGToDAGISel::Select(SDOperand Op) { // Selects to (add $sp, FI * stackSlotSize) int FI = SPUFrameInfo::FItoStackOffset(cast<FrameIndexSDNode>(N)->getIndex()); - MVT::ValueType PtrVT = SPUtli.getPointerTy(); + MVT PtrVT = SPUtli.getPointerTy(); // Adjust stack slot to actual offset in frame: if (isS10Constant(FI)) { @@ -636,7 +636,7 @@ SPUDAGToDAGISel::Select(SDOperand Op) { } } else if (Opc == SPUISD::LDRESULT) { // Custom select instructions for LDRESULT - unsigned VT = N->getValueType(0); + MVT VT = N->getValueType(0); SDOperand Arg = N->getOperand(0); SDOperand Chain = N->getOperand(1); SDNode *Result; @@ -644,7 +644,7 @@ SPUDAGToDAGISel::Select(SDOperand Op) { if (vtm->ldresult_ins == 0) { cerr << "LDRESULT for unsupported type: " - << MVT::getValueTypeString(VT) + << VT.getMVTString() << "\n"; abort(); } @@ -670,7 +670,7 @@ SPUDAGToDAGISel::Select(SDOperand Op) { /* || Op0.getOpcode() == SPUISD::AFormAddr) */ // (IndirectAddr (LDRESULT, imm)) SDOperand Op1 = Op.getOperand(1); - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); DEBUG(cerr << "CellSPU: IndirectAddr(LDRESULT, imm):\nOp0 = "); DEBUG(Op.getOperand(0).Val->dump(CurDAG)); diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp index 0a736d7..36c73b8 100644 --- a/lib/Target/CellSPU/SPUISelLowering.cpp +++ b/lib/Target/CellSPU/SPUISelLowering.cpp @@ -38,9 +38,9 @@ using namespace llvm; namespace { std::map<unsigned, const char *> node_names; - //! MVT::ValueType mapping to useful data for Cell SPU + //! 
MVT mapping to useful data for Cell SPU struct valtype_map_s { - const MVT::ValueType valtype; + const MVT valtype; const int prefslot_byte; }; @@ -57,7 +57,7 @@ namespace { const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]); - const valtype_map_s *getValueTypeMapEntry(MVT::ValueType VT) { + const valtype_map_s *getValueTypeMapEntry(MVT VT) { const valtype_map_s *retval = 0; for (size_t i = 0; i < n_valtype_map; ++i) { @@ -70,7 +70,7 @@ namespace { #ifndef NDEBUG if (retval == 0) { cerr << "getValueTypeMapEntry returns NULL for " - << MVT::getValueTypeString(VT) + << VT.getMVTString() << "\n"; abort(); } @@ -162,8 +162,10 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM) // SPU's loads and stores have to be custom lowered: for (unsigned sctype = (unsigned) MVT::i1; sctype < (unsigned) MVT::f128; ++sctype) { - setOperationAction(ISD::LOAD, sctype, Custom); - setOperationAction(ISD::STORE, sctype, Custom); + MVT VT = (MVT::SimpleValueType)sctype; + + setOperationAction(ISD::LOAD, VT, Custom); + setOperationAction(ISD::STORE, VT, Custom); } // Custom lower BRCOND for i1, i8 to "promote" the result to @@ -296,9 +298,11 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM) // appropriate instructions to materialize the address. for (unsigned sctype = (unsigned) MVT::i1; sctype < (unsigned) MVT::f128; ++sctype) { - setOperationAction(ISD::GlobalAddress, sctype, Custom); - setOperationAction(ISD::ConstantPool, sctype, Custom); - setOperationAction(ISD::JumpTable, sctype, Custom); + MVT VT = (MVT::SimpleValueType)sctype; + + setOperationAction(ISD::GlobalAddress, VT, Custom); + setOperationAction(ISD::ConstantPool, VT, Custom); + setOperationAction(ISD::JumpTable, VT, Custom); } // RET must be custom lowered, to meet ABI requirements @@ -335,36 +339,38 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM) addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass); addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass); - for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; - VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) { + for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; + i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { + MVT VT = (MVT::SimpleValueType)i; + // add/sub are legal for all supported vector VT's. - setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal); - setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal); + setOperationAction(ISD::ADD , VT, Legal); + setOperationAction(ISD::SUB , VT, Legal); // mul has to be custom lowered. 
- setOperationAction(ISD::MUL , (MVT::ValueType)VT, Custom); - - setOperationAction(ISD::AND , (MVT::ValueType)VT, Legal); - setOperationAction(ISD::OR , (MVT::ValueType)VT, Legal); - setOperationAction(ISD::XOR , (MVT::ValueType)VT, Legal); - setOperationAction(ISD::LOAD , (MVT::ValueType)VT, Legal); - setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Legal); - setOperationAction(ISD::STORE, (MVT::ValueType)VT, Legal); + setOperationAction(ISD::MUL , VT, Custom); + + setOperationAction(ISD::AND , VT, Legal); + setOperationAction(ISD::OR , VT, Legal); + setOperationAction(ISD::XOR , VT, Legal); + setOperationAction(ISD::LOAD , VT, Legal); + setOperationAction(ISD::SELECT, VT, Legal); + setOperationAction(ISD::STORE, VT, Legal); // These operations need to be expanded: - setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Custom); + setOperationAction(ISD::SDIV, VT, Expand); + setOperationAction(ISD::SREM, VT, Expand); + setOperationAction(ISD::UDIV, VT, Expand); + setOperationAction(ISD::UREM, VT, Expand); + setOperationAction(ISD::FDIV, VT, Custom); // Custom lower build_vector, constant pool spills, insert and // extract vector elements: - setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom); - setOperationAction(ISD::ConstantPool, (MVT::ValueType)VT, Custom); - setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom); + setOperationAction(ISD::BUILD_VECTOR, VT, Custom); + setOperationAction(ISD::ConstantPool, VT, Custom); + setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); + setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); + setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); } setOperationAction(ISD::MUL, MVT::v16i8, Custom); @@ -447,10 +453,9 @@ SPUTargetLowering::getTargetNodeName(unsigned Opcode) const return ((i != node_names.end()) ? 
i->second : 0); } -MVT::ValueType -SPUTargetLowering::getSetCCResultType(const SDOperand &Op) const { - MVT::ValueType VT = Op.getValueType(); - if (MVT::isInteger(VT)) +MVT SPUTargetLowering::getSetCCResultType(const SDOperand &Op) const { + MVT VT = Op.getValueType(); + if (VT.isInteger()) return VT; else return MVT::i32; @@ -490,9 +495,9 @@ static SDOperand AlignedLoad(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST, LSBaseSDNode *LSN, unsigned &alignment, int &alignOffs, int &prefSlotOffs, - MVT::ValueType &VT, bool &was16aligned) + MVT &VT, bool &was16aligned) { - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); const valtype_map_s *vtm = getValueTypeMapEntry(VT); SDOperand basePtr = LSN->getBasePtr(); SDOperand chain = LSN->getChain(); @@ -573,8 +578,8 @@ static SDOperand LowerLOAD(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { LoadSDNode *LN = cast<LoadSDNode>(Op); SDOperand the_chain = LN->getChain(); - MVT::ValueType VT = LN->getMemoryVT(); - MVT::ValueType OpVT = Op.Val->getValueType(0); + MVT VT = LN->getMemoryVT(); + MVT OpVT = Op.Val->getValueType(0); ISD::LoadExtType ExtType = LN->getExtensionType(); unsigned alignment = LN->getAlignment(); SDOperand Ops[8]; @@ -601,7 +606,7 @@ LowerLOAD(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { if (was16aligned) { Ops[2] = DAG.getConstant(rotamt, MVT::i16); } else { - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); LoadSDNode *LN1 = cast<LoadSDNode>(result); Ops[2] = DAG.getNode(ISD::ADD, PtrVT, LN1->getBasePtr(), DAG.getConstant(rotamt, PtrVT)); @@ -613,15 +618,15 @@ LowerLOAD(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { if (VT == OpVT || ExtType == ISD::EXTLOAD) { SDVTList scalarvts; - MVT::ValueType vecVT = MVT::v16i8; + MVT vecVT = MVT::v16i8; // Convert the loaded v16i8 vector to the appropriate vector type // specified by the operand: if (OpVT == VT) { if (VT != MVT::i1) - vecVT = MVT::getVectorType(VT, (128 / MVT::getSizeInBits(VT))); + vecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits())); } else - vecVT = MVT::getVectorType(OpVT, (128 / MVT::getSizeInBits(OpVT))); + vecVT = MVT::getVectorVT(OpVT, (128 / OpVT.getSizeInBits())); Ops[0] = the_chain; Ops[1] = DAG.getNode(ISD::BIT_CONVERT, vecVT, result); @@ -681,9 +686,9 @@ static SDOperand LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { StoreSDNode *SN = cast<StoreSDNode>(Op); SDOperand Value = SN->getValue(); - MVT::ValueType VT = Value.getValueType(); - MVT::ValueType StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT()); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT VT = Value.getValueType(); + MVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT()); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); unsigned alignment = SN->getAlignment(); switch (SN->getAddressingMode()) { @@ -693,11 +698,11 @@ LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { // The vector type we really want to load from the 16-byte chunk, except // in the case of MVT::i1, which has to be v16i8. 
- unsigned vecVT, stVecVT = MVT::v16i8; + MVT vecVT, stVecVT = MVT::v16i8; if (StVT != MVT::i1) - stVecVT = MVT::getVectorType(StVT, (128 / MVT::getSizeInBits(StVT))); - vecVT = MVT::getVectorType(VT, (128 / MVT::getSizeInBits(VT))); + stVecVT = MVT::getVectorVT(StVT, (128 / StVT.getSizeInBits())); + vecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits())); SDOperand alignLoadVec = AlignedLoad(Op, DAG, ST, SN, alignment, @@ -773,7 +778,7 @@ LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { /// Generate the address of a constant pool entry. static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); Constant *C = CP->getConstVal(); SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment()); @@ -798,7 +803,7 @@ LowerConstantPool(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); SDOperand Zero = DAG.getConstant(0, PtrVT); @@ -821,7 +826,7 @@ LowerJumpTable(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); GlobalValue *GV = GSDN->getGlobal(); SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset()); @@ -853,7 +858,7 @@ LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { */ static SDOperand LowerConstant(SDOperand Op, SelectionDAG &DAG) { - unsigned VT = Op.getValueType(); + MVT VT = Op.getValueType(); ConstantSDNode *CN = cast<ConstantSDNode>(Op.Val); if (VT == MVT::i64) { @@ -862,7 +867,7 @@ LowerConstant(SDOperand Op, SelectionDAG &DAG) { DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T)); } else { cerr << "LowerConstant: unhandled constant type " - << MVT::getValueTypeString(VT) + << VT.getMVTString() << "\n"; abort(); /*NOTREACHED*/ @@ -874,7 +879,7 @@ LowerConstant(SDOperand Op, SelectionDAG &DAG) { //! Custom lower double precision floating point constants static SDOperand LowerConstantFP(SDOperand Op, SelectionDAG &DAG) { - unsigned VT = Op.getValueType(); + MVT VT = Op.getValueType(); ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.Val); assert((FP != 0) && @@ -894,8 +899,8 @@ static SDOperand LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { SDOperand Cond = Op.getOperand(1); - MVT::ValueType CondVT = Cond.getValueType(); - MVT::ValueType CondNVT; + MVT CondVT = Cond.getValueType(); + MVT CondNVT; if (CondVT == MVT::i1 || CondVT == MVT::i8) { CondNVT = (CondVT == MVT::i1 ? MVT::i32 : MVT::i16); @@ -924,19 +929,19 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex) unsigned ArgRegIdx = 0; unsigned StackSlotSize = SPUFrameInfo::stackSlotSize(); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Add DAG nodes to load the arguments or copy them out of registers. 
for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { SDOperand ArgVal; bool needsLoad = false; - MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); - unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8; + MVT ObjectVT = Op.getValue(ArgNo).getValueType(); + unsigned ObjSize = ObjectVT.getSizeInBits()/8; - switch (ObjectVT) { + switch (ObjectVT.getSimpleVT()) { default: { cerr << "LowerFORMAL_ARGUMENTS Unhandled argument type: " - << MVT::getValueTypeString(ObjectVT) + << ObjectVT.getMVTString() << "\n"; abort(); } @@ -1032,7 +1037,7 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex) // If the function takes variable number of arguments, make a frame index for // the start of the first vararg value... for expansion of llvm.va_start. if (isVarArg) { - VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8, + VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, ArgOffset); SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); // If this function is vararg, store any remaining integer argument regs to @@ -1046,7 +1051,7 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex) SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by four for the next argument to store - SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT); + SDOperand PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } if (!MemOps.empty()) @@ -1056,7 +1061,7 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex) ArgValues.push_back(Root); // Return the new list of results. - std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(), + std::vector<MVT> RetVT(Op.Val->value_begin(), Op.Val->value_end()); return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size()); } @@ -1090,7 +1095,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs(); // Handy pointer type - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Accumulate how many bytes are to be pushed on the stack, including the // linkage area, and parameter passing area. According to the SPU ABI, @@ -1120,7 +1125,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff); - switch (Arg.getValueType()) { + switch (Arg.getValueType().getSimpleVT()) { default: assert(0 && "Unexpected ValueType for argument!"); case MVT::i32: case MVT::i64: @@ -1174,7 +1179,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { InFlag = Chain.getValue(1); } - std::vector<MVT::ValueType> NodeTys; + std::vector<MVT> NodeTys; NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. @@ -1186,7 +1191,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { // node so that legalize doesn't hack it. 
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { GlobalValue *GV = G->getGlobal(); - unsigned CalleeVT = Callee.getValueType(); + MVT CalleeVT = Callee.getValueType(); SDOperand Zero = DAG.getConstant(0, PtrVT); SDOperand GA = DAG.getTargetGlobalAddress(GV, CalleeVT); @@ -1243,7 +1248,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { NodeTys.clear(); // If the call has results, copy the values out of the ret val registers. - switch (Op.Val->getValueType(0)) { + switch (Op.Val->getValueType(0).getSimpleVT()) { default: assert(0 && "Unexpected ret value!"); case MVT::Other: break; case MVT::i32: @@ -1365,7 +1370,7 @@ getVecImm(SDNode *N) { /// and the value fits into an unsigned 18-bit constant, and if so, return the /// constant SDOperand SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType) { + MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { uint64_t Value = CN->getValue(); if (ValueType == MVT::i64) { @@ -1387,7 +1392,7 @@ SDOperand SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG, /// and the value fits into a signed 16-bit constant, and if so, return the /// constant SDOperand SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType) { + MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { int64_t Value = CN->getSignExtended(); if (ValueType == MVT::i64) { @@ -1410,7 +1415,7 @@ SDOperand SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG, /// and the value fits into a signed 10-bit constant, and if so, return the /// constant SDOperand SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType) { + MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { int64_t Value = CN->getSignExtended(); if (ValueType == MVT::i64) { @@ -1436,7 +1441,7 @@ SDOperand SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG, /// constant vectors. Thus, we test to see if the upper and lower bytes are the /// same value. SDOperand SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType) { + MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { int Value = (int) CN->getValue(); if (ValueType == MVT::i16 @@ -1455,7 +1460,7 @@ SDOperand SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG, /// and the value fits into a signed 16-bit constant, and if so, return the /// constant SDOperand SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType) { + MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { uint64_t Value = CN->getValue(); if ((ValueType == MVT::i32 @@ -1495,7 +1500,7 @@ static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2], // Start with zero'd results. VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0; - unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType()); + unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits(); for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { SDOperand OpVal = BV->getOperand(i); @@ -1597,7 +1602,7 @@ static bool isConstantSplat(const uint64_t Bits128[2], // this case more efficiently than a constant pool load, lower it to the // sequence of ops that should be used. static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); // If this is a vector of constants or undefs, get the bits. A bit in // UndefBits is set if the corresponding element of the vector is an // ISD::UNDEF value. 
For undefs, the corresponding VectorBits values are @@ -1608,11 +1613,11 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { int SplatSize; if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits) || !isConstantSplat(VectorBits, UndefBits, - MVT::getSizeInBits(MVT::getVectorElementType(VT)), + VT.getVectorElementType().getSizeInBits(), SplatBits, SplatUndef, SplatSize)) return SDOperand(); // Not a constant vector, not a splat. - switch (VT) { + switch (VT.getSimpleVT()) { default: case MVT::v4f32: { uint32_t Value32 = SplatBits; @@ -1649,14 +1654,14 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { Value16 = (unsigned short) (SplatBits & 0xffff); else Value16 = (unsigned short) (SplatBits | (SplatBits << 8)); - SDOperand T = DAG.getConstant(Value16, MVT::getVectorElementType(VT)); + SDOperand T = DAG.getConstant(Value16, VT.getVectorElementType()); SDOperand Ops[8]; for (int i = 0; i < 8; ++i) Ops[i] = T; return DAG.getNode(ISD::BUILD_VECTOR, VT, Ops, 8); } case MVT::v4i32: { unsigned int Value = SplatBits; - SDOperand T = DAG.getConstant(Value, MVT::getVectorElementType(VT)); + SDOperand T = DAG.getConstant(Value, VT.getVectorElementType()); return DAG.getNode(ISD::BUILD_VECTOR, VT, T, T, T, T); } case MVT::v2i64: { @@ -1772,7 +1777,7 @@ static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { // If we have a single element being moved from V1 to V2, this can be handled // using the C*[DX] compute mask instructions, but the vector elements have // to be monotonically increasing with one exception element. - MVT::ValueType EltVT = MVT::getVectorElementType(V1.getValueType()); + MVT EltVT = V1.getValueType().getVectorElementType(); unsigned EltsFromV2 = 0; unsigned V2Elt = 0; unsigned V2EltIdx0 = 0; @@ -1811,7 +1816,7 @@ static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { MachineFunction &MF = DAG.getMachineFunction(); MachineRegisterInfo &RegInfo = MF.getRegInfo(); unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Initialize temporary register to 0 SDOperand InitTempReg = DAG.getCopyToReg(DAG.getEntryNode(), VReg, DAG.getConstant(0, PtrVT)); @@ -1824,7 +1829,7 @@ static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { return DAG.getNode(SPUISD::SHUFB, V1.getValueType(), V2, V1, ShufMaskOp); } else { // Convert the SHUFFLE_VECTOR mask's input element units to the actual bytes. 
- unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8; + unsigned BytesPerElement = EltVT.getSizeInBits()/8; SmallVector<SDOperand, 16> ResultMask; for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) { @@ -1855,11 +1860,11 @@ static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { ConstantSDNode *CN = cast<ConstantSDNode>(Op0.Val); SmallVector<SDOperand, 16> ConstVecValues; - MVT::ValueType VT; + MVT VT; size_t n_copies; // Create a constant vector: - switch (Op.getValueType()) { + switch (Op.getValueType().getSimpleVT()) { default: assert(0 && "Unexpected constant value type in " "LowerSCALAR_TO_VECTOR"); case MVT::v16i8: n_copies = 16; VT = MVT::i8; break; @@ -1878,7 +1883,7 @@ static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { &ConstVecValues[0], ConstVecValues.size()); } else { // Otherwise, copy the value from one register to another: - switch (Op0.getValueType()) { + switch (Op0.getValueType().getSimpleVT()) { default: assert(0 && "Unexpected value type in LowerSCALAR_TO_VECTOR"); case MVT::i8: case MVT::i16: @@ -1894,7 +1899,14 @@ static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { } static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) { - switch (Op.getValueType()) { + switch (Op.getValueType().getSimpleVT()) { + default: + cerr << "CellSPU: Unknown vector multiplication, got " + << Op.getValueType().getMVTString() + << "\n"; + abort(); + /*NOTREACHED*/ + case MVT::v4i32: { SDOperand rA = Op.getOperand(0); SDOperand rB = Op.getOperand(1); @@ -2020,13 +2032,6 @@ static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) { DAG.getNode(ISD::OR, MVT::v4i32, LoProd, HiProd)); } - - default: - cerr << "CellSPU: Unknown vector multiplication, got " - << MVT::getValueTypeString(Op.getValueType()) - << "\n"; - abort(); - /*NOTREACHED*/ } return SDOperand(); @@ -2038,7 +2043,7 @@ static SDOperand LowerFDIVf32(SDOperand Op, SelectionDAG &DAG) { SDOperand A = Op.getOperand(0); SDOperand B = Op.getOperand(1); - unsigned VT = Op.getValueType(); + MVT VT = Op.getValueType(); unsigned VRegBR, VRegC; @@ -2077,7 +2082,7 @@ static SDOperand LowerFDIVf32(SDOperand Op, SelectionDAG &DAG) { } static SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { - unsigned VT = Op.getValueType(); + MVT VT = Op.getValueType(); SDOperand N = Op.getOperand(0); SDOperand Elt = Op.getOperand(1); SDOperand ShufMask[16]; @@ -2104,9 +2109,11 @@ static SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { // Need to generate shuffle mask and extract: int prefslot_begin = -1, prefslot_end = -1; - int elt_byte = EltNo * MVT::getSizeInBits(VT) / 8; + int elt_byte = EltNo * VT.getSizeInBits() / 8; - switch (VT) { + switch (VT.getSimpleVT()) { + default: + assert(false && "Invalid value type!"); case MVT::i8: { prefslot_begin = prefslot_end = 3; break; @@ -2159,12 +2166,12 @@ static SDOperand LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { SDOperand VecOp = Op.getOperand(0); SDOperand ValOp = Op.getOperand(1); SDOperand IdxOp = Op.getOperand(2); - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp); assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!"); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Use $2 because it's always 16-byte aligned and it's available: SDOperand PtrBase = DAG.getRegister(SPU::R2, PtrVT); @@ -2270,9 +2277,8 
@@ static SDOperand LowerI8Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) { - MVT::ValueType VT = Op.getValueType(); - unsigned VecVT = - MVT::getVectorType(VT, (128 / MVT::getSizeInBits(VT))); + MVT VT = Op.getValueType(); + MVT VecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits())); SDOperand Op0 = Op.getOperand(0); @@ -2280,9 +2286,8 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) case ISD::ZERO_EXTEND: case ISD::SIGN_EXTEND: case ISD::ANY_EXTEND: { - MVT::ValueType Op0VT = Op0.getValueType(); - unsigned Op0VecVT = - MVT::getVectorType(Op0VT, (128 / MVT::getSizeInBits(Op0VT))); + MVT Op0VT = Op0.getValueType(); + MVT Op0VecVT = MVT::getVectorVT(Op0VT, (128 / Op0VT.getSizeInBits())); assert(Op0VT == MVT::i32 && "CellSPU: Zero/sign extending something other than i32"); @@ -2361,7 +2366,7 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) case ISD::SHL: { SDOperand ShiftAmt = Op.getOperand(1); - unsigned ShiftAmtVT = unsigned(ShiftAmt.getValueType()); + MVT ShiftAmtVT = ShiftAmt.getValueType(); SDOperand Op0Vec = DAG.getNode(SPUISD::PROMOTE_SCALAR, VecVT, Op0); SDOperand MaskLower = DAG.getNode(SPUISD::SELB, VecVT, @@ -2386,9 +2391,9 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) } case ISD::SRL: { - unsigned VT = unsigned(Op.getValueType()); + MVT VT = Op.getValueType(); SDOperand ShiftAmt = Op.getOperand(1); - unsigned ShiftAmtVT = unsigned(ShiftAmt.getValueType()); + MVT ShiftAmtVT = ShiftAmt.getValueType(); SDOperand ShiftAmtBytes = DAG.getNode(ISD::SRL, ShiftAmtVT, ShiftAmt, @@ -2409,7 +2414,7 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) SDOperand Op0 = DAG.getNode(SPUISD::PROMOTE_SCALAR, MVT::v2i64, Op.getOperand(0)); SDOperand ShiftAmt = Op.getOperand(1); - unsigned ShiftVT = ShiftAmt.getValueType(); + MVT ShiftVT = ShiftAmt.getValueType(); // Negate variable shift amounts if (!isa<ConstantSDNode>(ShiftAmt)) { @@ -2450,7 +2455,7 @@ static SDOperand LowerByteImmed(SDOperand Op, SelectionDAG &DAG) { SDOperand ConstVec; SDOperand Arg; - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); ConstVec = Op.getOperand(0); Arg = Op.getOperand(1); @@ -2474,7 +2479,7 @@ LowerByteImmed(SDOperand Op, SelectionDAG &DAG) { if (!GetConstantBuildVectorBits(ConstVec.Val, VectorBits, UndefBits) && isConstantSplat(VectorBits, UndefBits, - MVT::getSizeInBits(MVT::getVectorElementType(VT)), + VT.getVectorElementType().getSizeInBits(), SplatBits, SplatUndef, SplatSize)) { SDOperand tcVec[16]; SDOperand tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8); @@ -2493,12 +2498,12 @@ LowerByteImmed(SDOperand Op, SelectionDAG &DAG) { } //! Lower i32 multiplication -static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG, unsigned VT, +static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG, MVT VT, unsigned Opc) { - switch (VT) { + switch (VT.getSimpleVT()) { default: cerr << "CellSPU: Unknown LowerMUL value type, got " - << MVT::getValueTypeString(Op.getValueType()) + << Op.getValueType().getMVTString() << "\n"; abort(); /*NOTREACHED*/ @@ -2525,10 +2530,12 @@ static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG, unsigned VT, ones per byte, which then have to be accumulated. 
*/ static SDOperand LowerCTPOP(SDOperand Op, SelectionDAG &DAG) { - unsigned VT = Op.getValueType(); - unsigned vecVT = MVT::getVectorType(VT, (128 / MVT::getSizeInBits(VT))); + MVT VT = Op.getValueType(); + MVT vecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits())); - switch (VT) { + switch (VT.getSimpleVT()) { + default: + assert(false && "Invalid value type!"); case MVT::i8: { SDOperand N = Op.getOperand(0); SDOperand Elt0 = DAG.getConstant(0, MVT::i32); @@ -2630,7 +2637,7 @@ SDOperand SPUTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { unsigned Opc = (unsigned) Op.getOpcode(); - unsigned VT = (unsigned) Op.getValueType(); + MVT VT = Op.getValueType(); switch (Opc) { default: { @@ -2704,7 +2711,7 @@ SPUTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) // Vector and i8 multiply: case ISD::MUL: - if (MVT::isVector(VT)) + if (VT.isVector()) return LowerVectorMUL(Op, DAG); else if (VT == MVT::i8) return LowerI8Math(Op, DAG, Opc); @@ -2911,7 +2918,7 @@ SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const std::pair<unsigned, const TargetRegisterClass*> SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const + MVT VT) const { if (Constraint.size() == 1) { // GCC RS6000 Constraint Letters @@ -2961,9 +2968,9 @@ SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, case SPUISD::PROMOTE_SCALAR: { SDOperand Op0 = Op.getOperand(0); - MVT::ValueType Op0VT = Op0.getValueType(); - unsigned Op0VTBits = MVT::getSizeInBits(Op0VT); - uint64_t InMask = MVT::getIntVTBitMask(Op0VT); + MVT Op0VT = Op0.getValueType(); + unsigned Op0VTBits = Op0VT.getSizeInBits(); + uint64_t InMask = Op0VT.getIntegerVTBitMask(); KnownZero |= APInt(Op0VTBits, ~InMask, false); KnownOne |= APInt(Op0VTBits, InMask, false); break; @@ -2972,9 +2979,9 @@ SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, case SPUISD::LDRESULT: case SPUISD::EXTRACT_ELT0: case SPUISD::EXTRACT_ELT0_CHAINED: { - MVT::ValueType OpVT = Op.getValueType(); - unsigned OpVTBits = MVT::getSizeInBits(OpVT); - uint64_t InMask = MVT::getIntVTBitMask(OpVT); + MVT OpVT = Op.getValueType(); + unsigned OpVTBits = OpVT.getSizeInBits(); + uint64_t InMask = OpVT.getIntegerVTBitMask(); KnownZero |= APInt(OpVTBits, ~InMask, false); KnownOne |= APInt(OpVTBits, InMask, false); break; diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h index 5632ee3..5c41c29 100644 --- a/lib/Target/CellSPU/SPUISelLowering.h +++ b/lib/Target/CellSPU/SPUISelLowering.h @@ -79,15 +79,15 @@ namespace llvm { /// Predicates that are used for node matching: namespace SPU { SDOperand get_vec_u18imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType); + MVT ValueType); SDOperand get_vec_i16imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType); + MVT ValueType); SDOperand get_vec_i10imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType); + MVT ValueType); SDOperand get_vec_i8imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType); + MVT ValueType); SDOperand get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType); + MVT ValueType); SDOperand get_v4i32_imm(SDNode *N, SelectionDAG &DAG); SDOperand get_v2i64_imm(SDNode *N, SelectionDAG &DAG); } @@ -109,7 +109,7 @@ namespace llvm { virtual const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType - Return the ValueType for ISD::SETCC - virtual MVT::ValueType getSetCCResultType(const SDOperand &) const; + virtual MVT 
getSetCCResultType(const SDOperand &) const; /// LowerOperation - Provide custom lowering hooks for some operations. /// @@ -128,7 +128,7 @@ namespace llvm { std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; void LowerAsmOperandForConstraint(SDOperand Op, char ConstraintLetter, std::vector<SDOperand> &Ops, diff --git a/lib/Target/IA64/IA64ISelDAGToDAG.cpp b/lib/Target/IA64/IA64ISelDAGToDAG.cpp index da968b9..0a80653 100644 --- a/lib/Target/IA64/IA64ISelDAGToDAG.cpp +++ b/lib/Target/IA64/IA64ISelDAGToDAG.cpp @@ -119,7 +119,7 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDOperand Op) { bool isFP=false; - if(MVT::isFloatingPoint(Tmp1.getValueType())) + if(Tmp1.getValueType().isFloatingPoint()) isFP=true; bool isModulus=false; // is it a division or a modulus? @@ -469,9 +469,9 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { AddToISelQueue(Chain); AddToISelQueue(Address); - MVT::ValueType TypeBeingLoaded = LD->getMemoryVT(); + MVT TypeBeingLoaded = LD->getMemoryVT(); unsigned Opc; - switch (TypeBeingLoaded) { + switch (TypeBeingLoaded.getSimpleVT()) { default: #ifndef NDEBUG N->dump(CurDAG); @@ -511,7 +511,7 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { unsigned Opc; if (ISD::isNON_TRUNCStore(N)) { - switch (N->getOperand(1).getValueType()) { + switch (N->getOperand(1).getValueType().getSimpleVT()) { default: assert(0 && "unknown type in store"); case MVT::i1: { // this is a bool Opc = IA64::ST1; // we store either 0 or 1 as a byte @@ -531,7 +531,7 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { case MVT::f64: Opc = IA64::STF8; break; } } else { // Truncating store - switch(ST->getMemoryVT()) { + switch(ST->getMemoryVT().getSimpleVT()) { default: assert(0 && "unknown type in truncstore"); case MVT::i8: Opc = IA64::ST1; break; case MVT::i16: Opc = IA64::ST2; break; diff --git a/lib/Target/IA64/IA64ISelLowering.cpp b/lib/Target/IA64/IA64ISelLowering.cpp index 5d29200..8b711d8 100644 --- a/lib/Target/IA64/IA64ISelLowering.cpp +++ b/lib/Target/IA64/IA64ISelLowering.cpp @@ -139,8 +139,7 @@ const char *IA64TargetLowering::getTargetNodeName(unsigned Opcode) const { } } -MVT::ValueType -IA64TargetLowering::getSetCCResultType(const SDOperand &) const { +MVT IA64TargetLowering::getSetCCResultType(const SDOperand &) const { return MVT::i1; } @@ -181,7 +180,7 @@ IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { SDOperand newroot, argt; if(count < 8) { // need to fix this logic? maybe. - switch (getValueType(I->getType())) { + switch (getValueType(I->getType()).getSimpleVT()) { default: assert(0 && "ERROR in LowerArgs: can't lower this type of arg.\n"); case MVT::f32: @@ -286,7 +285,7 @@ IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { // Finally, inform the code generator which regs we return values in. 
// (see the ISD::RET: case in the instruction selector) - switch (getValueType(F.getReturnType())) { + switch (getValueType(F.getReturnType()).getSimpleVT()) { default: assert(0 && "i have no idea where to return this type!"); case MVT::isVoid: break; case MVT::i1: @@ -347,10 +346,10 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, for (unsigned i = 0, e = Args.size(); i != e; ++i) { SDOperand Val = Args[i].Node; - MVT::ValueType ObjectVT = Val.getValueType(); + MVT ObjectVT = Val.getValueType(); SDOperand ValToStore(0, 0), ValToConvert(0, 0); unsigned ObjSize=8; - switch (ObjectVT) { + switch (ObjectVT.getSimpleVT()) { default: assert(0 && "unexpected argument type!"); case MVT::i1: case MVT::i8: @@ -442,7 +441,7 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, // flagged for now, but shouldn't have to be (TODO) unsigned seenConverts = 0; for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) { - if(MVT::isFloatingPoint(RegValuesToPass[i].getValueType())) { + if(RegValuesToPass[i].getValueType().isFloatingPoint()) { Chain = DAG.getCopyToReg(Chain, IntArgRegs[i], Converts[seenConverts++], InFlag); InFlag = Chain.getValue(1); @@ -453,7 +452,7 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, unsigned usedFPArgs = 0; for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) { Chain = DAG.getCopyToReg(Chain, - MVT::isInteger(RegValuesToPass[i].getValueType()) ? + RegValuesToPass[i].getValueType().isInteger() ? IntArgRegs[i] : FPArgRegs[usedFPArgs++], RegValuesToPass[i], InFlag); InFlag = Chain.getValue(1); } @@ -466,7 +465,7 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, } */ - std::vector<MVT::ValueType> NodeTys; + std::vector<MVT> NodeTys; std::vector<SDOperand> CallOperands; NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. @@ -492,14 +491,14 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, Chain = DAG.getCopyToReg(Chain, IA64::rp, RPBeforeCall, InFlag); InFlag = Chain.getValue(1); - std::vector<MVT::ValueType> RetVals; + std::vector<MVT> RetVals; RetVals.push_back(MVT::Other); RetVals.push_back(MVT::Flag); - MVT::ValueType RetTyVT = getValueType(RetTy); + MVT RetTyVT = getValueType(RetTy); SDOperand RetVal; if (RetTyVT != MVT::isVoid) { - switch (RetTyVT) { + switch (RetTyVT.getSimpleVT()) { default: assert(0 && "Unknown value type to return!"); case MVT::i1: { // bools are just like other integers (returned in r8) // we *could* fall through to the truncate below, but this saves a @@ -573,8 +572,8 @@ LowerOperation(SDOperand Op, SelectionDAG &DAG) { return DAG.getNode(IA64ISD::RET_FLAG, MVT::Other, AR_PFSVal); case 3: { // Copy the result into the output register & restore ar.pfs - MVT::ValueType ArgVT = Op.getOperand(1).getValueType(); - unsigned ArgReg = MVT::isInteger(ArgVT) ? IA64::r8 : IA64::F8; + MVT ArgVT = Op.getOperand(1).getValueType(); + unsigned ArgReg = ArgVT.isInteger() ? 
IA64::r8 : IA64::F8; AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), VirtGPR, MVT::i64); Copy = DAG.getCopyToReg(AR_PFSVal.getValue(1), ArgReg, Op.getOperand(1), @@ -588,13 +587,13 @@ LowerOperation(SDOperand Op, SelectionDAG &DAG) { return SDOperand(); } case ISD::VAARG: { - MVT::ValueType VT = getPointerTy(); + MVT VT = getPointerTy(); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); SDOperand VAList = DAG.getLoad(VT, Op.getOperand(0), Op.getOperand(1), SV, 0); // Increment the pointer, VAList, to the next vaarg SDOperand VAIncr = DAG.getNode(ISD::ADD, VT, VAList, - DAG.getConstant(MVT::getSizeInBits(VT)/8, + DAG.getConstant(VT.getSizeInBits()/8, VT)); // Store the incremented VAList to the legalized pointer VAIncr = DAG.getStore(VAList.getValue(1), VAIncr, diff --git a/lib/Target/IA64/IA64ISelLowering.h b/lib/Target/IA64/IA64ISelLowering.h index aef51f0..b26c822 100644 --- a/lib/Target/IA64/IA64ISelLowering.h +++ b/lib/Target/IA64/IA64ISelLowering.h @@ -49,7 +49,7 @@ namespace llvm { const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType: return ISD::SETCC's result type. - virtual MVT::ValueType getSetCCResultType(const SDOperand &) const; + virtual MVT getSetCCResultType(const SDOperand &) const; /// LowerArguments - This hook must be implemented to indicate how we should /// lower the arguments for the specified function, into the specified DAG. diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp index ceb4bed..283d693 100644 --- a/lib/Target/Mips/MipsISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp @@ -250,7 +250,7 @@ Select(SDOperand N) AddToISelQueue(LHS); AddToISelQueue(RHS); - MVT::ValueType VT = LHS.getValueType(); + MVT VT = LHS.getValueType(); SDNode *Carry = CurDAG->getTargetNode(Mips::SLTu, VT, Ops, 2); SDNode *AddCarry = CurDAG->getTargetNode(Mips::ADDu, VT, SDOperand(Carry,0), RHS); diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index 0e233c7..16464d1 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -108,8 +108,7 @@ MipsTargetLowering(MipsTargetMachine &TM): TargetLowering(TM) } -MVT::ValueType -MipsTargetLowering::getSetCCResultType(const SDOperand &) const { +MVT MipsTargetLowering::getSetCCResultType(const SDOperand &) const { return MVT::i32; } @@ -223,7 +222,7 @@ LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) SDOperand HiPart; if (!isPIC) { - const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::i32); + const MVT *VTs = DAG.getNodeValueTypes(MVT::i32); SDOperand Ops[] = { GA }; HiPart = DAG.getNode(MipsISD::Hi, VTs, 1, Ops, 1); } else // Emit Load from Global Pointer @@ -256,7 +255,7 @@ LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) SDOperand False = Op.getOperand(3); SDOperand CC = Op.getOperand(4); - const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::i32); + const MVT *VTs = DAG.getNodeValueTypes(MVT::i32); SDOperand Ops[] = { LHS, RHS, CC }; SDOperand SetCCRes = DAG.getNode(ISD::SETCC, VTs, 1, Ops, 3); @@ -270,12 +269,12 @@ LowerJumpTable(SDOperand Op, SelectionDAG &DAG) SDOperand ResNode; SDOperand HiPart; - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { - const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::i32); + const MVT *VTs = DAG.getNodeValueTypes(MVT::i32); SDOperand 
Ops[] = { JTI }; HiPart = DAG.getNode(MipsISD::Hi, VTs, 1, Ops, 1); } else // Emit Load from Global Pointer @@ -341,7 +340,7 @@ LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC) // To meet ABI, Mips must always allocate 16 bytes on // the stack (even if less than 4 are used as arguments) - int VTsize = MVT::getSizeInBits(MVT::i32)/8; + int VTsize = MVT(MVT::i32).getSizeInBits()/8; MFI->CreateFixedObject(VTsize, (VTsize*3)); CCInfo.AnalyzeCallOperands(Op.Val, CC_Mips); @@ -391,7 +390,7 @@ LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC) // This guarantees that when allocating Local Area the firsts // 16 bytes which are alwayes reserved won't be overwritten. LastStackLoc = (16 + VA.getLocMemOffset()); - int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8, + int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8, LastStackLoc); SDOperand PtrOff = DAG.getFrameIndex(FI,getPointerTy()); @@ -575,7 +574,7 @@ LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) // Arguments stored on registers if (VA.isRegLoc()) { - MVT::ValueType RegVT = VA.getLocVT(); + MVT RegVT = VA.getLocVT(); TargetRegisterClass *RC; if (RegVT == MVT::i32) @@ -738,8 +737,7 @@ getConstraintType(const std::string &Constraint) const } std::pair<unsigned, const TargetRegisterClass*> MipsTargetLowering:: -getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const +getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const { if (Constraint.size() == 1) { switch (Constraint[0]) { @@ -753,7 +751,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint, std::vector<unsigned> MipsTargetLowering:: getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const + MVT VT) const { if (Constraint.size() != 1) return std::vector<unsigned>(); diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h index cda36ac..6f621b4 100644 --- a/lib/Target/Mips/MipsISelLowering.h +++ b/lib/Target/Mips/MipsISelLowering.h @@ -66,7 +66,7 @@ namespace llvm { virtual const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType - get the ISD::SETCC result ValueType - MVT::ValueType getSetCCResultType(const SDOperand &) const; + MVT getSetCCResultType(const SDOperand &) const; private: // Lower Operand helpers @@ -93,11 +93,11 @@ namespace llvm { std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; std::vector<unsigned> getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; }; } diff --git a/lib/Target/PIC16/PIC16ISelLowering.cpp b/lib/Target/PIC16/PIC16ISelLowering.cpp index 210f216..deab5d5 100644 --- a/lib/Target/PIC16/PIC16ISelLowering.cpp +++ b/lib/Target/PIC16/PIC16ISelLowering.cpp @@ -204,7 +204,7 @@ SDOperand PIC16TargetLowering:: LowerOperation(SDOperand Op, SelectionDAG &DAG) SDOperand PIC16TargetLowering::LowerBR_CC(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); SDOperand Chain = Op.getOperand(0); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); SDOperand LHS = Op.getOperand(2); @@ -278,7 +278,7 @@ SDOperand PIC16TargetLowering::LowerBR_CC(SDOperand Op, SelectionDAG &DAG) SDOperand PIC16TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = getPointerTy(); + MVT PtrVT = getPointerTy(); GlobalAddressSDNode *GSDN = 
cast<GlobalAddressSDNode>(Op); GlobalValue *GV = GSDN->getGlobal(); @@ -626,7 +626,10 @@ SDOperand PIC16TargetLowering::PerformDAGCombine(SDNode *N, return Stores[0]; } - switch(Src.getValueType()) { + switch(Src.getValueType().getSimpleVT()) { + default: + assert(false && "Invalid value type!"); + case MVT::i8: break; diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index d73d8aa..45a0831 100644 --- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -921,7 +921,7 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { case ISD::LOAD: { // Handle preincrement loads. LoadSDNode *LD = cast<LoadSDNode>(Op); - MVT::ValueType LoadedVT = LD->getMemoryVT(); + MVT LoadedVT = LD->getMemoryVT(); // Normal loads are handled by code generated from the .td file. if (LD->getAddressingMode() != ISD::PRE_INC) @@ -936,7 +936,7 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { if (LD->getValueType(0) != MVT::i64) { // Handle PPC32 integer and normal FP loads. assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load"); - switch (LoadedVT) { + switch (LoadedVT.getSimpleVT()) { default: assert(0 && "Invalid PPC load type!"); case MVT::f64: Opcode = PPC::LFDU; break; case MVT::f32: Opcode = PPC::LFSU; break; @@ -948,7 +948,7 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { } else { assert(LD->getValueType(0) == MVT::i64 && "Unknown load result type!"); assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load"); - switch (LoadedVT) { + switch (LoadedVT.getSimpleVT()) { default: assert(0 && "Invalid PPC load type!"); case MVT::i64: Opcode = PPC::LDU; break; case MVT::i32: Opcode = PPC::LWZU8; break; diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 097e256..f528143 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -256,50 +256,52 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) { // First set operation action for all vector types to expand. Then we // will selectively turn on ones that can be effectively codegen'd. - for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; - VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) { + for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; + i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { + MVT VT = (MVT::SimpleValueType)i; + // add/sub are legal for all supported vector VT's. - setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal); - setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal); + setOperationAction(ISD::ADD , VT, Legal); + setOperationAction(ISD::SUB , VT, Legal); // We promote all shuffles to v16i8. - setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8); + setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote); + AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8); // We promote all non-typed operations to v4i32. 
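The rewritten loop at the top of this PPCTargetLowering hunk shows the iteration idiom the struct form requires: the induction variable stays a plain unsigned over the SimpleValueType range, and each value is wrapped into an MVT at the top of the body. A minimal standalone sketch of that shape, assuming the post-patch ValueTypes.h; markLegal is a hypothetical stand-in for the setOperationAction calls:

#include "llvm/CodeGen/ValueTypes.h"

// Hypothetical stand-in for setOperationAction and friends.
static void markLegal(MVT VT) { (void)VT; }

static void visitVectorVTs() {
  // Iterate the raw enum range, then wrap each value in an MVT,
  // mirroring the rewritten loop in the PPC constructor above.
  for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    markLegal(VT);
  }
}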
- setOperationAction(ISD::AND , (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::AND , (MVT::ValueType)VT, MVT::v4i32); - setOperationAction(ISD::OR , (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::OR , (MVT::ValueType)VT, MVT::v4i32); - setOperationAction(ISD::XOR , (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::XOR , (MVT::ValueType)VT, MVT::v4i32); - setOperationAction(ISD::LOAD , (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::LOAD , (MVT::ValueType)VT, MVT::v4i32); - setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32); - setOperationAction(ISD::STORE, (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::STORE, (MVT::ValueType)VT, MVT::v4i32); + setOperationAction(ISD::AND , VT, Promote); + AddPromotedToType (ISD::AND , VT, MVT::v4i32); + setOperationAction(ISD::OR , VT, Promote); + AddPromotedToType (ISD::OR , VT, MVT::v4i32); + setOperationAction(ISD::XOR , VT, Promote); + AddPromotedToType (ISD::XOR , VT, MVT::v4i32); + setOperationAction(ISD::LOAD , VT, Promote); + AddPromotedToType (ISD::LOAD , VT, MVT::v4i32); + setOperationAction(ISD::SELECT, VT, Promote); + AddPromotedToType (ISD::SELECT, VT, MVT::v4i32); + setOperationAction(ISD::STORE, VT, Promote); + AddPromotedToType (ISD::STORE, VT, MVT::v4i32); // No other operations are legal. - setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand); + setOperationAction(ISD::MUL , VT, Expand); + setOperationAction(ISD::SDIV, VT, Expand); + setOperationAction(ISD::SREM, VT, Expand); + setOperationAction(ISD::UDIV, VT, Expand); + setOperationAction(ISD::UREM, VT, Expand); + setOperationAction(ISD::FDIV, VT, Expand); + setOperationAction(ISD::FNEG, VT, Expand); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand); + setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); + setOperationAction(ISD::BUILD_VECTOR, VT, Expand); + setOperationAction(ISD::UMUL_LOHI, VT, Expand); + setOperationAction(ISD::SMUL_LOHI, VT, Expand); + setOperationAction(ISD::UDIVREM, VT, Expand); + setOperationAction(ISD::SDIVREM, VT, Expand); + setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand); + setOperationAction(ISD::FPOW, VT, Expand); + setOperationAction(ISD::CTPOP, VT, Expand); + setOperationAction(ISD::CTLZ, VT, Expand); + setOperationAction(ISD::CTTZ, VT, Expand); } // We can custom expand all 
VECTOR_SHUFFLEs to VPERM, others we can handle @@ -420,8 +422,7 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { } -MVT::ValueType -PPCTargetLowering::getSetCCResultType(const SDOperand &) const { +MVT PPCTargetLowering::getSetCCResultType(const SDOperand &) const { return MVT::i32; } @@ -690,7 +691,7 @@ SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { uint64_t Value = 0; if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { Value = CN->getValue(); - ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8; + ValSizeInBytes = CN->getValueType(0).getSizeInBits()/8; } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); Value = FloatToBits(CN->getValueAPF().convertToFloat()); @@ -1007,7 +1008,7 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, if (!EnablePPCPreinc) return false; SDOperand Ptr; - MVT::ValueType VT; + MVT VT; if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { Ptr = LD->getBasePtr(); VT = LD->getMemoryVT(); @@ -1020,7 +1021,7 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, return false; // PowerPC doesn't have preinc load/store instructions for vectors. - if (MVT::isVector(VT)) + if (VT.isVector()) return false; // TODO: Check reg+reg first. @@ -1055,7 +1056,7 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, SDOperand PPCTargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); Constant *C = CP->getConstVal(); SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment()); @@ -1086,7 +1087,7 @@ SDOperand PPCTargetLowering::LowerConstantPool(SDOperand Op, } SDOperand PPCTargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); SDOperand Zero = DAG.getConstant(0, PtrVT); @@ -1123,7 +1124,7 @@ SDOperand PPCTargetLowering::LowerGlobalTLSAddress(SDOperand Op, SDOperand PPCTargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); GlobalValue *GV = GSDN->getGlobal(); SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset()); @@ -1170,13 +1171,13 @@ SDOperand PPCTargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { // fold the new nodes. if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { if (C->isNullValue() && CC == ISD::SETEQ) { - MVT::ValueType VT = Op.getOperand(0).getValueType(); + MVT VT = Op.getOperand(0).getValueType(); SDOperand Zext = Op.getOperand(0); if (VT < MVT::i32) { VT = MVT::i32; Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0)); } - unsigned Log2b = Log2_32(MVT::getSizeInBits(VT)); + unsigned Log2b = Log2_32(VT.getSizeInBits()); SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext); SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz, DAG.getConstant(Log2b, MVT::i32)); @@ -1194,9 +1195,9 @@ SDOperand PPCTargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { // condition register, reading it back out, and masking the correct bit. 
The // normal approach here uses sub to do this instead of xor. Using xor exposes // the result to other bit-twiddling opportunities. - MVT::ValueType LHSVT = Op.getOperand(0).getValueType(); - if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) { - MVT::ValueType VT = Op.getValueType(); + MVT LHSVT = Op.getOperand(0).getValueType(); + if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { + MVT VT = Op.getValueType(); SDOperand Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0), Op.getOperand(1)); return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC); @@ -1225,7 +1226,7 @@ SDOperand PPCTargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG, if (Subtarget.isMachoABI()) { // vastart just stores the address of the VarArgsFrameIndex slot into the // memory location argument. - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0); @@ -1260,15 +1261,15 @@ SDOperand PPCTargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG, SDOperand ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); SDOperand StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT); SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); - uint64_t FrameOffset = MVT::getSizeInBits(PtrVT)/8; + uint64_t FrameOffset = PtrVT.getSizeInBits()/8; SDOperand ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT); - uint64_t StackOffset = MVT::getSizeInBits(PtrVT)/8 - 1; + uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; SDOperand ConstStackOffset = DAG.getConstant(StackOffset, PtrVT); uint64_t FPROffset = 1; @@ -1325,9 +1326,9 @@ static const unsigned *GetFPR(const PPCSubtarget &Subtarget) { /// the stack. 
static unsigned CalculateStackSlotSize(SDOperand Arg, SDOperand Flag, bool isVarArg, unsigned PtrByteSize) { - MVT::ValueType ArgVT = Arg.getValueType(); + MVT ArgVT = Arg.getValueType(); ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Flag)->getArgFlags(); - unsigned ArgSize =MVT::getSizeInBits(ArgVT)/8; + unsigned ArgSize =ArgVT.getSizeInBits()/8; if (Flags.isByVal()) ArgSize = Flags.getByValSize(); ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; @@ -1352,7 +1353,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SDOperand Root = Op.getOperand(0); bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); bool isPPC64 = PtrVT == MVT::i64; bool isMachoABI = Subtarget.isMachoABI(); bool isELF32_ABI = Subtarget.isELF32_ABI(); @@ -1402,8 +1403,8 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, if (!isVarArg && !isPPC64) { for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { - MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); - unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8; + MVT ObjectVT = Op.getValue(ArgNo).getValueType(); + unsigned ObjSize = ObjectVT.getSizeInBits()/8; ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags(); @@ -1416,7 +1417,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, continue; } - switch(ObjectVT) { + switch(ObjectVT.getSimpleVT()) { default: assert(0 && "Unhandled argument type!"); case MVT::i32: case MVT::f32: @@ -1453,8 +1454,8 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { SDOperand ArgVal; bool needsLoad = false; - MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); - unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8; + MVT ObjectVT = Op.getValue(ArgNo).getValueType(); + unsigned ObjSize = ObjectVT.getSizeInBits()/8; unsigned ArgSize = ObjSize; ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags(); @@ -1535,7 +1536,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, continue; } - switch (ObjectVT) { + switch (ObjectVT.getSimpleVT()) { default: assert(0 && "Unhandled argument type!"); case MVT::i32: if (!isPPC64) { @@ -1693,18 +1694,18 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, // Make room for Num_GPR_Regs, Num_FPR_Regs and for a possible frame // pointer. 
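The byte-size computations in CalculateStackSlotSize and the varargs frame setup that follows show the two spellings side by side: the member accessor on a value the code already holds as an MVT, and an explicit MVT(...) wrap when starting from a bare enum constant such as MVT::f64. A minimal sketch under the same assumption; both helper names are hypothetical:

#include "llvm/CodeGen/ValueTypes.h"

static unsigned argSizeInBytes(MVT ArgVT) {
  // Member form of the old static MVT::getSizeInBits(ArgVT).
  return ArgVT.getSizeInBits() / 8;
}

static unsigned f64SizeInBytes() {
  // A bare MVT::f64 enum constant gets wrapped before the query.
  return MVT(MVT::f64).getSizeInBits() / 8;
}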
- depth = -(Num_GPR_Regs * MVT::getSizeInBits(PtrVT)/8 + - Num_FPR_Regs * MVT::getSizeInBits(MVT::f64)/8 + - MVT::getSizeInBits(PtrVT)/8); + depth = -(Num_GPR_Regs * PtrVT.getSizeInBits()/8 + + Num_FPR_Regs * MVT(MVT::f64).getSizeInBits()/8 + + PtrVT.getSizeInBits()/8); - VarArgsStackOffset = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8, + VarArgsStackOffset = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, ArgOffset); } else depth = ArgOffset; - VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8, + VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, depth); SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); @@ -1716,7 +1717,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by four for the next argument to store - SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT); + SDOperand PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } } @@ -1736,7 +1737,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by four for the next argument to store - SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT); + SDOperand PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } @@ -1748,7 +1749,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by eight for the next argument to store - SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(MVT::f64)/8, + SDOperand PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } @@ -1762,7 +1763,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by eight for the next argument to store - SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(MVT::f64)/8, + SDOperand PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } @@ -1775,7 +1776,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, ArgValues.push_back(Root); // Return the new list of results. - std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(), + std::vector<MVT> RetVT(Op.Val->value_begin(), Op.Val->value_end()); return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size()); } @@ -1807,7 +1808,7 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, for (unsigned i = 0; i != NumOps; ++i) { SDOperand Arg = Call.getOperand(5+2*i); SDOperand Flag = Call.getOperand(5+2*i+1); - MVT::ValueType ArgVT = Arg.getValueType(); + MVT ArgVT = Arg.getValueType(); // Varargs Altivec parameters are padded to a 16 byte boundary. if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 || ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) { @@ -1970,7 +1971,7 @@ static SDOperand EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, isMachoABI); int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc); - MVT::ValueType VT = isPPC64 ? MVT::i64 : MVT::i32; + MVT VT = isPPC64 ? 
MVT::i64 : MVT::i32; SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); Chain = DAG.getStore(Chain, OldRetAddr, NewRetAddrFrIdx, PseudoSourceValue::getFixedStack(), NewRetAddr); @@ -1988,9 +1989,9 @@ CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, SDOperand Arg, int SPDiff, unsigned ArgOffset, SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) { int Offset = ArgOffset + SPDiff; - uint32_t OpSize = (MVT::getSizeInBits(Arg.getValueType())+7)/8; + uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset); - MVT::ValueType VT = isPPC64 ? MVT::i64 : MVT::i32; + MVT VT = isPPC64 ? MVT::i64 : MVT::i32; SDOperand FIN = DAG.getFrameIndex(FI, VT); TailCallArgumentInfo Info; Info.Arg = Arg; @@ -2009,7 +2010,7 @@ SDOperand PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, SDOperand &FPOpOut) { if (SPDiff) { // Load the LR and FP stack slot for later adjusting. - MVT::ValueType VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32; + MVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32; LROpOut = getReturnAddrFrameIndex(DAG); LROpOut = DAG.getLoad(VT, Chain, LROpOut, NULL, 0); Chain = SDOperand(LROpOut.Val, 1); @@ -2043,7 +2044,7 @@ LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDOperand Chain, unsigned ArgOffset, bool isPPC64, bool isTailCall, bool isVector, SmallVector<SDOperand, 8> &MemOpChains, SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) { - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); if (!isTailCall) { if (isVector) { SDOperand StackPtr; @@ -2074,7 +2075,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, bool isMachoABI = Subtarget.isMachoABI(); bool isELF32_ABI = Subtarget.isELF32_ABI(); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); bool isPPC64 = PtrVT == MVT::i64; unsigned PtrByteSize = isPPC64 ? 8 : 4; @@ -2192,7 +2193,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, if (Size==1 || Size==2) { // Very small objects are passed right-justified. // Everything else is passed left-justified. - MVT::ValueType VT = (Size==1) ? MVT::i8 : MVT::i16; + MVT VT = (Size==1) ? MVT::i8 : MVT::i16; if (GPR_idx != NumGPRs) { SDOperand Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, Chain, Arg, NULL, 0, VT); @@ -2244,7 +2245,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, continue; } - switch (Arg.getValueType()) { + switch (Arg.getValueType().getSimpleVT()) { default: assert(0 && "Unexpected ValueType for argument!"); case MVT::i32: case MVT::i64: @@ -2384,7 +2385,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, ArgOffset += 12*16; for (unsigned i = 0; i != NumOps; ++i) { SDOperand Arg = Op.getOperand(5+2*i); - MVT::ValueType ArgType = Arg.getValueType(); + MVT ArgType = Arg.getValueType(); if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { if (++j > NumVRs) { @@ -2450,7 +2451,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, InFlag = Chain.getValue(1); } - std::vector<MVT::ValueType> NodeTys; + std::vector<MVT> NodeTys; NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 
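The switch rewrite that recurs across every target in this patch is visible throughout the hunks above: a struct cannot be switched on directly, so each switch (VT) becomes switch (VT.getSimpleVT()), with a default arm still guarding unexpected cases. A minimal sketch, assuming the post-patch ValueTypes.h; pickLoadOpcode and its returned opcode numbers are hypothetical placeholders:

#include "llvm/CodeGen/ValueTypes.h"
#include <cassert>

static unsigned pickLoadOpcode(MVT LoadedVT) {
  // getSimpleVT() yields the enum value the switch needs.
  switch (LoadedVT.getSimpleVT()) {
  default: assert(0 && "Invalid load type!"); return 0;
  case MVT::i8:  return 1; // placeholder opcode
  case MVT::i16: return 2; // placeholder opcode
  case MVT::i32: return 3; // placeholder opcode
  }
}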
@@ -2544,7 +2545,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, // Copy all of the result registers out of their specified physreg. for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { CCValAssign &VA = RVLocs[i]; - MVT::ValueType VT = VA.getValVT(); + MVT VT = VA.getValVT(); assert(VA.isRegLoc() && "Can only return in registers!"); Chain = DAG.getCopyFromReg(Chain, VA.getLocReg(), VT, InFlag).getValue(1); ResultVals.push_back(Chain.getValue(0)); @@ -2629,7 +2630,7 @@ SDOperand PPCTargetLowering::LowerSTACKRESTORE(SDOperand Op, SelectionDAG &DAG, // When we pop the dynamic allocation we need to restore the SP link. // Get the corect type for pointers. - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Construct the stack pointer operand. bool IsPPC64 = Subtarget.isPPC64(); @@ -2657,7 +2658,7 @@ PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const { MachineFunction &MF = DAG.getMachineFunction(); bool IsPPC64 = PPCSubTarget.isPPC64(); bool isMachoABI = PPCSubTarget.isMachoABI(); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Get current frame pointer save index. The users of this index will be // primarily DYNALLOC instructions. @@ -2681,7 +2682,7 @@ PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const { MachineFunction &MF = DAG.getMachineFunction(); bool IsPPC64 = PPCSubTarget.isPPC64(); bool isMachoABI = PPCSubTarget.isMachoABI(); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Get current frame pointer save index. The users of this index will be // primarily DYNALLOC instructions. @@ -2709,7 +2710,7 @@ SDOperand PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, SDOperand Size = Op.getOperand(1); // Get the corect type for pointers. - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Negate the size. SDOperand NegSize = DAG.getNode(ISD::SUB, PtrVT, DAG.getConstant(0, PtrVT), Size); @@ -2722,13 +2723,13 @@ SDOperand PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, } SDOperand PPCTargetLowering::LowerAtomicLAS(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.Val->getValueType(0); + MVT VT = Op.Val->getValueType(0); SDOperand Chain = Op.getOperand(0); SDOperand Ptr = Op.getOperand(1); SDOperand Incr = Op.getOperand(2); // Issue a "load and reserve". - std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; VTs.push_back(VT); VTs.push_back(MVT::Other); @@ -2758,14 +2759,14 @@ SDOperand PPCTargetLowering::LowerAtomicLAS(SDOperand Op, SelectionDAG &DAG) { } SDOperand PPCTargetLowering::LowerAtomicLCS(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.Val->getValueType(0); + MVT VT = Op.Val->getValueType(0); SDOperand Chain = Op.getOperand(0); SDOperand Ptr = Op.getOperand(1); SDOperand NewVal = Op.getOperand(2); SDOperand OldVal = Op.getOperand(3); // Issue a "load and reserve". 
- std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; VTs.push_back(VT); VTs.push_back(MVT::Other); @@ -2801,13 +2802,13 @@ SDOperand PPCTargetLowering::LowerAtomicLCS(SDOperand Op, SelectionDAG &DAG) { } SDOperand PPCTargetLowering::LowerAtomicSWAP(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.Val->getValueType(0); + MVT VT = Op.Val->getValueType(0); SDOperand Chain = Op.getOperand(0); SDOperand Ptr = Op.getOperand(1); SDOperand NewVal = Op.getOperand(2); // Issue a "load and reserve". - std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; VTs.push_back(VT); VTs.push_back(MVT::Other); @@ -2837,8 +2838,8 @@ SDOperand PPCTargetLowering::LowerAtomicSWAP(SDOperand Op, SelectionDAG &DAG) { /// possible. SDOperand PPCTargetLowering::LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { // Not FP? Not a fsel. - if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) || - !MVT::isFloatingPoint(Op.getOperand(2).getValueType())) + if (!Op.getOperand(0).getValueType().isFloatingPoint() || + !Op.getOperand(2).getValueType().isFloatingPoint()) return SDOperand(); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); @@ -2846,8 +2847,8 @@ SDOperand PPCTargetLowering::LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { // Cannot handle SETEQ/SETNE. if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand(); - MVT::ValueType ResVT = Op.getValueType(); - MVT::ValueType CmpVT = Op.getOperand(0).getValueType(); + MVT ResVT = Op.getValueType(); + MVT CmpVT = Op.getOperand(0).getValueType(); SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3); @@ -2916,13 +2917,13 @@ SDOperand PPCTargetLowering::LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { // FIXME: Split this code up when LegalizeDAGTypes lands. SDOperand PPCTargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { - assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType())); + assert(Op.getOperand(0).getValueType().isFloatingPoint()); SDOperand Src = Op.getOperand(0); if (Src.getValueType() == MVT::f32) Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src); SDOperand Tmp; - switch (Op.getValueType()) { + switch (Op.getValueType().getSimpleVT()) { default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!"); case MVT::i32: Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src); @@ -2958,7 +2959,7 @@ SDOperand PPCTargetLowering::LowerFP_ROUND_INREG(SDOperand Op, // This sequence changes FPSCR to do round-to-zero, adds the two halves // of the long double, and puts FPSCR back the way it was. We do not // actually model FPSCR. - std::vector<MVT::ValueType> NodeTys; + std::vector<MVT> NodeTys; SDOperand Ops[4], Result, MFFSreg, InFlag, FPreg; NodeTys.push_back(MVT::f64); // Return register @@ -3026,7 +3027,7 @@ SDOperand PPCTargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { // then lfd it and fcfid it. 
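The atomic lowerings here keep building their multi-result type lists the same way; only the element type of the container changes, and MVT::Other and friends still convert to MVT, as these hunks show. A minimal sketch; resultTypes is a hypothetical helper:

#include "llvm/CodeGen/ValueTypes.h"
#include <vector>

static std::vector<MVT> resultTypes(MVT VT) {
  // Value result first, then the chain, as in the lowering code above.
  std::vector<MVT> VTs;
  VTs.push_back(VT);
  VTs.push_back(MVT::Other);
  return VTs;
}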
MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); int FrameIdx = FrameInfo->CreateStackObject(8, 8); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32, @@ -3069,9 +3070,9 @@ SDOperand PPCTargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { */ MachineFunction &MF = DAG.getMachineFunction(); - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); - std::vector<MVT::ValueType> NodeTys; + MVT VT = Op.getValueType(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + std::vector<MVT> NodeTys; SDOperand MFFSreg, InFlag; // Save FP Control Word to register @@ -3105,13 +3106,13 @@ SDOperand PPCTargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { SDOperand RetVal = DAG.getNode(ISD::XOR, MVT::i32, CWD1, CWD2); - return DAG.getNode((MVT::getSizeInBits(VT) < 16 ? + return DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); } SDOperand PPCTargetLowering::LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); - unsigned BitWidth = MVT::getSizeInBits(VT); + MVT VT = Op.getValueType(); + unsigned BitWidth = VT.getSizeInBits(); assert(Op.getNumOperands() == 3 && VT == Op.getOperand(1).getValueType() && "Unexpected SHL!"); @@ -3121,7 +3122,7 @@ SDOperand PPCTargetLowering::LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) { SDOperand Lo = Op.getOperand(0); SDOperand Hi = Op.getOperand(1); SDOperand Amt = Op.getOperand(2); - MVT::ValueType AmtVT = Amt.getValueType(); + MVT AmtVT = Amt.getValueType(); SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, DAG.getConstant(BitWidth, AmtVT), Amt); @@ -3139,8 +3140,8 @@ SDOperand PPCTargetLowering::LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) { } SDOperand PPCTargetLowering::LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); - unsigned BitWidth = MVT::getSizeInBits(VT); + MVT VT = Op.getValueType(); + unsigned BitWidth = VT.getSizeInBits(); assert(Op.getNumOperands() == 3 && VT == Op.getOperand(1).getValueType() && "Unexpected SRL!"); @@ -3150,7 +3151,7 @@ SDOperand PPCTargetLowering::LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) { SDOperand Lo = Op.getOperand(0); SDOperand Hi = Op.getOperand(1); SDOperand Amt = Op.getOperand(2); - MVT::ValueType AmtVT = Amt.getValueType(); + MVT AmtVT = Amt.getValueType(); SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, DAG.getConstant(BitWidth, AmtVT), Amt); @@ -3168,8 +3169,8 @@ SDOperand PPCTargetLowering::LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) { } SDOperand PPCTargetLowering::LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); - unsigned BitWidth = MVT::getSizeInBits(VT); + MVT VT = Op.getValueType(); + unsigned BitWidth = VT.getSizeInBits(); assert(Op.getNumOperands() == 3 && VT == Op.getOperand(1).getValueType() && "Unexpected SRA!"); @@ -3178,7 +3179,7 @@ SDOperand PPCTargetLowering::LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) { SDOperand Lo = Op.getOperand(0); SDOperand Hi = Op.getOperand(1); SDOperand Amt = Op.getOperand(2); - MVT::ValueType AmtVT = Amt.getValueType(); + MVT AmtVT = Amt.getValueType(); SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, DAG.getConstant(BitWidth, AmtVT), Amt); @@ -3210,7 +3211,7 @@ static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t 
VectorBits[2], // Start with zero'd results. VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0; - unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType()); + unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits(); for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { SDOperand OpVal = BV->getOperand(i); @@ -3296,26 +3297,26 @@ static bool isConstantSplat(const uint64_t Bits128[2], /// BuildSplatI - Build a canonical splati of Val with an element size of /// SplatSize. Cast the result to VT. -static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT, +static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT VT, SelectionDAG &DAG) { assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); - static const MVT::ValueType VTys[] = { // canonical VT to use for each size. + static const MVT VTys[] = { // canonical VT to use for each size. MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 }; - MVT::ValueType ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; + MVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. if (Val == -1) SplatSize = 1; - MVT::ValueType CanonicalVT = VTys[SplatSize-1]; + MVT CanonicalVT = VTys[SplatSize-1]; // Build a canonical splat for this value. - SDOperand Elt = DAG.getConstant(Val, MVT::getVectorElementType(CanonicalVT)); + SDOperand Elt = DAG.getConstant(Val, CanonicalVT.getVectorElementType()); SmallVector<SDOperand, 8> Ops; - Ops.assign(MVT::getVectorNumElements(CanonicalVT), Elt); + Ops.assign(CanonicalVT.getVectorNumElements(), Elt); SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, &Ops[0], Ops.size()); return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res); @@ -3325,7 +3326,7 @@ static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT, /// specified intrinsic ID. static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS, SelectionDAG &DAG, - MVT::ValueType DestVT = MVT::Other) { + MVT DestVT = MVT::Other) { if (DestVT == MVT::Other) DestVT = LHS.getValueType(); return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, DAG.getConstant(IID, MVT::i32), LHS, RHS); @@ -3335,7 +3336,7 @@ static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS, /// specified intrinsic ID. static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1, SDOperand Op2, SelectionDAG &DAG, - MVT::ValueType DestVT = MVT::Other) { + MVT DestVT = MVT::Other) { if (DestVT == MVT::Other) DestVT = Op0.getValueType(); return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); @@ -3345,7 +3346,7 @@ static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1, /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified /// amount. The result has the specified value type. static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt, - MVT::ValueType VT, SelectionDAG &DAG) { + MVT VT, SelectionDAG &DAG) { // Force LHS/RHS to be the right type. LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS); RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS); @@ -3705,8 +3706,8 @@ SDOperand PPCTargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except // that it is in input element units, not in bytes. Convert now. 
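The BuildSplatI hunk above and the shuffle lowering just below both show the vector-type accessors in their member form: element type and element count are read off the MVT itself, while a vector type is constructed with the static MVT::getVectorVT, as in the CellSPU hunks earlier. A minimal sketch; both helper names are hypothetical:

#include "llvm/CodeGen/ValueTypes.h"

static MVT widenToRegisterVector(MVT EltVT) {
  // Build a 128-bit-wide vector of EltVT, as the CellSPU lowering does.
  return MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
}

static unsigned vectorSizeInBytes(MVT VecVT) {
  // Element type and count are member queries now.
  MVT EltVT = VecVT.getVectorElementType();
  return EltVT.getSizeInBits() / 8 * VecVT.getVectorNumElements();
}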
- MVT::ValueType EltVT = MVT::getVectorElementType(V1.getValueType()); - unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8; + MVT EltVT = V1.getValueType().getVectorElementType(); + unsigned BytesPerElement = EltVT.getSizeInBits()/8; SmallVector<SDOperand, 16> ResultMask; for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) { @@ -3794,7 +3795,7 @@ SDOperand PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, Op.getOperand(3), // RHS DAG.getConstant(CompareOpc, MVT::i32) }; - std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; VTs.push_back(Op.getOperand(2).getValueType()); VTs.push_back(MVT::Flag); SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3); @@ -3843,7 +3844,7 @@ SDOperand PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, // Create a stack slot that is 16-byte aligned. MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); int FrameIdx = FrameInfo->CreateStackObject(16, 16); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); // Store the input value into Value#0 of the stack slot. @@ -4154,7 +4155,7 @@ SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, SDOperand Load = N->getOperand(0); LoadSDNode *LD = cast<LoadSDNode>(Load); // Create the byte-swapping load. - std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; VTs.push_back(MVT::i32); VTs.push_back(MVT::Other); SDOperand MO = DAG.getMemOperand(LD->getMemOperand()); @@ -4264,7 +4265,7 @@ SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); // Create the PPCISD altivec 'dot' comparison node. - std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; SDOperand Ops[] = { LHS.getOperand(2), // LHS of compare LHS.getOperand(3), // RHS of compare @@ -4367,7 +4368,7 @@ PPCTargetLowering::getConstraintType(const std::string &Constraint) const { std::pair<unsigned, const TargetRegisterClass*> PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const { + MVT VT) const { if (Constraint.size() == 1) { // GCC RS6000 Constraint Letters switch (Constraint[0]) { @@ -4527,7 +4528,7 @@ SDOperand PPCTargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) return SDOperand(); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); bool isPPC64 = PtrVT == MVT::i64; MachineFunction &MF = DAG.getMachineFunction(); diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h index d28799a..34012ff 100644 --- a/lib/Target/PowerPC/PPCISelLowering.h +++ b/lib/Target/PowerPC/PPCISelLowering.h @@ -235,7 +235,7 @@ namespace llvm { virtual const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType - Return the ISD::SETCC ValueType - virtual MVT::ValueType getSetCCResultType(const SDOperand &) const; + virtual MVT getSetCCResultType(const SDOperand &) const; /// getPreIndexedAddressParts - returns true by value, base pointer and /// offset pointer and addressing mode by reference if the node's address @@ -290,7 +290,7 @@ namespace llvm { ConstraintType getConstraintType(const std::string &Constraint) const; std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; /// 
getByValTypeAlignment - Return the desired alignment for ByVal aggregate /// function arguments in the caller parameter area. This is the actual diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp index 3d5ad0b..89172fc 100644 --- a/lib/Target/Sparc/SparcISelLowering.cpp +++ b/lib/Target/Sparc/SparcISelLowering.cpp @@ -91,9 +91,9 @@ SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { std::vector<SDOperand> OutChains; for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) { - MVT::ValueType ObjectVT = getValueType(I->getType()); + MVT ObjectVT = getValueType(I->getType()); - switch (ObjectVT) { + switch (ObjectVT.getSimpleVT()) { default: assert(0 && "Unhandled argument type!"); case MVT::i1: case MVT::i8: @@ -123,7 +123,7 @@ SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { ISD::LoadExtType LoadOp = ISD::SEXTLOAD; // Sparc is big endian, so add an offset based on the ObjectVT. - unsigned Offset = 4-std::max(1U, MVT::getSizeInBits(ObjectVT)/8); + unsigned Offset = 4-std::max(1U, ObjectVT.getSizeInBits()/8); FIPtr = DAG.getNode(ISD::ADD, MVT::i32, FIPtr, DAG.getConstant(Offset, MVT::i32)); Load = DAG.getExtLoad(LoadOp, MVT::i32, Root, FIPtr, @@ -246,7 +246,7 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { // Count the size of the outgoing arguments. unsigned ArgsSize = 0; for (unsigned i = 5, e = Op.getNumOperands(); i != e; i += 2) { - switch (Op.getOperand(i).getValueType()) { + switch (Op.getOperand(i).getValueType().getSimpleVT()) { default: assert(0 && "Unknown value type!"); case MVT::i1: case MVT::i8: @@ -323,10 +323,10 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { for (unsigned i = 5, e = Op.getNumOperands(); i != e; i += 2) { SDOperand Val = Op.getOperand(i); - MVT::ValueType ObjectVT = Val.getValueType(); + MVT ObjectVT = Val.getValueType(); SDOperand ValToStore(0, 0); unsigned ObjSize; - switch (ObjectVT) { + switch (ObjectVT.getSimpleVT()) { default: assert(0 && "Unhandled argument type!"); case MVT::i32: ObjSize = 4; @@ -414,7 +414,7 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32); - std::vector<MVT::ValueType> NodeTys; + std::vector<MVT> NodeTys; NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. SDOperand Ops[] = { Chain, Callee, InFlag }; @@ -744,7 +744,7 @@ static SDOperand LowerBR_CC(SDOperand Op, SelectionDAG &DAG) { // Get the condition flag. 
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index 3d5ad0b..89172fc 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -91,9 +91,9 @@ SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
   std::vector<SDOperand> OutChains;
   for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
        I != E; ++I) {
-    MVT::ValueType ObjectVT = getValueType(I->getType());
-
-    switch (ObjectVT) {
+    MVT ObjectVT = getValueType(I->getType());
+
+    switch (ObjectVT.getSimpleVT()) {
     default: assert(0 && "Unhandled argument type!");
     case MVT::i1:
     case MVT::i8:
@@ -123,7 +123,7 @@ SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
         ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
 
         // Sparc is big endian, so add an offset based on the ObjectVT.
-        unsigned Offset = 4-std::max(1U, MVT::getSizeInBits(ObjectVT)/8);
+        unsigned Offset = 4-std::max(1U, ObjectVT.getSizeInBits()/8);
         FIPtr = DAG.getNode(ISD::ADD, MVT::i32, FIPtr,
                             DAG.getConstant(Offset, MVT::i32));
         Load = DAG.getExtLoad(LoadOp, MVT::i32, Root, FIPtr,
@@ -246,7 +246,7 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) {
   // Count the size of the outgoing arguments.
   unsigned ArgsSize = 0;
   for (unsigned i = 5, e = Op.getNumOperands(); i != e; i += 2) {
-    switch (Op.getOperand(i).getValueType()) {
+    switch (Op.getOperand(i).getValueType().getSimpleVT()) {
     default: assert(0 && "Unknown value type!");
     case MVT::i1:
     case MVT::i8:
@@ -323,10 +323,10 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) {
 
   for (unsigned i = 5, e = Op.getNumOperands(); i != e; i += 2) {
     SDOperand Val = Op.getOperand(i);
-    MVT::ValueType ObjectVT = Val.getValueType();
+    MVT ObjectVT = Val.getValueType();
     SDOperand ValToStore(0, 0);
     unsigned ObjSize;
-    switch (ObjectVT) {
+    switch (ObjectVT.getSimpleVT()) {
     default: assert(0 && "Unhandled argument type!");
     case MVT::i32:
       ObjSize = 4;
@@ -414,7 +414,7 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) {
   else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
     Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
 
-  std::vector<MVT::ValueType> NodeTys;
+  std::vector<MVT> NodeTys;
   NodeTys.push_back(MVT::Other);   // Returns a chain
   NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
   SDOperand Ops[] = { Chain, Callee, InFlag };
@@ -744,7 +744,7 @@ static SDOperand LowerBR_CC(SDOperand Op, SelectionDAG &DAG) {
   // Get the condition flag.
   SDOperand CompareFlag;
   if (LHS.getValueType() == MVT::i32) {
-    std::vector<MVT::ValueType> VTs;
+    std::vector<MVT> VTs;
     VTs.push_back(MVT::i32);
     VTs.push_back(MVT::Flag);
     SDOperand Ops[2] = { LHS, RHS };
@@ -774,7 +774,7 @@ static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
 
   SDOperand CompareFlag;
   if (LHS.getValueType() == MVT::i32) {
-    std::vector<MVT::ValueType> VTs;
+    std::vector<MVT> VTs;
     VTs.push_back(LHS.getValueType());   // subcc returns a value
     VTs.push_back(MVT::Flag);
     SDOperand Ops[2] = { LHS, RHS };
@@ -804,14 +804,14 @@ static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
 
 static SDOperand LowerVAARG(SDOperand Op, SelectionDAG &DAG) {
   SDNode *Node = Op.Val;
-  MVT::ValueType VT = Node->getValueType(0);
+  MVT VT = Node->getValueType(0);
   SDOperand InChain = Node->getOperand(0);
   SDOperand VAListPtr = Node->getOperand(1);
   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
   SDOperand VAList = DAG.getLoad(MVT::i32, InChain, VAListPtr, SV, 0);
   // Increment the pointer, VAList, to the next vaarg
   SDOperand NextPtr = DAG.getNode(ISD::ADD, MVT::i32, VAList,
-                                  DAG.getConstant(MVT::getSizeInBits(VT)/8,
+                                  DAG.getConstant(VT.getSizeInBits()/8,
                                                   MVT::i32));
   // Store the incremented VAList to the legalized pointer
   InChain = DAG.getStore(VAList.getValue(1), NextPtr,
@@ -846,7 +846,7 @@ static SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG) {
   // to provide a register spill area.
   SDOperand NewVal = DAG.getNode(ISD::ADD, MVT::i32, NewSP,
                                  DAG.getConstant(96, MVT::i32));
-  std::vector<MVT::ValueType> Tys;
+  std::vector<MVT> Tys;
   Tys.push_back(MVT::i32);
   Tys.push_back(MVT::Other);
   SDOperand Ops[2] = { NewVal, Chain };
diff --git a/lib/Target/TargetRegisterInfo.cpp b/lib/Target/TargetRegisterInfo.cpp
index 9c8de12..3f44a0c 100644
--- a/lib/Target/TargetRegisterInfo.cpp
+++ b/lib/Target/TargetRegisterInfo.cpp
@@ -48,8 +48,7 @@ namespace {
 /// register of the given type.  If type is MVT::Other, then just return any
 /// register class the register belongs to.
 const TargetRegisterClass *
-TargetRegisterInfo::getPhysicalRegisterRegClass(unsigned reg,
-                                                MVT::ValueType VT) const {
+TargetRegisterInfo::getPhysicalRegisterRegClass(unsigned reg, MVT VT) const {
   assert(isPhysicalRegister(reg) && "reg must be a physical register");
 
   // Pick the register class of the right type that contains this physreg.
diff --git a/lib/Target/TargetSelectionDAG.td b/lib/Target/TargetSelectionDAG.td
index 474f910..b7fadd1 100644
--- a/lib/Target/TargetSelectionDAG.td
+++ b/lib/Target/TargetSelectionDAG.td
@@ -53,8 +53,8 @@ class SDTCisOpSmallerThanOp<int SmallOp, int BigOp> : SDTypeConstraint<SmallOp>{
 
 /// SDTCisIntVectorOfSameSize - This indicates that ThisOp and OtherOp are
 /// vector types, and that ThisOp is the result of
-/// MVT::getIntVectorWithNumElements with the number of elements that ThisOp
-/// has.
+/// MVT::getIntVectorWithNumElements with the number of elements
+/// that ThisOp has.
 class SDTCisIntVectorOfSameSize<int ThisOp, int OtherOp>
   : SDTypeConstraint<ThisOp> {
   int OtherOpNum = OtherOp;
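The SparcISelLowering.cpp hunks above show the one place where the new wrapper is not a drop-in replacement: MVT no longer converts implicitly to an integer, so a switch has to go through getSimpleVT(), which (per the commit message) asserts if handed an extended value type — those should not exist after type legalization. A minimal sketch of the idiom, loosely following the LowerCALL hunk (the function name and byte sizes are illustrative):

    // Returns the outgoing-argument slot size for a value type; a sketch
    // assuming the post-patch MVT API, not code from this patch.
    unsigned argSlotSize(MVT ObjectVT) {
      switch (ObjectVT.getSimpleVT()) {  // asserts if ObjectVT is extended
      default: assert(0 && "Unhandled argument type!"); return 0;
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
      case MVT::f32: return 4;
      case MVT::i64:
      case MVT::f64: return 8;
      }
    }
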
@@ -467,8 +467,8 @@ class PatLeaf<dag frag, code pred = [{}], SDNodeXForm xform = NOOP_SDNodeXForm>
 
 // Leaf fragments.
 
-def vtInt      : PatLeaf<(vt),  [{ return MVT::isInteger(N->getVT()); }]>;
-def vtFP       : PatLeaf<(vt),  [{ return MVT::isFloatingPoint(N->getVT()); }]>;
+def vtInt      : PatLeaf<(vt),  [{ return N->getVT().isInteger(); }]>;
+def vtFP       : PatLeaf<(vt),  [{ return N->getVT().isFloatingPoint(); }]>;
 
 def immAllOnes : PatLeaf<(imm), [{ return N->isAllOnesValue(); }]>;
 def immAllOnesV: PatLeaf<(build_vector), [{
diff --git a/lib/Target/X86/X86ATTAsmPrinter.cpp b/lib/Target/X86/X86ATTAsmPrinter.cpp
index 5cae112..eacab47 100644
--- a/lib/Target/X86/X86ATTAsmPrinter.cpp
+++ b/lib/Target/X86/X86ATTAsmPrinter.cpp
@@ -215,7 +215,7 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
     O << '%';
     unsigned Reg = MO.getReg();
     if (Modifier && strncmp(Modifier, "subreg", strlen("subreg")) == 0) {
-      MVT::ValueType VT = (strcmp(Modifier+6,"64") == 0) ?
+      MVT VT = (strcmp(Modifier+6,"64") == 0) ?
         MVT::i64 : ((strcmp(Modifier+6, "32") == 0) ? MVT::i32 :
                     ((strcmp(Modifier+6,"16") == 0) ? MVT::i16 : MVT::i8));
       Reg = getX86SubSuperRegister(Reg, VT);
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index bb8c58a..5ee9122 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -215,7 +215,7 @@ namespace {
 
     /// getTruncate - return an SDNode that implements a subreg based truncate
    /// of the specified operand to the the specified value type.
-    SDNode *getTruncate(SDOperand N0, MVT::ValueType VT);
+    SDNode *getTruncate(SDOperand N0, MVT VT);
 
 #ifndef NDEBUG
     unsigned Indent;
@@ -329,7 +329,7 @@ bool X86DAGToDAGISel::CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const {
   // NU), then TF is a predecessor of FU and a successor of NU. But since
   // NU and FU are flagged together, this effectively creates a cycle.
   bool HasFlagUse = false;
-  MVT::ValueType VT = Root->getValueType(Root->getNumValues()-1);
+  MVT VT = Root->getValueType(Root->getNumValues()-1);
   while ((VT == MVT::Flag && !Root->use_empty())) {
     SDNode *FU = findFlagUse(Root);
     if (FU == NULL)
@@ -440,8 +440,8 @@ void X86DAGToDAGISel::PreprocessForRMW(SelectionDAG &DAG) {
     SDOperand N1 = I->getOperand(1);
     SDOperand N2 = I->getOperand(2);
-    if ((MVT::isFloatingPoint(N1.getValueType()) &&
-         !MVT::isVector(N1.getValueType())) ||
+    if ((N1.getValueType().isFloatingPoint() &&
+         !N1.getValueType().isVector()) ||
         !N1.hasOneUse())
       continue;
 
@@ -505,8 +505,8 @@ void X86DAGToDAGISel::PreprocessForFPConvert(SelectionDAG &DAG) {
 
     // If the source and destination are SSE registers, then this is a legal
     // conversion that should not be lowered.
-    MVT::ValueType SrcVT = N->getOperand(0).getValueType();
-    MVT::ValueType DstVT = N->getValueType(0);
+    MVT SrcVT = N->getOperand(0).getValueType();
+    MVT DstVT = N->getValueType(0);
     bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
     bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
     if (SrcIsSSE && DstIsSSE)
@@ -524,7 +524,7 @@ void X86DAGToDAGISel::PreprocessForFPConvert(SelectionDAG &DAG) {
     // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
     // FPStack has extload and truncstore.  SSE can fold direct loads into other
     // operations.  Based on this, decide what we want to do.
-    MVT::ValueType MemVT;
+    MVT MemVT;
     if (N->getOpcode() == ISD::FP_ROUND)
       MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
     else
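The predicates in the X86ISelDAGToDAG.cpp hunks follow the same member-function pattern as the size queries. A small sketch, assuming the post-patch API (the helper name is invented; the condition paraphrases the PreprocessForRMW hunk above):

    // True for scalar floating-point values; a sketch, not patch code.
    static bool isScalarFP(SDOperand N) {
      MVT VT = N.getValueType();
      return VT.isFloatingPoint() && !VT.isVector();
    }
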
@@ -942,7 +942,7 @@ bool X86DAGToDAGISel::SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base,
   if (MatchAddress(N, AM))
     return false;
 
-  MVT::ValueType VT = N.getValueType();
+  MVT VT = N.getValueType();
   if (AM.BaseType == X86ISelAddressMode::RegBase) {
     if (!AM.Base.Reg.Val)
       AM.Base.Reg = CurDAG->getRegister(0, VT);
@@ -1016,7 +1016,7 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDOperand Op, SDOperand N,
   if (MatchAddress(N, AM))
     return false;
 
-  MVT::ValueType VT = N.getValueType();
+  MVT VT = N.getValueType();
   unsigned Complexity = 0;
   if (AM.BaseType == X86ISelAddressMode::RegBase)
     if (AM.Base.Reg.Val)
@@ -1110,16 +1110,17 @@ static SDNode *FindCallStartFromCall(SDNode *Node) {
   return FindCallStartFromCall(Node->getOperand(0).Val);
 }
 
-SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT::ValueType VT) {
+SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT VT) {
   SDOperand SRIdx;
-  switch (VT) {
+  switch (VT.getSimpleVT()) {
+  default: assert(0 && "Unknown truncate!");
   case MVT::i8:
     SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
     // Ensure that the source register has an 8-bit subreg on 32-bit targets
     if (!Subtarget->is64Bit()) {
       unsigned Opc;
-      MVT::ValueType VT;
-      switch (N0.getValueType()) {
+      MVT VT;
+      switch (N0.getValueType().getSimpleVT()) {
       default: assert(0 && "Unknown truncate!");
       case MVT::i16:
         Opc = X86::MOV16to16_;
@@ -1141,7 +1142,6 @@ SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT::ValueType VT) {
   case MVT::i32:
     SRIdx = CurDAG->getTargetConstant(3, MVT::i32); // SubRegSet 3
     break;
-  default: assert(0 && "Unknown truncate!"); break;
   }
   return CurDAG->getTargetNode(X86::EXTRACT_SUBREG, VT, N0, SRIdx);
 }
@@ -1149,7 +1149,7 @@
 
 SDNode *X86DAGToDAGISel::Select(SDOperand N) {
   SDNode *Node = N.Val;
-  MVT::ValueType NVT = Node->getValueType(0);
+  MVT NVT = Node->getValueType(0);
   unsigned Opc, MOpc;
   unsigned Opcode = Node->getOpcode();
 
@@ -1183,7 +1183,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) {
       // RIP-relative addressing.
       if (TM.getCodeModel() != CodeModel::Small)
        break;
-      MVT::ValueType PtrVT = TLI.getPointerTy();
+      MVT PtrVT = TLI.getPointerTy();
       SDOperand N0 = N.getOperand(0);
       SDOperand N1 = N.getOperand(1);
       if (N.Val->getValueType(0) == PtrVT &&
@@ -1224,7 +1224,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) {
 
       bool isSigned = Opcode == ISD::SMUL_LOHI;
       if (!isSigned)
-        switch (NVT) {
+        switch (NVT.getSimpleVT()) {
        default: assert(0 && "Unsupported VT!");
         case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
         case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
@@ -1232,7 +1232,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) {
         case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
         }
       else
-        switch (NVT) {
+        switch (NVT.getSimpleVT()) {
        default: assert(0 && "Unsupported VT!");
         case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
         case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
@@ -1241,7 +1241,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) {
         }
 
       unsigned LoReg, HiReg;
-      switch (NVT) {
+      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
       case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
       case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
@@ -1334,7 +1334,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) {
 
       bool isSigned = Opcode == ISD::SDIVREM;
       if (!isSigned)
-        switch (NVT) {
+        switch (NVT.getSimpleVT()) {
        default: assert(0 && "Unsupported VT!");
         case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
         case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
@@ -1342,7 +1342,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) {
         case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
         }
       else
-        switch (NVT) {
+        switch (NVT.getSimpleVT()) {
        default: assert(0 && "Unsupported VT!");
         case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
         case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
@@ -1352,7 +1352,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) {
 
       unsigned LoReg, HiReg;
       unsigned ClrOpcode, SExtOpcode;
-      switch (NVT) {
+      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
       case MVT::i8:
         LoReg = X86::AL;  HiReg = X86::AH;
@@ -1493,7 +1493,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) {
       SDOperand N0 = Node->getOperand(0);
       // Get the subregsiter index for the type to extend.
-      MVT::ValueType N0VT = N0.getValueType();
+      MVT N0VT = N0.getValueType();
       unsigned Idx = (N0VT == MVT::i32) ? X86::SUBREG_32BIT :
                      (N0VT == MVT::i16) ? X86::SUBREG_16BIT :
                         (Subtarget->is64Bit()) ? X86::SUBREG_8BIT : 0;
@@ -1523,30 +1523,30 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) {
       SDOperand N0 = Node->getOperand(0);
       AddToISelQueue(N0);
-      MVT::ValueType SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
+      MVT SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
       SDOperand TruncOp = SDOperand(getTruncate(N0, SVT), 0);
       unsigned Opc = 0;
-      switch (NVT) {
+      switch (NVT.getSimpleVT()) {
+      default: assert(0 && "Unknown sign_extend_inreg!");
       case MVT::i16:
         if (SVT == MVT::i8) Opc = X86::MOVSX16rr8;
         else assert(0 && "Unknown sign_extend_inreg!");
         break;
       case MVT::i32:
-        switch (SVT) {
+        switch (SVT.getSimpleVT()) {
+        default: assert(0 && "Unknown sign_extend_inreg!");
         case MVT::i8:  Opc = X86::MOVSX32rr8;  break;
         case MVT::i16: Opc = X86::MOVSX32rr16; break;
-        default: assert(0 && "Unknown sign_extend_inreg!");
         }
         break;
       case MVT::i64:
-        switch (SVT) {
+        switch (SVT.getSimpleVT()) {
+        default: assert(0 && "Unknown sign_extend_inreg!");
         case MVT::i8:  Opc = X86::MOVSX64rr8;  break;
         case MVT::i16: Opc = X86::MOVSX64rr16; break;
         case MVT::i32: Opc = X86::MOVSX64rr32; break;
-        default: assert(0 && "Unknown sign_extend_inreg!");
         }
         break;
-      default: assert(0 && "Unknown sign_extend_inreg!");
       }
 
       SDNode *ResNode = CurDAG->getTargetNode(Opc, NVT, TruncOp);
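Besides the type rewrite, several hunks above also hoist the default: assert(...) case to the head of each switch. With switch (VT.getSimpleVT()) the dispatch is unaffected by case order, so this is purely stylistic. A compressed sketch of the resulting shape (a paraphrase of the sign_extend_inreg selection above, not a complete copy):

    // Pick a MOVSX opcode for sign_extend_inreg; a sketch under the
    // post-patch API.
    static unsigned movsxOpcode(MVT NVT, MVT SVT) {
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unknown sign_extend_inreg!"); return 0;
      case MVT::i32:
        switch (SVT.getSimpleVT()) {
        default: assert(0 && "Unknown sign_extend_inreg!"); return 0;
        case MVT::i8:  return X86::MOVSX32rr8;
        case MVT::i16: return X86::MOVSX32rr16;
        }
      }
      return 0;  // unreachable
    }
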
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index c4307b8..71f0779 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -493,44 +493,44 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
   // will selectively turn on ones that can be effectively codegen'd.
   for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
        VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
-    setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::SHL, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::SRA, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::SRL, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::ROTL, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::ROTR, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand);
-    setOperationAction(ISD::VSETCC, (MVT::ValueType)VT, Expand);
+    setOperationAction(ISD::ADD , (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SUB , (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::MUL , (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::VSETCC, (MVT::SimpleValueType)VT, Expand);
   }
 
   if (Subtarget->hasMMX()) {
@@ -654,13 +654,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
     setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
 
     // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
-    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
+    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) {
+      MVT VT = (MVT::SimpleValueType)i;
       // Do not attempt to custom lower non-power-of-2 vectors
-      if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
+      if (!isPowerOf2_32(VT.getVectorNumElements()))
         continue;
-      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom);
-      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom);
-      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
+      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
     }
     setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
     setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
@@ -675,16 +676,16 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
 
     // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
     for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
-      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
-      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
-      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
-      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
-      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
-      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
-      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
-      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
-      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
-      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
+      setOperationAction(ISD::AND,    (MVT::SimpleValueType)VT, Promote);
+      AddPromotedToType (ISD::AND,    (MVT::SimpleValueType)VT, MVT::v2i64);
+      setOperationAction(ISD::OR,     (MVT::SimpleValueType)VT, Promote);
+      AddPromotedToType (ISD::OR,     (MVT::SimpleValueType)VT, MVT::v2i64);
+      setOperationAction(ISD::XOR,    (MVT::SimpleValueType)VT, Promote);
+      AddPromotedToType (ISD::XOR,    (MVT::SimpleValueType)VT, MVT::v2i64);
+      setOperationAction(ISD::LOAD,   (MVT::SimpleValueType)VT, Promote);
+      AddPromotedToType (ISD::LOAD,   (MVT::SimpleValueType)VT, MVT::v2i64);
+      setOperationAction(ISD::SELECT, (MVT::SimpleValueType)VT, Promote);
+      AddPromotedToType (ISD::SELECT, (MVT::SimpleValueType)VT, MVT::v2i64);
     }
 
     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
@@ -743,8 +744,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
 }
 
-MVT::ValueType
-X86TargetLowering::getSetCCResultType(const SDOperand &) const {
+MVT X86TargetLowering::getSetCCResultType(const SDOperand &) const {
   return MVT::i8;
 }
 
@@ -792,7 +792,7 @@ unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
 /// and store operations as a result of memset, memcpy, and memmove
 /// lowering. It returns MVT::iAny if SelectionDAG should be responsible for
 /// determining it.
-MVT::ValueType
+MVT
 X86TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align,
                                        bool isSrcConst, bool isSrcStr) const {
   if ((isSrcConst || isSrcStr) && Subtarget->hasSSE2() && Size >= 16)
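The constructor loops above also show how iteration over value types changes: the induction variable stays a plain unsigned, and the cast target becomes the MVT::SimpleValueType enum, from which an MVT can be formed (the first custom-lowering loop's + side does exactly this). A trimmed sketch of the pattern, assuming the post-patch setOperationAction overloads shown in these hunks:

    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT VT = (MVT::SimpleValueType)i;   // enum value wrapped into an MVT
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;                         // skip non-power-of-2 vectors
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    }
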
@@ -948,7 +948,7 @@ LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
 
   // Copy all of the result registers out of their specified physreg.
   for (unsigned i = 0; i != RVLocs.size(); ++i) {
-    MVT::ValueType CopyVT = RVLocs[i].getValVT();
+    MVT CopyVT = RVLocs[i].getValVT();
 
     // If this is a call to a function that returns an fp value on the floating
     // point stack, but where we prefer to use the value in xmm registers, copy
@@ -1123,7 +1123,7 @@ SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
   // changed with more analysis.
   // In case of tail call optimization mark all arguments mutable. Since they
   // could be overwritten by lowering of arguments in case of a tail call.
-  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
+  int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
                                   VA.getLocMemOffset(), isImmutable);
   SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
   if (Flags.isByVal())
@@ -1172,7 +1172,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
     LastVal = VA.getValNo();
 
     if (VA.isRegLoc()) {
-      MVT::ValueType RegVT = VA.getLocVT();
+      MVT RegVT = VA.getLocVT();
       TargetRegisterClass *RC;
       if (RegVT == MVT::i32)
         RC = X86::GR32RegisterClass;
@@ -1182,10 +1182,10 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
         RC = X86::FR32RegisterClass;
       else if (RegVT == MVT::f64)
         RC = X86::FR64RegisterClass;
-      else if (MVT::isVector(RegVT) && MVT::getSizeInBits(RegVT) == 128)
+      else if (RegVT.isVector() && RegVT.getSizeInBits() == 128)
         RC = X86::VR128RegisterClass;
-      else if (MVT::isVector(RegVT)) {
-        assert(MVT::getSizeInBits(RegVT) == 64);
+      else if (RegVT.isVector()) {
+        assert(RegVT.getSizeInBits() == 64);
         if (!Is64Bit)
           RC = X86::VR64RegisterClass;     // MMX values are passed in MMXs.
         else {
@@ -1221,7 +1221,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
 
       // Handle MMX values passed in GPRs.
       if (Is64Bit && RegVT != VA.getLocVT()) {
-        if (MVT::getSizeInBits(RegVT) == 64 && RC == X86::GR64RegisterClass)
+        if (RegVT.getSizeInBits() == 64 && RC == X86::GR64RegisterClass)
          ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);
         else if (RC == X86::VR128RegisterClass) {
           ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i64, ArgValue,
@@ -1408,7 +1408,7 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
   if (!IsTailCall || FPDiff==0) return Chain;
 
   // Adjust the Return address stack slot.
-  MVT::ValueType VT = getPointerTy();
+  MVT VT = getPointerTy();
   OutRetAddr = getReturnAddressFrameIndex(DAG);
   // Load the "old" Return address.
   OutRetAddr = DAG.getLoad(VT, Chain,OutRetAddr, NULL, 0);
@@ -1427,7 +1427,7 @@ EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
   int SlotSize = Is64Bit ? 8 : 4;
   int NewReturnAddrFI =
     MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
-  MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32;
+  MVT VT = Is64Bit ? MVT::i64 : MVT::i32;
   SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
   Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx,
                        PseudoSourceValue::getFixedStack(), NewReturnAddrFI);
@@ -1514,8 +1514,8 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
 
     if (VA.isRegLoc()) {
       if (Is64Bit) {
-        MVT::ValueType RegVT = VA.getLocVT();
-        if (MVT::isVector(RegVT) && MVT::getSizeInBits(RegVT) == 64)
+        MVT RegVT = VA.getLocVT();
+        if (RegVT.isVector() && RegVT.getSizeInBits() == 64)
           switch (VA.getLocReg()) {
           default:
             break;
@@ -1630,7 +1630,7 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
           cast<ARG_FLAGSSDNode>(FlagsOp)->getArgFlags();
         // Create frame index.
         int32_t Offset = VA.getLocMemOffset()+FPDiff;
-        uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8;
+        uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
         FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
         FIN = DAG.getFrameIndex(FI, getPointerTy());
@@ -2567,9 +2567,9 @@ static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
 static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1,
                                       SDOperand &V2, SDOperand &Mask,
                                       SelectionDAG &DAG) {
-  MVT::ValueType VT = Op.getValueType();
-  MVT::ValueType MaskVT = Mask.getValueType();
-  MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
+  MVT VT = Op.getValueType();
+  MVT MaskVT = Mask.getValueType();
+  MVT EltVT = MaskVT.getVectorElementType();
   unsigned NumElems = Mask.getNumOperands();
   SmallVector<SDOperand, 8> MaskVec;
@@ -2596,8 +2596,8 @@ static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1,
 /// the two vector operands have swapped position.
 static SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) {
-  MVT::ValueType MaskVT = Mask.getValueType();
-  MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
+  MVT MaskVT = Mask.getValueType();
+  MVT EltVT = MaskVT.getVectorElementType();
   unsigned NumElems = Mask.getNumOperands();
   SmallVector<SDOperand, 8> MaskVec;
   for (unsigned i = 0; i != NumElems; ++i) {
@@ -2756,14 +2756,13 @@ static bool isZeroShuffle(SDNode *N) {
 
 /// getZeroVector - Returns a vector of specified type with all zero elements.
 ///
-static SDOperand getZeroVector(MVT::ValueType VT, bool HasSSE2,
-                               SelectionDAG &DAG) {
-  assert(MVT::isVector(VT) && "Expected a vector type");
+static SDOperand getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG) {
+  assert(VT.isVector() && "Expected a vector type");
 
   // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest
   // type.  This ensures they get CSE'd.
   SDOperand Vec;
-  if (MVT::getSizeInBits(VT) == 64) {  // MMX
+  if (VT.getSizeInBits() == 64) {  // MMX
     SDOperand Cst = DAG.getTargetConstant(0, MVT::i32);
     Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
   } else if (HasSSE2) {  // SSE2
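getZeroVector above is typical of the lowering helpers in this file: they now take MVT by value and branch on total bit width to pick the MMX (64-bit) versus SSE (128-bit) form. A minimal sketch of that dispatch, with a hypothetical helper name:

    // True when a vector value belongs in an MMX register on x86;
    // a sketch assuming the post-patch MVT API, not code from this patch.
    static bool isMMXSized(MVT VT) {
      return VT.isVector() && VT.getSizeInBits() == 64;
    }
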
@@ -2778,14 +2777,14 @@ static SDOperand getZeroVector(MVT::ValueType VT, bool HasSSE2,
 
 /// getOnesVector - Returns a vector of specified type with all bits set.
 ///
-static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) {
-  assert(MVT::isVector(VT) && "Expected a vector type");
+static SDOperand getOnesVector(MVT VT, SelectionDAG &DAG) {
+  assert(VT.isVector() && "Expected a vector type");
 
   // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest
   // type.  This ensures they get CSE'd.
   SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32);
   SDOperand Vec;
-  if (MVT::getSizeInBits(VT) == 64)  // MMX
+  if (VT.getSizeInBits() == 64)  // MMX
     Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
   else                               // SSE
     Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
@@ -2822,8 +2821,8 @@ static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
 /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
 /// operation of specified width.
 static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
-  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
-  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
+  MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+  MVT BaseVT = MaskVT.getVectorElementType();
   SmallVector<SDOperand, 8> MaskVec;
   MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
@@ -2835,8 +2834,8 @@ static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
 /// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
 /// of specified width.
 static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
-  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
-  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
+  MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+  MVT BaseVT = MaskVT.getVectorElementType();
   SmallVector<SDOperand, 8> MaskVec;
   for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
     MaskVec.push_back(DAG.getConstant(i, BaseVT));
@@ -2848,8 +2847,8 @@ static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
 /// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
 /// of specified width.
 static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
-  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
-  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
+  MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+  MVT BaseVT = MaskVT.getVectorElementType();
   unsigned Half = NumElems/2;
   SmallVector<SDOperand, 8> MaskVec;
   for (unsigned i = 0; i != Half; ++i) {
@@ -2864,8 +2863,8 @@ static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
 /// elements in place.
 static SDOperand getSwapEltZeroMask(unsigned NumElems, unsigned DestElt,
                                     SelectionDAG &DAG) {
-  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
-  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
+  MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+  MVT BaseVT = MaskVT.getVectorElementType();
   SmallVector<SDOperand, 8> MaskVec;
   // Element #0 of the result gets the elt we are replacing.
   MaskVec.push_back(DAG.getConstant(DestElt, BaseVT));
@@ -2876,8 +2875,8 @@ static SDOperand getSwapEltZeroMask(unsigned NumElems, unsigned DestElt,
 
 /// PromoteSplat - Promote a splat of v4f32, v8i16 or v16i8 to v4i32.
 static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG, bool HasSSE2) {
-  MVT::ValueType PVT = HasSSE2 ? MVT::v4i32 : MVT::v4f32;
-  MVT::ValueType VT = Op.getValueType();
+  MVT PVT = HasSSE2 ? MVT::v4i32 : MVT::v4f32;
+  MVT VT = Op.getValueType();
   if (PVT == VT)
     return Op;
   SDOperand V1 = Op.getOperand(0);
@@ -2906,12 +2905,12 @@ static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG, bool HasSSE2) {
 static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, unsigned Idx,
                                              bool isZero, bool HasSSE2,
                                              SelectionDAG &DAG) {
-  MVT::ValueType VT = V2.getValueType();
+  MVT VT = V2.getValueType();
   SDOperand V1 = isZero ?
     getZeroVector(VT, HasSSE2, DAG) : DAG.getNode(ISD::UNDEF, VT);
-  unsigned NumElems = MVT::getVectorNumElements(V2.getValueType());
-  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
-  MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
+  unsigned NumElems = V2.getValueType().getVectorNumElements();
+  MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+  MVT EVT = MaskVT.getVectorElementType();
   SmallVector<SDOperand, 16> MaskVec;
   for (unsigned i = 0; i != NumElems; ++i)
     if (i == Idx)  // If this is the insertion idx, put the low elt of V2 here.
@@ -3061,11 +3060,11 @@ static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
 
 /// getVShift - Return a vector logical shift node.
 ///
-static SDOperand getVShift(bool isLeft, MVT::ValueType VT, SDOperand SrcOp,
+static SDOperand getVShift(bool isLeft, MVT VT, SDOperand SrcOp,
                            unsigned NumBits, SelectionDAG &DAG,
                            const TargetLowering &TLI) {
-  bool isMMX = MVT::getSizeInBits(VT) == 64;
-  MVT::ValueType ShVT = isMMX ? MVT::v1i64 : MVT::v2i64;
+  bool isMMX = VT.getSizeInBits() == 64;
+  MVT ShVT = isMMX ? MVT::v1i64 : MVT::v2i64;
   unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL;
   SrcOp = DAG.getNode(ISD::BIT_CONVERT, ShVT, SrcOp);
   return DAG.getNode(ISD::BIT_CONVERT, VT,
@@ -3088,9 +3087,9 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
     return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG);
   }
 
-  MVT::ValueType VT = Op.getValueType();
-  MVT::ValueType EVT = MVT::getVectorElementType(VT);
-  unsigned EVTBits = MVT::getSizeInBits(EVT);
+  MVT VT = Op.getValueType();
+  MVT EVT = VT.getVectorElementType();
+  unsigned EVTBits = EVT.getSizeInBits();
 
   unsigned NumElems = Op.getNumOperands();
   unsigned NumZero  = 0;
@@ -3133,8 +3132,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
         (!IsAllConstants || Idx == 0)) {
       if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
         // Handle MMX and SSE both.
-        MVT::ValueType VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32;
-        MVT::ValueType VecElts = VT == MVT::v2i64 ? 4 : 2;
+        MVT VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32;
+        unsigned VecElts = VT == MVT::v2i64 ? 4 : 2;
 
         // Truncate the value (which may itself be a constant) to i32, and
         // convert it to a vector with movd (S2V+shuffle to zero extend).
@@ -3173,7 +3172,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
     // Is it a vector logical left shift?
     if (NumElems == 2 && Idx == 1 &&
         isZeroNode(Op.getOperand(0)) && !isZeroNode(Op.getOperand(1))) {
-      unsigned NumBits = MVT::getSizeInBits(VT);
+      unsigned NumBits = VT.getSizeInBits();
       return getVShift(true, VT,
                        DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(1)),
                        NumBits/2, DAG, *this);
@@ -3193,8 +3192,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
       // Turn it into a shuffle of zero and zero-extended scalar to vector.
       Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0,
                                          Subtarget->hasSSE2(), DAG);
-      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
-      MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
+      MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+      MVT MaskEVT = MaskVT.getVectorElementType();
       SmallVector<SDOperand, 8> MaskVec;
       for (unsigned i = 0; i < NumElems; i++)
         MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
@@ -3273,8 +3272,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
     }
   }
 
-  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
-  MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
+  MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+  MVT EVT = MaskVT.getVectorElementType();
   SmallVector<SDOperand, 8> MaskVec;
   bool Reverse = (NonZeros & 0x3) == 2;
   for (unsigned i = 0; i < 2; ++i)
@@ -3320,9 +3319,9 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2,
                                    SDOperand PermMask, SelectionDAG &DAG,
                                    TargetLowering &TLI) {
   SDOperand NewV;
-  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8);
-  MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
-  MVT::ValueType PtrVT = TLI.getPointerTy();
+  MVT MaskVT = MVT::getIntVectorWithNumElements(8);
+  MVT MaskEVT = MaskVT.getVectorElementType();
+  MVT PtrVT = TLI.getPointerTy();
   SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(),
                                      PermMask.Val->op_end());
@@ -3562,23 +3561,23 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2,
 /// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15>
 static
 SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2,
-                                   MVT::ValueType VT,
+                                   MVT VT,
                                    SDOperand PermMask, SelectionDAG &DAG,
                                    TargetLowering &TLI) {
   unsigned NumElems = PermMask.getNumOperands();
   unsigned NewWidth = (NumElems == 4) ? 2 : 4;
-  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
-  MVT::ValueType NewVT = MaskVT;
-  switch (VT) {
+  MVT MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
+  MVT NewVT = MaskVT;
+  switch (VT.getSimpleVT()) {
+  default: assert(false && "Unexpected!");
   case MVT::v4f32: NewVT = MVT::v2f64; break;
   case MVT::v4i32: NewVT = MVT::v2i64; break;
   case MVT::v8i16: NewVT = MVT::v4i32; break;
   case MVT::v16i8: NewVT = MVT::v4i32; break;
-  default: assert(false && "Unexpected!");
   }
 
   if (NewWidth == 2) {
-    if (MVT::isInteger(VT))
+    if (VT.isInteger())
       NewVT = MVT::v2i64;
     else
       NewVT = MVT::v2f64;
@@ -3612,9 +3611,9 @@ SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2,
 
 /// getVZextMovL - Return a zero-extending vector move low node.
 ///
-static SDOperand getVZextMovL(MVT::ValueType VT, MVT::ValueType OpVT,
-                              SDOperand SrcOp, SelectionDAG &DAG,
-                              const X86Subtarget *Subtarget) {
+static SDOperand getVZextMovL(MVT VT, MVT OpVT,
+                              SDOperand SrcOp, SelectionDAG &DAG,
+                              const X86Subtarget *Subtarget) {
   if (VT == MVT::v2f64 || VT == MVT::v4f32) {
     LoadSDNode *LD = NULL;
     if (!isScalarLoadToVector(SrcOp.Val, &LD))
@@ -3622,7 +3621,7 @@ static SDOperand getVZextMovL(MVT::ValueType VT, MVT::ValueType OpVT,
     if (!LD) {
       // movssrr and movsdrr do not clear top bits. Try to use movd, movq
       // instead.
-      MVT::ValueType EVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
+      MVT EVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
       if ((EVT != MVT::i64 || Subtarget->is64Bit()) &&
           SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
           SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT &&
@@ -3647,9 +3646,9 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
   SDOperand V1 = Op.getOperand(0);
   SDOperand V2 = Op.getOperand(1);
   SDOperand PermMask = Op.getOperand(2);
-  MVT::ValueType VT = Op.getValueType();
+  MVT VT = Op.getValueType();
   unsigned NumElems = PermMask.getNumOperands();
-  bool isMMX = MVT::getSizeInBits(VT) == 64;
+  bool isMMX = VT.getSizeInBits() == 64;
   bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
   bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
   bool V1IsSplat = false;
@@ -3710,8 +3709,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
   if (isShift && ShVal.hasOneUse()) {
     // If the shifted value has multiple uses, it may be cheaper to use
     // v_set0 + movlhps or movhlps, etc.
-    MVT::ValueType EVT = MVT::getVectorElementType(VT);
-    ShAmt *= MVT::getSizeInBits(EVT);
+    MVT EVT = VT.getVectorElementType();
+    ShAmt *= EVT.getSizeInBits();
     return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this);
   }
@@ -3736,8 +3735,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
 
   if (isShift) {
     // No better options. Use a vshl / vsrl.
-    MVT::ValueType EVT = MVT::getVectorElementType(VT);
-    ShAmt *= MVT::getSizeInBits(EVT);
+    MVT EVT = VT.getVectorElementType();
+    ShAmt *= EVT.getSizeInBits();
     return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this);
   }
@@ -3821,7 +3820,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
       (X86::isPSHUFDMask(PermMask.Val) ||
        X86::isPSHUFHWMask(PermMask.Val) ||
        X86::isPSHUFLWMask(PermMask.Val))) {
-    MVT::ValueType RVT = VT;
+    MVT RVT = VT;
     if (VT == MVT::v4f32) {
       RVT = MVT::v4i32;
       Op = DAG.getNode(ISD::VECTOR_SHUFFLE, RVT,
@@ -3851,8 +3850,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
 
   // Handle all 4 wide cases with a number of shuffles.
   if (NumElems == 4 && !isMMX) {
    // Don't do this for MMX.
-    MVT::ValueType MaskVT = PermMask.getValueType();
-    MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
+    MVT MaskVT = PermMask.getValueType();
+    MVT MaskEVT = MaskVT.getVectorElementType();
     SmallVector<std::pair<int, int>, 8> Locs;
     Locs.reserve(NumElems);
     SmallVector<SDOperand, 8> Mask1(NumElems,
@@ -3959,14 +3958,14 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
 SDOperand
 X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op,
                                                 SelectionDAG &DAG) {
-  MVT::ValueType VT = Op.getValueType();
-  if (MVT::getSizeInBits(VT) == 8) {
+  MVT VT = Op.getValueType();
+  if (VT.getSizeInBits() == 8) {
     SDOperand Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32,
                                     Op.getOperand(0), Op.getOperand(1));
     SDOperand Assert  = DAG.getNode(ISD::AssertZext, MVT::i32, Extract,
                                     DAG.getValueType(VT));
     return DAG.getNode(ISD::TRUNCATE, VT, Assert);
-  } else if (MVT::getSizeInBits(VT) == 16) {
+  } else if (VT.getSizeInBits() == 16) {
     SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32,
                                     Op.getOperand(0), Op.getOperand(1));
     SDOperand Assert  = DAG.getNode(ISD::AssertZext, MVT::i32, Extract,
@@ -4003,9 +4002,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
     return Res;
   }
 
-  MVT::ValueType VT = Op.getValueType();
+  MVT VT = Op.getValueType();
   // TODO: handle v16i8.
-  if (MVT::getSizeInBits(VT) == 16) {
+  if (VT.getSizeInBits() == 16) {
     SDOperand Vec = Op.getOperand(0);
     unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
     if (Idx == 0)
@@ -4014,27 +4013,27 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
                          DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec),
                          Op.getOperand(1)));
     // Transform it so it match pextrw which produces a 32-bit result.
-    MVT::ValueType EVT = (MVT::ValueType)(VT+1);
+    MVT EVT = (MVT::SimpleValueType)(VT.getSimpleVT()+1);
     SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
                                     Op.getOperand(0), Op.getOperand(1));
     SDOperand Assert  = DAG.getNode(ISD::AssertZext, EVT, Extract,
                                     DAG.getValueType(VT));
     return DAG.getNode(ISD::TRUNCATE, VT, Assert);
-  } else if (MVT::getSizeInBits(VT) == 32) {
+  } else if (VT.getSizeInBits() == 32) {
     unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
     if (Idx == 0)
       return Op;
     // SHUFPS the element to the lowest double word, then movss.
-    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
+    MVT MaskVT = MVT::getIntVectorWithNumElements(4);
     SmallVector<SDOperand, 8> IdxVec;
     IdxVec.
-      push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT)));
+      push_back(DAG.getConstant(Idx, MaskVT.getVectorElementType()));
     IdxVec.
-      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
+      push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType()));
     IdxVec.
-      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
+      push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType()));
     IdxVec.
-      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
+      push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType()));
     SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                  &IdxVec[0], IdxVec.size());
     SDOperand Vec = Op.getOperand(0);
@@ -4042,7 +4041,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
                       Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                        DAG.getIntPtrConstant(0));
-  } else if (MVT::getSizeInBits(VT) == 64) {
+  } else if (VT.getSizeInBits() == 64) {
     // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
     // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
     // to match extract_elt for f64.
@@ -4053,11 +4052,11 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
     // UNPCKHPD the element to the lowest double word, then movsd.
     // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
     // to a f64mem, the whole operation is folded into a single MOVHPDmr.
-    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
+    MVT MaskVT = MVT::getIntVectorWithNumElements(4);
     SmallVector<SDOperand, 8> IdxVec;
-    IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT)));
+    IdxVec.push_back(DAG.getConstant(1, MaskVT.getVectorElementType()));
     IdxVec.
-      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
+      push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType()));
     SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                  &IdxVec[0], IdxVec.size());
     SDOperand Vec = Op.getOperand(0);
@@ -4072,15 +4071,15 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
 
 SDOperand
 X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG){
-  MVT::ValueType VT = Op.getValueType();
-  MVT::ValueType EVT = MVT::getVectorElementType(VT);
+  MVT VT = Op.getValueType();
+  MVT EVT = VT.getVectorElementType();
   SDOperand N0 = Op.getOperand(0);
   SDOperand N1 = Op.getOperand(1);
   SDOperand N2 = Op.getOperand(2);
 
-  if ((MVT::getSizeInBits(EVT) == 8) || (MVT::getSizeInBits(EVT) == 16)) {
-    unsigned Opc = (MVT::getSizeInBits(EVT) == 8) ? X86ISD::PINSRB
+  if ((EVT.getSizeInBits() == 8) || (EVT.getSizeInBits() == 16)) {
+    unsigned Opc = (EVT.getSizeInBits() == 8) ? X86ISD::PINSRB
                                                   : X86ISD::PINSRW;
     // Transform it so it match pinsr{b,w} which expects a GR32 as its second
     // argument.
@@ -4106,8 +4105,8 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG){
 
 SDOperand
 X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
-  MVT::ValueType VT = Op.getValueType();
-  MVT::ValueType EVT = MVT::getVectorElementType(VT);
+  MVT VT = Op.getValueType();
+  MVT EVT = VT.getVectorElementType();
 
   if (Subtarget->hasSSE41())
     return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);
@@ -4119,7 +4118,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
   SDOperand N1 = Op.getOperand(1);
   SDOperand N2 = Op.getOperand(2);
 
-  if (MVT::getSizeInBits(EVT) == 16) {
+  if (EVT.getSizeInBits() == 16) {
     // Transform it so it match pinsrw which expects a 16-bit value in a GR32
     // as its second argument.
     if (N1.getValueType() != MVT::i32)
@@ -4134,8 +4133,8 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
 SDOperand
 X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
   SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
-  MVT::ValueType VT = MVT::v2i32;
-  switch (Op.getValueType()) {
+  MVT VT = MVT::v2i32;
+  switch (Op.getValueType().getSimpleVT()) {
   default: break;
   case MVT::v16i8:
   case MVT::v8i16:
@@ -4201,7 +4200,7 @@ X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
 static SDOperand
 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
-                                const MVT::ValueType PtrVT) {
+                                const MVT PtrVT) {
   SDOperand InFlag;
   SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX,
                                      DAG.getNode(X86ISD::GlobalBaseReg,
@@ -4239,7 +4238,7 @@ LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
 static SDOperand
 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
-                                const MVT::ValueType PtrVT) {
+                                const MVT PtrVT) {
   SDOperand InFlag, Chain;
 
   // emit leaq symbol@TLSGD(%rip), %rdi
@@ -4271,9 +4270,8 @@ LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
 
 // Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
 // "local exec" model.
-static SDOperand
-LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
-                    const MVT::ValueType PtrVT) {
+static SDOperand LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
+                                     const MVT PtrVT) {
   // Get the Thread Pointer
   SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT);
   // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
@@ -4346,8 +4344,8 @@ SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
 /// take a 2 x i32 value to shift plus a shift amount.
 SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
-  MVT::ValueType VT = Op.getValueType();
-  unsigned VTBits = MVT::getSizeInBits(VT);
+  MVT VT = Op.getValueType();
+  unsigned VTBits = VT.getSizeInBits();
   bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
   SDOperand ShOpLo = Op.getOperand(0);
   SDOperand ShOpHi = Op.getOperand(1);
@@ -4365,7 +4363,7 @@ SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
     Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, VT, ShOpHi, ShAmt);
   }
 
-  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
+  const MVT *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
   SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
                                   DAG.getConstant(VTBits, MVT::i8));
   SDOperand Cond = DAG.getNode(X86ISD::CMP, VT,
@@ -4411,7 +4409,7 @@ SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
 }
 
 SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
-  MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
+  MVT SrcVT = Op.getOperand(0).getValueType();
   assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
          "Unknown SINT_TO_FP to lower!");
@@ -4422,7 +4420,7 @@ SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
       Subtarget->is64Bit())
     return SDOperand();
 
-  unsigned Size = MVT::getSizeInBits(SrcVT)/8;
+  unsigned Size = SrcVT.getSizeInBits()/8;
   MachineFunction &MF = DAG.getMachineFunction();
   int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
   SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
@@ -4487,11 +4485,11 @@ FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) {
   // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
   // stack slot.
   MachineFunction &MF = DAG.getMachineFunction();
-  unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
+  unsigned MemSize = Op.getValueType().getSizeInBits()/8;
   int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
   SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
 
   unsigned Opc;
-  switch (Op.getValueType()) {
+  switch (Op.getValueType().getSimpleVT()) {
   default: assert(0 && "Invalid FP_TO_SINT to lower!");
   case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
   case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
@@ -4543,10 +4541,10 @@ SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) {
 }
 
 SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
-  MVT::ValueType VT = Op.getValueType();
-  MVT::ValueType EltVT = VT;
-  if (MVT::isVector(VT))
-    EltVT = MVT::getVectorElementType(VT);
+  MVT VT = Op.getValueType();
+  MVT EltVT = VT;
+  if (VT.isVector())
+    EltVT = VT.getVectorElementType();
   std::vector<Constant*> CV;
   if (EltVT == MVT::f64) {
     Constant *C = ConstantFP::get(APFloat(APInt(64, ~(1ULL << 63))));
@@ -4568,12 +4566,12 @@ SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
 }
 
 SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
-  MVT::ValueType VT = Op.getValueType();
-  MVT::ValueType EltVT = VT;
+  MVT VT = Op.getValueType();
+  MVT EltVT = VT;
   unsigned EltNum = 1;
-  if (MVT::isVector(VT)) {
-    EltVT = MVT::getVectorElementType(VT);
-    EltNum = MVT::getVectorNumElements(VT);
+  if (VT.isVector()) {
+    EltVT = VT.getVectorElementType();
+    EltNum = VT.getVectorNumElements();
   }
   std::vector<Constant*> CV;
   if (EltVT == MVT::f64) {
@@ -4592,7 +4590,7 @@ SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
   SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx,
                                PseudoSourceValue::getConstantPool(), 0,
                                false, 16);
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     return DAG.getNode(ISD::BIT_CONVERT, VT,
                        DAG.getNode(ISD::XOR, MVT::v2i64,
                     DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)),
@@ -4605,16 +4603,16 @@ SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
 SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) {
   SDOperand Op0 = Op.getOperand(0);
   SDOperand Op1 = Op.getOperand(1);
-  MVT::ValueType VT = Op.getValueType();
-  MVT::ValueType SrcVT = Op1.getValueType();
+  MVT VT = Op.getValueType();
+  MVT SrcVT = Op1.getValueType();
 
   // If second operand is smaller, extend it first.
-  if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) {
+  if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
    Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1);
     SrcVT = VT;
   }
   // And if it is bigger, shrink it first.
-  if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) {
+  if (SrcVT.getSizeInBits() > VT.getSizeInBits()) {
     Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1));
     SrcVT = VT;
   }
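The LowerFCOPYSIGN hunk above first normalizes the sign-source operand to the result width by comparing getSizeInBits() on the two MVTs directly. Restated as a compact sketch (a paraphrase of the hunk, using the same DAG node constructors it uses, not additional behaviour):

    // Make Op1 match VT's width before masking out its sign bit.
    if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
      Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1);  // widen the sign source
      SrcVT = VT;
    } else if (SrcVT.getSizeInBits() > VT.getSizeInBits()) {
      Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1,
                        DAG.getIntPtrConstant(1));  // shrink the sign source
      SrcVT = VT;
    }
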
SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, @@ -4680,7 +4678,7 @@ SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { SDOperand Op1 = Op.getOperand(1); SDOperand CC = Op.getOperand(2); ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); - bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); + bool isFP = Op.getOperand(1).getValueType().isFloatingPoint(); unsigned X86CC; if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, @@ -4728,10 +4726,10 @@ SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { SDOperand Cmp = Cond.getOperand(1); unsigned Opc = Cmp.getOpcode(); - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); bool IllegalFPCMov = false; - if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) && + if (VT.isFloatingPoint() && !VT.isVector() && !isScalarFPTypeInSSEReg(VT)) // FPStack? IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); @@ -4748,7 +4746,7 @@ SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); } - const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(), + const MVT *VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag); SmallVector<SDOperand, 4> Ops; // X86ISD::CMOV means set the result (which is operand 1) to the RHS if @@ -4812,8 +4810,8 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, SDOperand Flag; - MVT::ValueType IntPtr = getPointerTy(); - MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32; + MVT IntPtr = getPointerTy(); + MVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32; Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag); Flag = Chain.getValue(1); @@ -4828,7 +4826,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1); - std::vector<MVT::ValueType> Tys; + std::vector<MVT> Tys; Tys.push_back(SPTy); Tys.push_back(MVT::Other); SDOperand Ops1[2] = { Chain.getValue(0), Chain }; @@ -4855,7 +4853,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src); if (const char *bzeroEntry = V && V->isNullValue() ? Subtarget->getBZeroEntry() : 0) { - MVT::ValueType IntPtr = getPointerTy(); + MVT IntPtr = getPointerTy(); const Type *IntPtrTy = getTargetData()->getIntPtrType(); TargetLowering::ArgListTy Args; TargetLowering::ArgListEntry Entry; @@ -4877,7 +4875,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, uint64_t SizeVal = ConstantSize->getValue(); SDOperand InFlag(0, 0); - MVT::ValueType AVT; + MVT AVT; SDOperand Count; ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Src); unsigned BytesLeft = 0; @@ -4912,7 +4910,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, } if (AVT > MVT::i8) { - unsigned UBytes = MVT::getSizeInBits(AVT) / 8; + unsigned UBytes = AVT.getSizeInBits() / 8; Count = DAG.getIntPtrConstant(SizeVal / UBytes); BytesLeft = SizeVal % UBytes; } @@ -4944,7 +4942,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, if (TwoRepStos) { InFlag = Chain.getValue(1); Count = Size; - MVT::ValueType CVT = Count.getValueType(); + MVT CVT = Count.getValueType(); SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? 
X86::RCX : X86::ECX, @@ -4959,8 +4957,8 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, } else if (BytesLeft) { // Handle the last 1 - 7 bytes. unsigned Offset = SizeVal - BytesLeft; - MVT::ValueType AddrVT = Dst.getValueType(); - MVT::ValueType SizeVT = Size.getValueType(); + MVT AddrVT = Dst.getValueType(); + MVT SizeVT = Size.getValueType(); Chain = DAG.getMemset(Chain, DAG.getNode(ISD::ADD, AddrVT, Dst, @@ -4992,7 +4990,7 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold()) return SDOperand(); - MVT::ValueType AVT; + MVT AVT; unsigned BytesLeft = 0; if (Align >= 8 && Subtarget->is64Bit()) AVT = MVT::i64; @@ -5003,7 +5001,7 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, else AVT = MVT::i8; - unsigned UBytes = MVT::getSizeInBits(AVT) / 8; + unsigned UBytes = AVT.getSizeInBits() / 8; unsigned CountVal = SizeVal / UBytes; SDOperand Count = DAG.getIntPtrConstant(CountVal); BytesLeft = SizeVal % UBytes; @@ -5031,9 +5029,9 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, if (BytesLeft) { // Handle the last 1 - 7 bytes. unsigned Offset = SizeVal - BytesLeft; - MVT::ValueType DstVT = Dst.getValueType(); - MVT::ValueType SrcVT = Src.getValueType(); - MVT::ValueType SizeVT = Size.getValueType(); + MVT DstVT = Dst.getValueType(); + MVT SrcVT = Src.getValueType(); + MVT SizeVT = Size.getValueType(); Results.push_back(DAG.getMemcpy(Chain, DAG.getNode(ISD::ADD, DstVT, Dst, DAG.getConstant(Offset, DstVT)), @@ -5280,7 +5278,7 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { return SDOperand(); unsigned NewIntNo = 0; - MVT::ValueType ShAmtVT = MVT::v4i32; + MVT ShAmtVT = MVT::v4i32; switch (IntNo) { case Intrinsic::x86_sse2_pslli_w: NewIntNo = Intrinsic::x86_sse2_psll_w; @@ -5338,7 +5336,7 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { break; } } - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); ShAmt = DAG.getNode(ISD::BIT_CONVERT, VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, ShAmtVT, ShAmt)); return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VT, @@ -5554,7 +5552,7 @@ SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { const TargetMachine &TM = MF.getTarget(); const TargetFrameInfo &TFI = *TM.getFrameInfo(); unsigned StackAlignment = TFI.getStackAlignment(); - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); // Save FP Control Word to stack slot int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment); @@ -5586,14 +5584,14 @@ SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { DAG.getConstant(3, MVT::i16)); - return DAG.getNode((MVT::getSizeInBits(VT) < 16 ? + return DAG.getNode((VT.getSizeInBits() < 16 ? 
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal);
 }

 SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) {
-  MVT::ValueType VT = Op.getValueType();
-  MVT::ValueType OpVT = VT;
-  unsigned NumBits = MVT::getSizeInBits(VT);
+  MVT VT = Op.getValueType();
+  MVT OpVT = VT;
+  unsigned NumBits = VT.getSizeInBits();
   Op = Op.getOperand(0);
   if (VT == MVT::i8) {
@@ -5623,9 +5621,9 @@ SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) {
 }

 SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) {
-  MVT::ValueType VT = Op.getValueType();
-  MVT::ValueType OpVT = VT;
-  unsigned NumBits = MVT::getSizeInBits(VT);
+  MVT VT = Op.getValueType();
+  MVT OpVT = VT;
+  unsigned NumBits = VT.getSizeInBits();
   Op = Op.getOperand(0);
   if (VT == MVT::i8) {
@@ -5651,10 +5649,12 @@ SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) {
 }

 SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) {
-  MVT::ValueType T = cast<AtomicSDNode>(Op.Val)->getVT();
+  MVT T = cast<AtomicSDNode>(Op.Val)->getVT();
   unsigned Reg = 0;
   unsigned size = 0;
-  switch(T) {
+  switch(T.getSimpleVT()) {
+  default:
+    assert(false && "Invalid value type!");
   case MVT::i8:  Reg = X86::AL;  size = 1; break;
   case MVT::i16: Reg = X86::AX;  size = 2; break;
   case MVT::i32: Reg = X86::EAX; size = 4; break;
@@ -5680,7 +5680,7 @@ SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) {
 }

 SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) {
-  MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT();
+  MVT T = cast<AtomicSDNode>(Op)->getVT();
   assert (T == MVT::i64 && "Only know how to expand i64 CAS");
   SDOperand cpInL, cpInH;
   cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3),
@@ -5716,7 +5716,7 @@ SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) {
 }

 SDNode* X86TargetLowering::ExpandATOMIC_LSS(SDNode* Op, SelectionDAG &DAG) {
-  MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT();
+  MVT T = cast<AtomicSDNode>(Op)->getVT();
   assert (T == MVT::i32 && "Only know how to expand i32 LSS");
   SDOperand negOp = DAG.getNode(ISD::SUB, T,
                                 DAG.getConstant(0, T), Op->getOperand(2));
@@ -5900,12 +5900,11 @@ bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const {
   return Subtarget->is64Bit() || NumBits1 < 64;
 }

-bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1,
-                                       MVT::ValueType VT2) const {
-  if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2))
+bool X86TargetLowering::isTruncateFree(MVT VT1, MVT VT2) const {
+  if (!VT1.isInteger() || !VT2.isInteger())
     return false;
-  unsigned NumBits1 = MVT::getSizeInBits(VT1);
-  unsigned NumBits2 = MVT::getSizeInBits(VT2);
+  unsigned NumBits1 = VT1.getSizeInBits();
+  unsigned NumBits2 = VT2.getSizeInBits();
   if (NumBits1 <= NumBits2)
     return false;
   return Subtarget->is64Bit() || NumBits1 < 64;
@@ -5916,9 +5915,9 @@ bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1,
 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
 /// are assumed to be legal.
 bool
-X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
+X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT VT) const {
   // Only do shuffles on 128-bit vector types for now.
-  if (MVT::getSizeInBits(VT) == 64) return false;
+  if (VT.getSizeInBits() == 64) return false;
   return (Mask.Val->getNumOperands() <= 4 ||
           isIdentityMask(Mask.Val) ||
           isIdentityMask(Mask.Val, true) ||
@@ -5932,11 +5931,10 @@ X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
 bool
 X86TargetLowering::isVectorClearMaskLegal(const std::vector<SDOperand> &BVOps,
-                                          MVT::ValueType EVT,
-                                          SelectionDAG &DAG) const {
+                                          MVT EVT, SelectionDAG &DAG) const {
   unsigned NumElts = BVOps.size();
   // Only do shuffles on 128-bit vector types for now.
-  if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
+  if (EVT.getSizeInBits() * NumElts == 64) return false;
   if (NumElts == 2) return true;
   if (NumElts == 4) {
     return (isMOVLMask(&BVOps[0], 4) ||
@@ -6342,7 +6340,7 @@ static bool isBaseAlignmentOfN(unsigned N, SDNode *Base,
 }

 static bool EltsFromConsecutiveLoads(SDNode *N, SDOperand PermMask,
-                                     unsigned NumElems, MVT::ValueType EVT,
+                                     unsigned NumElems, MVT EVT,
                                      SDNode *&Base, SelectionDAG &DAG,
                                      MachineFrameInfo *MFI,
                                      const TargetLowering &TLI) {
@@ -6370,7 +6368,7 @@ static bool EltsFromConsecutiveLoads(SDNode *N, SDOperand PermMask,
       continue;
     if (!TLI.isConsecutiveLoad(Elt.Val, Base,
-                               MVT::getSizeInBits(EVT)/8, i, MFI))
+                               EVT.getSizeInBits()/8, i, MFI))
       return false;
   }
   return true;
@@ -6383,8 +6381,8 @@ static bool EltsFromConsecutiveLoads(SDNode *N, SDOperand PermMask,
 static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                        const TargetLowering &TLI) {
   MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
-  MVT::ValueType VT = N->getValueType(0);
-  MVT::ValueType EVT = MVT::getVectorElementType(VT);
+  MVT VT = N->getValueType(0);
+  MVT EVT = VT.getVectorElementType();
   SDOperand PermMask = N->getOperand(2);
   unsigned NumElems = PermMask.getNumOperands();
   SDNode *Base = NULL;
@@ -6411,8 +6409,8 @@ static SDOperand PerformBuildVectorCombine(SDNode *N, SelectionDAG &DAG,
   if (NumOps == 1)
     return SDOperand();

-  MVT::ValueType VT = N->getValueType(0);
-  MVT::ValueType EVT = MVT::getVectorElementType(VT);
+  MVT VT = N->getValueType(0);
+  MVT EVT = VT.getVectorElementType();
   if ((EVT != MVT::i64 && EVT != MVT::f64) || Subtarget->is64Bit())
     // We are looking for load i64 and zero extend. We want to transform
     // it before legalizer has a chance to expand it. Also look for i64
@@ -6523,8 +6521,8 @@ static SDOperand PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
   // A preferable solution to the general problem is to figure out the right
   // places to insert EMMS.  This qualifies as a quick hack.
   StoreSDNode *St = cast<StoreSDNode>(N);
-  if (MVT::isVector(St->getValue().getValueType()) &&
-      MVT::getSizeInBits(St->getValue().getValueType()) == 64 &&
+  if (St->getValue().getValueType().isVector() &&
+      St->getValue().getValueType().getSizeInBits() == 64 &&
       isa<LoadSDNode>(St->getValue()) &&
       !cast<LoadSDNode>(St->getValue())->isVolatile() &&
       St->getChain().hasOneUse() && !St->isVolatile()) {
@@ -6569,7 +6567,7 @@ static SDOperand PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
     // Otherwise, lower to two 32-bit copies.
     SDOperand LoAddr = Ld->getBasePtr();
     SDOperand HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
-                                   DAG.getConstant(MVT::i32, 4));
+                                   DAG.getConstant(4, MVT::i32));

     SDOperand LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr,
                                  Ld->getSrcValue(), Ld->getSrcValueOffset(),
@@ -6589,7 +6587,7 @@ static SDOperand PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
     LoAddr = St->getBasePtr();
     HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
-                         DAG.getConstant(MVT::i32, 4));
+                         DAG.getConstant(4, MVT::i32));

     SDOperand LoSt = DAG.getStore(NewChain, LoLd, LoAddr,
                                   St->getSrcValue(), St->getSrcValueOffset(),
@@ -6683,10 +6681,10 @@ X86TargetLowering::getConstraintType(const std::string &Constraint) const {
 /// with another that has more specific requirements based on the type of the
 /// corresponding operand.
 const char *X86TargetLowering::
-LowerXConstraint(MVT::ValueType ConstraintVT) const {
+LowerXConstraint(MVT ConstraintVT) const {
   // FP X constraints get lowered to SSE1/2 registers if available, otherwise
   // 'f' like normal targets.
-  if (MVT::isFloatingPoint(ConstraintVT)) {
+  if (ConstraintVT.isFloatingPoint()) {
     if (Subtarget->hasSSE2())
       return "Y";
     if (Subtarget->hasSSE1())
@@ -6779,7 +6777,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
 std::vector<unsigned> X86TargetLowering::
 getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                  MVT::ValueType VT) const {
+                                  MVT VT) const {
   if (Constraint.size() == 1) {
     // FIXME: not handling fp-stack yet!
     switch (Constraint[0]) {      // GCC X86 Constraint Letters
@@ -6807,7 +6805,7 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
 std::pair<unsigned, const TargetRegisterClass*>
 X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
-                                                MVT::ValueType VT) const {
+                                                MVT VT) const {
   // First, see if this is a constraint that directly corresponds to an LLVM
   // register class.
   if (Constraint.size() == 1) {
@@ -6843,8 +6841,8 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
       // FALL THROUGH.
     case 'x':   // SSE_REGS if SSE1 allowed
       if (!Subtarget->hasSSE1()) break;
-      
-      switch (VT) {
+
+      switch (VT.getSimpleVT()) {
       default: break;
       // Scalar SSE types.
       case MVT::f32:
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 0c67794..c7a43a4 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -345,8 +345,8 @@ namespace llvm {
     /// lowering. It returns MVT::iAny if SelectionDAG should be responsible for
     /// determining it.
     virtual
-    MVT::ValueType getOptimalMemOpType(uint64_t Size, unsigned Align,
-                                       bool isSrcConst, bool isSrcStr) const;
+    MVT getOptimalMemOpType(uint64_t Size, unsigned Align,
+                            bool isSrcConst, bool isSrcStr) const;

     /// LowerOperation - Provide custom lowering hooks for some operations.
     ///
@@ -369,7 +369,7 @@ namespace llvm {
     virtual const char *getTargetNodeName(unsigned Opcode) const;

     /// getSetCCResultType - Return the ISD::SETCC ValueType
-    virtual MVT::ValueType getSetCCResultType(const SDOperand &) const;
+    virtual MVT getSetCCResultType(const SDOperand &) const;

     /// computeMaskedBitsForTargetNode - Determine which of the bits specified
     /// in Mask are known to be either zero or one and return them in the
@@ -390,9 +390,9 @@ namespace llvm {
     std::vector<unsigned>
       getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                        MVT::ValueType VT) const;
+                                        MVT VT) const;

-    virtual const char *LowerXConstraint(MVT::ValueType ConstraintVT) const;
+    virtual const char *LowerXConstraint(MVT ConstraintVT) const;

     /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
     /// vector.  If it is invalid, don't add anything to Ops.
@@ -407,7 +407,7 @@ namespace llvm {
     /// error, this returns a register number of 0.
     std::pair<unsigned, const TargetRegisterClass*>
       getRegForInlineAsmConstraint(const std::string &Constraint,
-                                   MVT::ValueType VT) const;
+                                   MVT VT) const;

     /// isLegalAddressingMode - Return true if the addressing mode represented
     /// by AM is legal for this target, for a load/store of the specified type.
@@ -417,26 +417,25 @@ namespace llvm {
     /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
     /// register EAX to i16 by referencing its sub-register AX.
     virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const;
-    virtual bool isTruncateFree(MVT::ValueType VT1, MVT::ValueType VT2) const;
+    virtual bool isTruncateFree(MVT VT1, MVT VT2) const;

     /// isShuffleMaskLegal - Targets can use this to indicate that they only
     /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
     /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
     /// values are assumed to be legal.
-    virtual bool isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const;
+    virtual bool isShuffleMaskLegal(SDOperand Mask, MVT VT) const;

     /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. This is
     /// used by Targets can use this to indicate if there is a suitable
     /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant
     /// pool entry.
     virtual bool isVectorClearMaskLegal(const std::vector<SDOperand> &BVOps,
-                                        MVT::ValueType EVT,
-                                        SelectionDAG &DAG) const;
+                                        MVT EVT, SelectionDAG &DAG) const;

     /// ShouldShrinkFPConstant - If true, then instruction selection should
     /// seek to shrink the FP constant of the specified type to a smaller type
     /// in order to save space and / or reduce runtime.
-    virtual bool ShouldShrinkFPConstant(MVT::ValueType VT) const {
+    virtual bool ShouldShrinkFPConstant(MVT VT) const {
       // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
       // expensive than a straight movsd. On the other hand, it's important to
       // shrink long double fp constant since fldt is very slow.
@@ -456,7 +455,7 @@ namespace llvm {
     /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
     /// computed in an SSE register, not on the X87 floating point stack.
-    bool isScalarFPTypeInSSEReg(MVT::ValueType VT) const {
+    bool isScalarFPTypeInSSEReg(MVT VT) const {
       return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
              (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
     }
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 1a5fd47..4384426 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -2193,14 +2193,14 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
   // Emit the load instruction.
   SDNode *Load = 0;
   if (FoldedLoad) {
-    MVT::ValueType VT = *RC->vt_begin();
+    MVT VT = *RC->vt_begin();
     Load = DAG.getTargetNode(getLoadRegOpcode(RC, RI.getStackAlignment()), VT,
                              MVT::Other, &AddrOps[0], AddrOps.size());
     NewNodes.push_back(Load);
   }

   // Emit the data processing instruction.
-  std::vector<MVT::ValueType> VTs;
+  std::vector<MVT> VTs;
   const TargetRegisterClass *DstRC = 0;
   if (TID.getNumDefs() > 0) {
     const TargetOperandInfo &DstTOI = TID.OpInfo[0];
@@ -2209,7 +2209,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
     VTs.push_back(*DstRC->vt_begin());
   }
   for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
-    MVT::ValueType VT = N->getValueType(i);
+    MVT VT = N->getValueType(i);
     if (VT != MVT::Other && i >= (unsigned)TID.getNumDefs())
       VTs.push_back(VT);
   }
diff --git a/lib/Target/X86/X86IntelAsmPrinter.cpp b/lib/Target/X86/X86IntelAsmPrinter.cpp
index a1031bd..09a40b8 100644
--- a/lib/Target/X86/X86IntelAsmPrinter.cpp
+++ b/lib/Target/X86/X86IntelAsmPrinter.cpp
@@ -120,7 +120,7 @@ void X86IntelAsmPrinter::printOp(const MachineOperand &MO,
     if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
       unsigned Reg = MO.getReg();
       if (Modifier && strncmp(Modifier, "subreg", strlen("subreg")) == 0) {
-        MVT::ValueType VT = (strcmp(Modifier,"subreg64") == 0) ?
+        MVT VT = (strcmp(Modifier,"subreg64") == 0) ?
          MVT::i64 : ((strcmp(Modifier, "subreg32") == 0) ? MVT::i32 :
                      ((strcmp(Modifier,"subreg16") == 0) ? MVT::i16 :MVT::i8));
        Reg = getX86SubSuperRegister(Reg, VT);
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index a75c1de..0bac850 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -946,8 +946,8 @@ unsigned X86RegisterInfo::getEHHandlerRegister() const {
 }

 namespace llvm {
-unsigned getX86SubSuperRegister(unsigned Reg, MVT::ValueType VT, bool High) {
-  switch (VT) {
+unsigned getX86SubSuperRegister(unsigned Reg, MVT VT, bool High) {
+  switch (VT.getSimpleVT()) {
   default: return Reg;
   case MVT::i8:
     if (High) {
diff --git a/lib/Target/X86/X86RegisterInfo.h b/lib/Target/X86/X86RegisterInfo.h
index 40fdfb0..93e8613 100644
--- a/lib/Target/X86/X86RegisterInfo.h
+++ b/lib/Target/X86/X86RegisterInfo.h
@@ -150,7 +150,7 @@ public:

 // getX86SubSuperRegister - X86 utility function. It returns the sub or super
 // register of a specific X86 register.
 // e.g. getX86SubSuperRegister(X86::EAX, MVT::i16) return X86:AX
-unsigned getX86SubSuperRegister(unsigned, MVT::ValueType, bool High=false);
+unsigned getX86SubSuperRegister(unsigned, MVT, bool High=false);

 } // End llvm namespace
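Editor's note: every hunk above applies the same mechanical substitutions to the X86 backend. Declared MVT::ValueType becomes MVT, free-function queries such as MVT::getSizeInBits(AVT) become member calls (AVT.getSizeInBits(), VT.isFloatingPoint(), VT1.isInteger()), and a switch over a value type now dispatches through getSimpleVT(), as in the LowerLCS and getX86SubSuperRegister hunks. The sketch below shows the resulting style in isolation; the helper function widthInBytes is hypothetical and not part of this patch.

// Hypothetical helper, for illustration only -- not code from this patch.
// Demonstrates the post-patch MVT member-call style used in the hunks above.
#include "llvm/CodeGen/ValueTypes.h"  // declares the MVT struct
#include <cassert>

using namespace llvm;

static unsigned widthInBytes(MVT VT) {
  // Switches now dispatch on the simple value type from getSimpleVT()
  // rather than on the old MVT::ValueType value directly.
  switch (VT.getSimpleVT()) {
  case MVT::i8:  return 1;
  case MVT::i16: return 2;
  case MVT::i32: return 4;
  case MVT::i64: return 8;
  default:
    assert(VT.isInteger() && "unexpected value type");
    // Member call replaces the old free function MVT::getSizeInBits(VT).
    return VT.getSizeInBits() / 8;
  }
}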