68 files changed, 2862 insertions(+), 2845 deletions(-)
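Throughout the patch below, the free functions of the old MVT namespace become member functions on a new MVT struct (for example MVT::getSizeInBits(VT) becomes VT.getSizeInBits()). The following is a minimal sketch of that call-site pattern against the post-patch ValueTypes.h; the helper names storeSizeInBytes and isWideVector are purely illustrative and are not part of the patch.

// Sketch only: assumes the post-patch llvm/CodeGen/ValueTypes.h header.
#include "llvm/CodeGen/ValueTypes.h"
using namespace llvm;

static unsigned storeSizeInBytes(MVT VT) {
  // Pre-patch:  unsigned Bits = MVT::getSizeInBits(VT);   (free function)
  // Post-patch: the query is a member function on the MVT struct.
  unsigned Bits = VT.getSizeInBits();
  return (Bits + 7) / 8;  // same rounding as VT.getStoreSizeInBits() / 8
}

static bool isWideVector(MVT VT) {
  // Predicates migrate the same way: MVT::isVector(VT) -> VT.isVector(),
  // MVT::getVectorElementType(VT) -> VT.getVectorElementType(), and so on.
  return VT.isVector() && VT.getSizeInBits() >= 128;
}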
diff --git a/docs/CodeGenerator.html b/docs/CodeGenerator.html index bcbb64c..a283981 100644 --- a/docs/CodeGenerator.html +++ b/docs/CodeGenerator.html @@ -790,7 +790,8 @@ to the node defining the used value. Because nodes may define multiple values, edges are represented by instances of the <tt>SDOperand</tt> class, which is a <tt><SDNode, unsigned></tt> pair, indicating the node and result value being used, respectively. Each value produced by an <tt>SDNode</tt> has -an associated <tt>MVT::ValueType</tt> indicating what type the value is.</p> +an associated <tt>MVT</tt> (Machine Value Type) indicating what the type of the +value is.</p> <p>SelectionDAGs contain two different kinds of values: those that represent data flow and those that represent control flow dependencies. Data values are diff --git a/include/llvm/CodeGen/CallingConvLower.h b/include/llvm/CodeGen/CallingConvLower.h index 584cb19..2fda9e5 100644 --- a/include/llvm/CodeGen/CallingConvLower.h +++ b/include/llvm/CodeGen/CallingConvLower.h @@ -49,14 +49,14 @@ private: LocInfo HTP : 7; /// ValVT - The type of the value being assigned. - MVT::ValueType ValVT; + MVT ValVT; /// LocVT - The type of the location being assigned to. - MVT::ValueType LocVT; + MVT LocVT; public: - static CCValAssign getReg(unsigned ValNo, MVT::ValueType ValVT, - unsigned RegNo, MVT::ValueType LocVT, + static CCValAssign getReg(unsigned ValNo, MVT ValVT, + unsigned RegNo, MVT LocVT, LocInfo HTP) { CCValAssign Ret; Ret.ValNo = ValNo; @@ -67,8 +67,8 @@ public: Ret.LocVT = LocVT; return Ret; } - static CCValAssign getMem(unsigned ValNo, MVT::ValueType ValVT, - unsigned Offset, MVT::ValueType LocVT, + static CCValAssign getMem(unsigned ValNo, MVT ValVT, + unsigned Offset, MVT LocVT, LocInfo HTP) { CCValAssign Ret; Ret.ValNo = ValNo; @@ -81,14 +81,14 @@ public: } unsigned getValNo() const { return ValNo; } - MVT::ValueType getValVT() const { return ValVT; } + MVT getValVT() const { return ValVT; } bool isRegLoc() const { return !isMem; } bool isMemLoc() const { return isMem; } unsigned getLocReg() const { assert(isRegLoc()); return Loc; } unsigned getLocMemOffset() const { assert(isMemLoc()); return Loc; } - MVT::ValueType getLocVT() const { return LocVT; } + MVT getLocVT() const { return LocVT; } LocInfo getLocInfo() const { return HTP; } }; @@ -96,8 +96,8 @@ public: /// CCAssignFn - This function assigns a location for Val, updating State to /// reflect the change. -typedef bool CCAssignFn(unsigned ValNo, MVT::ValueType ValVT, - MVT::ValueType LocVT, CCValAssign::LocInfo LocInfo, +typedef bool CCAssignFn(unsigned ValNo, MVT ValVT, + MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State); @@ -217,8 +217,8 @@ public: // HandleByVal - Allocate a stack slot large enough to pass an argument by // value. The size and alignment information of the argument is encoded in its // parameter attribute. - void HandleByVal(unsigned ValNo, MVT::ValueType ValVT, - MVT::ValueType LocVT, CCValAssign::LocInfo LocInfo, + void HandleByVal(unsigned ValNo, MVT ValVT, + MVT LocVT, CCValAssign::LocInfo LocInfo, int MinSize, int MinAlign, ISD::ArgFlagsTy ArgFlags); private: diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h index 77405e4..e11bd42 100644 --- a/include/llvm/CodeGen/SelectionDAG.h +++ b/include/llvm/CodeGen/SelectionDAG.h @@ -144,24 +144,22 @@ public: /// getVTList - Return an SDVTList that represents the list of values /// specified. 
- SDVTList getVTList(MVT::ValueType VT); - SDVTList getVTList(MVT::ValueType VT1, MVT::ValueType VT2); - SDVTList getVTList(MVT::ValueType VT1, MVT::ValueType VT2,MVT::ValueType VT3); - SDVTList getVTList(const MVT::ValueType *VTs, unsigned NumVTs); + SDVTList getVTList(MVT VT); + SDVTList getVTList(MVT VT1, MVT VT2); + SDVTList getVTList(MVT VT1, MVT VT2, MVT VT3); + SDVTList getVTList(const MVT *VTs, unsigned NumVTs); /// getNodeValueTypes - These are obsolete, use getVTList instead. - const MVT::ValueType *getNodeValueTypes(MVT::ValueType VT) { + const MVT *getNodeValueTypes(MVT VT) { return getVTList(VT).VTs; } - const MVT::ValueType *getNodeValueTypes(MVT::ValueType VT1, - MVT::ValueType VT2) { + const MVT *getNodeValueTypes(MVT VT1, MVT VT2) { return getVTList(VT1, VT2).VTs; } - const MVT::ValueType *getNodeValueTypes(MVT::ValueType VT1,MVT::ValueType VT2, - MVT::ValueType VT3) { + const MVT *getNodeValueTypes(MVT VT1, MVT VT2, MVT VT3) { return getVTList(VT1, VT2, VT3).VTs; } - const MVT::ValueType *getNodeValueTypes(std::vector<MVT::ValueType> &vtList) { + const MVT *getNodeValueTypes(std::vector<MVT> &vtList) { return getVTList(&vtList[0], (unsigned)vtList.size()).VTs; } @@ -170,57 +168,56 @@ public: // Node creation methods. // SDOperand getString(const std::string &Val); - SDOperand getConstant(uint64_t Val, MVT::ValueType VT, bool isTarget = false); - SDOperand getConstant(const APInt &Val, MVT::ValueType VT, bool isTarget = false); + SDOperand getConstant(uint64_t Val, MVT VT, bool isTarget = false); + SDOperand getConstant(const APInt &Val, MVT VT, bool isTarget = false); SDOperand getIntPtrConstant(uint64_t Val, bool isTarget = false); - SDOperand getTargetConstant(uint64_t Val, MVT::ValueType VT) { + SDOperand getTargetConstant(uint64_t Val, MVT VT) { return getConstant(Val, VT, true); } - SDOperand getTargetConstant(const APInt &Val, MVT::ValueType VT) { + SDOperand getTargetConstant(const APInt &Val, MVT VT) { return getConstant(Val, VT, true); } - SDOperand getConstantFP(double Val, MVT::ValueType VT, bool isTarget = false); - SDOperand getConstantFP(const APFloat& Val, MVT::ValueType VT, - bool isTarget = false); - SDOperand getTargetConstantFP(double Val, MVT::ValueType VT) { + SDOperand getConstantFP(double Val, MVT VT, bool isTarget = false); + SDOperand getConstantFP(const APFloat& Val, MVT VT, bool isTarget = false); + SDOperand getTargetConstantFP(double Val, MVT VT) { return getConstantFP(Val, VT, true); } - SDOperand getTargetConstantFP(const APFloat& Val, MVT::ValueType VT) { + SDOperand getTargetConstantFP(const APFloat& Val, MVT VT) { return getConstantFP(Val, VT, true); } - SDOperand getGlobalAddress(const GlobalValue *GV, MVT::ValueType VT, + SDOperand getGlobalAddress(const GlobalValue *GV, MVT VT, int offset = 0, bool isTargetGA = false); - SDOperand getTargetGlobalAddress(const GlobalValue *GV, MVT::ValueType VT, + SDOperand getTargetGlobalAddress(const GlobalValue *GV, MVT VT, int offset = 0) { return getGlobalAddress(GV, VT, offset, true); } - SDOperand getFrameIndex(int FI, MVT::ValueType VT, bool isTarget = false); - SDOperand getTargetFrameIndex(int FI, MVT::ValueType VT) { + SDOperand getFrameIndex(int FI, MVT VT, bool isTarget = false); + SDOperand getTargetFrameIndex(int FI, MVT VT) { return getFrameIndex(FI, VT, true); } - SDOperand getJumpTable(int JTI, MVT::ValueType VT, bool isTarget = false); - SDOperand getTargetJumpTable(int JTI, MVT::ValueType VT) { + SDOperand getJumpTable(int JTI, MVT VT, bool isTarget = false); + SDOperand 
getTargetJumpTable(int JTI, MVT VT) { return getJumpTable(JTI, VT, true); } - SDOperand getConstantPool(Constant *C, MVT::ValueType VT, + SDOperand getConstantPool(Constant *C, MVT VT, unsigned Align = 0, int Offs = 0, bool isT=false); - SDOperand getTargetConstantPool(Constant *C, MVT::ValueType VT, + SDOperand getTargetConstantPool(Constant *C, MVT VT, unsigned Align = 0, int Offset = 0) { return getConstantPool(C, VT, Align, Offset, true); } - SDOperand getConstantPool(MachineConstantPoolValue *C, MVT::ValueType VT, + SDOperand getConstantPool(MachineConstantPoolValue *C, MVT VT, unsigned Align = 0, int Offs = 0, bool isT=false); SDOperand getTargetConstantPool(MachineConstantPoolValue *C, - MVT::ValueType VT, unsigned Align = 0, + MVT VT, unsigned Align = 0, int Offset = 0) { return getConstantPool(C, VT, Align, Offset, true); } SDOperand getBasicBlock(MachineBasicBlock *MBB); - SDOperand getExternalSymbol(const char *Sym, MVT::ValueType VT); - SDOperand getTargetExternalSymbol(const char *Sym, MVT::ValueType VT); + SDOperand getExternalSymbol(const char *Sym, MVT VT); + SDOperand getTargetExternalSymbol(const char *Sym, MVT VT); SDOperand getArgFlags(ISD::ArgFlagsTy Flags); - SDOperand getValueType(MVT::ValueType); - SDOperand getRegister(unsigned Reg, MVT::ValueType VT); + SDOperand getValueType(MVT); + SDOperand getRegister(unsigned Reg, MVT VT); SDOperand getCopyToReg(SDOperand Chain, unsigned Reg, SDOperand N) { return getNode(ISD::CopyToReg, MVT::Other, Chain, @@ -232,7 +229,7 @@ public: // null) and that there should be a flag result. SDOperand getCopyToReg(SDOperand Chain, unsigned Reg, SDOperand N, SDOperand Flag) { - const MVT::ValueType *VTs = getNodeValueTypes(MVT::Other, MVT::Flag); + const MVT *VTs = getNodeValueTypes(MVT::Other, MVT::Flag); SDOperand Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Flag }; return getNode(ISD::CopyToReg, VTs, 2, Ops, Flag.Val ? 4 : 3); } @@ -240,13 +237,13 @@ public: // Similar to last getCopyToReg() except parameter Reg is a SDOperand SDOperand getCopyToReg(SDOperand Chain, SDOperand Reg, SDOperand N, SDOperand Flag) { - const MVT::ValueType *VTs = getNodeValueTypes(MVT::Other, MVT::Flag); + const MVT *VTs = getNodeValueTypes(MVT::Other, MVT::Flag); SDOperand Ops[] = { Chain, Reg, N, Flag }; return getNode(ISD::CopyToReg, VTs, 2, Ops, Flag.Val ? 4 : 3); } - SDOperand getCopyFromReg(SDOperand Chain, unsigned Reg, MVT::ValueType VT) { - const MVT::ValueType *VTs = getNodeValueTypes(VT, MVT::Other); + SDOperand getCopyFromReg(SDOperand Chain, unsigned Reg, MVT VT) { + const MVT *VTs = getNodeValueTypes(VT, MVT::Other); SDOperand Ops[] = { Chain, getRegister(Reg, VT) }; return getNode(ISD::CopyFromReg, VTs, 2, Ops, 2); } @@ -254,9 +251,9 @@ public: // This version of the getCopyFromReg method takes an extra operand, which // indicates that there is potentially an incoming flag value (if Flag is not // null) and that there should be a flag result. - SDOperand getCopyFromReg(SDOperand Chain, unsigned Reg, MVT::ValueType VT, + SDOperand getCopyFromReg(SDOperand Chain, unsigned Reg, MVT VT, SDOperand Flag) { - const MVT::ValueType *VTs = getNodeValueTypes(VT, MVT::Other, MVT::Flag); + const MVT *VTs = getNodeValueTypes(VT, MVT::Other, MVT::Flag); SDOperand Ops[] = { Chain, getRegister(Reg, VT), Flag }; return getNode(ISD::CopyFromReg, VTs, 3, Ops, Flag.Val ? 3 : 2); } @@ -265,12 +262,12 @@ public: /// getZeroExtendInReg - Return the expression required to zero extend the Op /// value assuming it was the smaller SrcTy value. 
- SDOperand getZeroExtendInReg(SDOperand Op, MVT::ValueType SrcTy); + SDOperand getZeroExtendInReg(SDOperand Op, MVT SrcTy); /// getCALLSEQ_START - Return a new CALLSEQ_START node, which always must have /// a flag result (to ensure it's not CSE'd). SDOperand getCALLSEQ_START(SDOperand Chain, SDOperand Op) { - const MVT::ValueType *VTs = getNodeValueTypes(MVT::Other, MVT::Flag); + const MVT *VTs = getNodeValueTypes(MVT::Other, MVT::Flag); SDOperand Ops[] = { Chain, Op }; return getNode(ISD::CALLSEQ_START, VTs, 2, Ops, 2); } @@ -291,27 +288,24 @@ public: /// getNode - Gets or creates the specified node. /// - SDOperand getNode(unsigned Opcode, MVT::ValueType VT); - SDOperand getNode(unsigned Opcode, MVT::ValueType VT, SDOperand N); - SDOperand getNode(unsigned Opcode, MVT::ValueType VT, - SDOperand N1, SDOperand N2); - SDOperand getNode(unsigned Opcode, MVT::ValueType VT, + SDOperand getNode(unsigned Opcode, MVT VT); + SDOperand getNode(unsigned Opcode, MVT VT, SDOperand N); + SDOperand getNode(unsigned Opcode, MVT VT, SDOperand N1, SDOperand N2); + SDOperand getNode(unsigned Opcode, MVT VT, SDOperand N1, SDOperand N2, SDOperand N3); - SDOperand getNode(unsigned Opcode, MVT::ValueType VT, + SDOperand getNode(unsigned Opcode, MVT VT, SDOperand N1, SDOperand N2, SDOperand N3, SDOperand N4); - SDOperand getNode(unsigned Opcode, MVT::ValueType VT, + SDOperand getNode(unsigned Opcode, MVT VT, SDOperand N1, SDOperand N2, SDOperand N3, SDOperand N4, SDOperand N5); - SDOperand getNode(unsigned Opcode, MVT::ValueType VT, + SDOperand getNode(unsigned Opcode, MVT VT, SDOperandPtr Ops, unsigned NumOps); + SDOperand getNode(unsigned Opcode, std::vector<MVT> &ResultTys, SDOperandPtr Ops, unsigned NumOps); - SDOperand getNode(unsigned Opcode, std::vector<MVT::ValueType> &ResultTys, - SDOperandPtr Ops, unsigned NumOps); - SDOperand getNode(unsigned Opcode, const MVT::ValueType *VTs, unsigned NumVTs, + SDOperand getNode(unsigned Opcode, const MVT *VTs, unsigned NumVTs, SDOperandPtr Ops, unsigned NumOps); SDOperand getNode(unsigned Opcode, SDVTList VTs); SDOperand getNode(unsigned Opcode, SDVTList VTs, SDOperand N); - SDOperand getNode(unsigned Opcode, SDVTList VTs, - SDOperand N1, SDOperand N2); + SDOperand getNode(unsigned Opcode, SDVTList VTs, SDOperand N1, SDOperand N2); SDOperand getNode(unsigned Opcode, SDVTList VTs, SDOperand N1, SDOperand N2, SDOperand N3); SDOperand getNode(unsigned Opcode, SDVTList VTs, @@ -340,7 +334,7 @@ public: /// getSetCC - Helper function to make it easier to build SetCC's if you just /// have an ISD::CondCode instead of an SDOperand. /// - SDOperand getSetCC(MVT::ValueType VT, SDOperand LHS, SDOperand RHS, + SDOperand getSetCC(MVT VT, SDOperand LHS, SDOperand RHS, ISD::CondCode Cond) { return getNode(ISD::SETCC, VT, LHS, RHS, getCondCode(Cond)); } @@ -348,7 +342,7 @@ public: /// getVSetCC - Helper function to make it easier to build VSetCC's nodes /// if you just have an ISD::CondCode instead of an SDOperand. /// - SDOperand getVSetCC(MVT::ValueType VT, SDOperand LHS, SDOperand RHS, + SDOperand getVSetCC(MVT VT, SDOperand LHS, SDOperand RHS, ISD::CondCode Cond) { return getNode(ISD::VSETCC, VT, LHS, RHS, getCondCode(Cond)); } @@ -364,35 +358,35 @@ public: /// getVAArg - VAArg produces a result and token chain, and takes a pointer /// and a source value as input. 
- SDOperand getVAArg(MVT::ValueType VT, SDOperand Chain, SDOperand Ptr, + SDOperand getVAArg(MVT VT, SDOperand Chain, SDOperand Ptr, SDOperand SV); /// getAtomic - Gets a node for an atomic op, produces result and chain, takes // 3 operands SDOperand getAtomic(unsigned Opcode, SDOperand Chain, SDOperand Ptr, - SDOperand Cmp, SDOperand Swp, MVT::ValueType VT); + SDOperand Cmp, SDOperand Swp, MVT VT); /// getAtomic - Gets a node for an atomic op, produces result and chain, takes // 2 operands SDOperand getAtomic(unsigned Opcode, SDOperand Chain, SDOperand Ptr, - SDOperand Val, MVT::ValueType VT); + SDOperand Val, MVT VT); /// getLoad - Loads are not normal binary operators: their result type is not /// determined by their operands, and they produce a value AND a token chain. /// - SDOperand getLoad(MVT::ValueType VT, SDOperand Chain, SDOperand Ptr, + SDOperand getLoad(MVT VT, SDOperand Chain, SDOperand Ptr, const Value *SV, int SVOffset, bool isVolatile=false, unsigned Alignment=0); - SDOperand getExtLoad(ISD::LoadExtType ExtType, MVT::ValueType VT, + SDOperand getExtLoad(ISD::LoadExtType ExtType, MVT VT, SDOperand Chain, SDOperand Ptr, const Value *SV, - int SVOffset, MVT::ValueType EVT, bool isVolatile=false, + int SVOffset, MVT EVT, bool isVolatile=false, unsigned Alignment=0); SDOperand getIndexedLoad(SDOperand OrigLoad, SDOperand Base, SDOperand Offset, ISD::MemIndexedMode AM); SDOperand getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, - MVT::ValueType VT, SDOperand Chain, + MVT VT, SDOperand Chain, SDOperand Ptr, SDOperand Offset, - const Value *SV, int SVOffset, MVT::ValueType EVT, + const Value *SV, int SVOffset, MVT EVT, bool isVolatile=false, unsigned Alignment=0); /// getStore - Helper function to build ISD::STORE nodes. @@ -401,7 +395,7 @@ public: const Value *SV, int SVOffset, bool isVolatile=false, unsigned Alignment=0); SDOperand getTruncStore(SDOperand Chain, SDOperand Val, SDOperand Ptr, - const Value *SV, int SVOffset, MVT::ValueType TVT, + const Value *SV, int SVOffset, MVT TVT, bool isVolatile=false, unsigned Alignment=0); SDOperand getIndexedStore(SDOperand OrigStoe, SDOperand Base, SDOperand Offset, ISD::MemIndexedMode AM); @@ -434,20 +428,18 @@ public: /// operands. Note that target opcodes are stored as /// ISD::BUILTIN_OP_END+TargetOpcode in the node opcode field. The 0th value /// of the resultant node is returned. 
- SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT::ValueType VT); - SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT::ValueType VT, - SDOperand Op1); - SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT::ValueType VT, + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT, SDOperand Op1); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT, SDOperand Op1, SDOperand Op2); - SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT::ValueType VT, + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT, SDOperand Op1, SDOperand Op2, SDOperand Op3); - SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT::ValueType VT, + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT, SDOperandPtr Ops, unsigned NumOps); - SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT::ValueType VT1, - MVT::ValueType VT2, SDOperand Op1, SDOperand Op2); - SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT::ValueType VT1, - MVT::ValueType VT2, SDOperand Op1, SDOperand Op2, - SDOperand Op3); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1, + MVT VT2, SDOperand Op1, SDOperand Op2); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1, + MVT VT2, SDOperand Op1, SDOperand Op2, SDOperand Op3); /// getTargetNode - These are used for target selectors to create a new node @@ -456,41 +448,30 @@ public: /// Note that getTargetNode returns the resultant node. If there is already a /// node of the specified opcode and operands, it returns that node instead of /// the current one. - SDNode *getTargetNode(unsigned Opcode, MVT::ValueType VT); - SDNode *getTargetNode(unsigned Opcode, MVT::ValueType VT, - SDOperand Op1); - SDNode *getTargetNode(unsigned Opcode, MVT::ValueType VT, - SDOperand Op1, SDOperand Op2); - SDNode *getTargetNode(unsigned Opcode, MVT::ValueType VT, + SDNode *getTargetNode(unsigned Opcode, MVT VT); + SDNode *getTargetNode(unsigned Opcode, MVT VT, SDOperand Op1); + SDNode *getTargetNode(unsigned Opcode, MVT VT, SDOperand Op1, SDOperand Op2); + SDNode *getTargetNode(unsigned Opcode, MVT VT, SDOperand Op1, SDOperand Op2, SDOperand Op3); - SDNode *getTargetNode(unsigned Opcode, MVT::ValueType VT, + SDNode *getTargetNode(unsigned Opcode, MVT VT, SDOperandPtr Ops, unsigned NumOps); - SDNode *getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2); - SDNode *getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, SDOperand Op1); - SDNode *getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, SDOperand Op1, SDOperand Op2); - SDNode *getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, SDOperand Op1, SDOperand Op2, - SDOperand Op3); - SDNode *getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, + SDNode *getTargetNode(unsigned Opcode, MVT VT1, MVT VT2); + SDNode *getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, SDOperand Op1); + SDNode *getTargetNode(unsigned Opcode, MVT VT1, + MVT VT2, SDOperand Op1, SDOperand Op2); + SDNode *getTargetNode(unsigned Opcode, MVT VT1, + MVT VT2, SDOperand Op1, SDOperand Op2, SDOperand Op3); + SDNode *getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, SDOperandPtr Ops, unsigned NumOps); - SDNode *getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, MVT::ValueType VT3, + SDNode *getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, MVT VT3, SDOperand Op1, SDOperand Op2); - SDNode *getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType 
VT2, MVT::ValueType VT3, + SDNode *getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, MVT VT3, SDOperand Op1, SDOperand Op2, SDOperand Op3); - SDNode *getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, MVT::ValueType VT3, + SDNode *getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, MVT VT3, SDOperandPtr Ops, unsigned NumOps); - SDNode *getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, MVT::ValueType VT3, - MVT::ValueType VT4, + SDNode *getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, MVT VT3, MVT VT4, SDOperandPtr Ops, unsigned NumOps); - SDNode *getTargetNode(unsigned Opcode, std::vector<MVT::ValueType> &ResultTys, + SDNode *getTargetNode(unsigned Opcode, std::vector<MVT> &ResultTys, SDOperandPtr Ops, unsigned NumOps); /// getNodeIfExists - Get the specified node if it's already available, or @@ -570,10 +551,10 @@ public: /// CreateStackTemporary - Create a stack temporary, suitable for holding the /// specified value type. - SDOperand CreateStackTemporary(MVT::ValueType VT); + SDOperand CreateStackTemporary(MVT VT); /// FoldSetCC - Constant fold a setcc to true or false. - SDOperand FoldSetCC(MVT::ValueType VT, SDOperand N1, + SDOperand FoldSetCC(MVT VT, SDOperand N1, SDOperand N2, ISD::CondCode Cond); /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We @@ -623,13 +604,13 @@ private: void DeleteNodeNotInCSEMaps(SDNode *N); // List of non-single value types. - std::list<std::vector<MVT::ValueType> > VTList; + std::list<std::vector<MVT> > VTList; // Maps to auto-CSE operations. std::vector<CondCodeSDNode*> CondCodeNodes; std::vector<SDNode*> ValueTypeNodes; - std::map<MVT::ValueType, SDNode*> ExtendedValueTypeNodes; + std::map<MVT, SDNode*> ExtendedValueTypeNodes; std::map<std::string, SDNode*> ExternalSymbols; std::map<std::string, SDNode*> TargetExternalSymbols; std::map<std::string, StringSDNode*> StringNodes; diff --git a/include/llvm/CodeGen/SelectionDAGISel.h b/include/llvm/CodeGen/SelectionDAGISel.h index f981687..fc6b9fb 100644 --- a/include/llvm/CodeGen/SelectionDAGISel.h +++ b/include/llvm/CodeGen/SelectionDAGISel.h @@ -55,7 +55,7 @@ public: virtual bool runOnFunction(Function &Fn); - unsigned MakeReg(MVT::ValueType VT); + unsigned MakeReg(MVT VT); virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {} virtual void InstructionSelectBasicBlock(SelectionDAG &SD) = 0; diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h index 081b0e1..c2e4f1f 100644 --- a/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/include/llvm/CodeGen/SelectionDAGNodes.h @@ -48,7 +48,7 @@ template<typename NodeTy> class ilist_iterator; /// SelectionDAG::getVTList(...). /// struct SDVTList { - const MVT::ValueType *VTs; + const MVT *VTs; unsigned short NumVTs; }; @@ -834,12 +834,12 @@ public: /// getValueType - Return the ValueType of the referenced return value. /// - inline MVT::ValueType getValueType() const; + inline MVT getValueType() const; - /// getValueSizeInBits - Returns MVT::getSizeInBits(getValueType()). + /// getValueSizeInBits - Returns the size of the value in bits. /// unsigned getValueSizeInBits() const { - return MVT::getSizeInBits(getValueType()); + return getValueType().getSizeInBits(); } // Forwarding methods - These forward to the corresponding methods in SDNode. @@ -1045,7 +1045,7 @@ private: /// ValueList - The types of the values this node defines. SDNode's may /// define multiple values simultaneously. 
- const MVT::ValueType *ValueList; + const MVT *ValueList; /// NumOperands/NumValues - The number of entries in the Operand/Value list. unsigned short NumOperands, NumValues; @@ -1216,7 +1216,7 @@ public: /// getValueType - Return the type of a specified result. /// - MVT::ValueType getValueType(unsigned ResNo) const { + MVT getValueType(unsigned ResNo) const { assert(ResNo < NumValues && "Illegal result number!"); return ValueList[ResNo]; } @@ -1224,10 +1224,10 @@ public: /// getValueSizeInBits - Returns MVT::getSizeInBits(getValueType(ResNo)). /// unsigned getValueSizeInBits(unsigned ResNo) const { - return MVT::getSizeInBits(getValueType(ResNo)); + return getValueType(ResNo).getSizeInBits(); } - typedef const MVT::ValueType* value_iterator; + typedef const MVT* value_iterator; value_iterator value_begin() const { return ValueList; } value_iterator value_end() const { return ValueList+NumValues; } @@ -1249,8 +1249,8 @@ protected: /// getValueTypeList - Return a pointer to the specified value type. /// - static const MVT::ValueType *getValueTypeList(MVT::ValueType VT); - static SDVTList getSDVTList(MVT::ValueType VT) { + static const MVT *getValueTypeList(MVT VT); + static SDVTList getSDVTList(MVT VT) { SDVTList Ret = { getValueTypeList(VT), 1 }; return Ret; } @@ -1344,7 +1344,7 @@ protected: inline unsigned SDOperand::getOpcode() const { return Val->getOpcode(); } -inline MVT::ValueType SDOperand::getValueType() const { +inline MVT SDOperand::getValueType() const { return Val->getValueType(ResNo); } inline unsigned SDOperand::getNumOperands() const { @@ -1439,10 +1439,10 @@ public: class AtomicSDNode : public SDNode { virtual void ANCHOR(); // Out-of-line virtual method to give class a home. SDUse Ops[4]; - MVT::ValueType OrigVT; + MVT OrigVT; public: AtomicSDNode(unsigned Opc, SDVTList VTL, SDOperand Chain, SDOperand Ptr, - SDOperand Cmp, SDOperand Swp, MVT::ValueType VT) + SDOperand Cmp, SDOperand Swp, MVT VT) : SDNode(Opc, VTL) { Ops[0] = Chain; Ops[1] = Ptr; @@ -1452,7 +1452,7 @@ public: OrigVT=VT; } AtomicSDNode(unsigned Opc, SDVTList VTL, SDOperand Chain, SDOperand Ptr, - SDOperand Val, MVT::ValueType VT) + SDOperand Val, MVT VT) : SDNode(Opc, VTL) { Ops[0] = Chain; Ops[1] = Ptr; @@ -1460,7 +1460,7 @@ public: InitOperands(Ops, 3); OrigVT=VT; } - MVT::ValueType getVT() const { return OrigVT; } + MVT getVT() const { return OrigVT; } bool isCompareAndSwap() const { return getOpcode() == ISD::ATOMIC_LCS; } }; @@ -1485,7 +1485,7 @@ class ConstantSDNode : public SDNode { virtual void ANCHOR(); // Out-of-line virtual method to give class a home. protected: friend class SelectionDAG; - ConstantSDNode(bool isTarget, const APInt &val, MVT::ValueType VT) + ConstantSDNode(bool isTarget, const APInt &val, MVT VT) : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, getSDVTList(VT)), Value(val) { } @@ -1495,13 +1495,13 @@ public: uint64_t getValue() const { return Value.getZExtValue(); } int64_t getSignExtended() const { - unsigned Bits = MVT::getSizeInBits(getValueType(0)); + unsigned Bits = getValueType(0).getSizeInBits(); return ((int64_t)Value.getZExtValue() << (64-Bits)) >> (64-Bits); } bool isNullValue() const { return Value == 0; } bool isAllOnesValue() const { - return Value == MVT::getIntVTBitMask(getValueType(0)); + return Value == getValueType(0).getIntegerVTBitMask(); } static bool classof(const ConstantSDNode *) { return true; } @@ -1516,7 +1516,7 @@ class ConstantFPSDNode : public SDNode { virtual void ANCHOR(); // Out-of-line virtual method to give class a home. 
protected: friend class SelectionDAG; - ConstantFPSDNode(bool isTarget, const APFloat& val, MVT::ValueType VT) + ConstantFPSDNode(bool isTarget, const APFloat& val, MVT VT) : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, getSDVTList(VT)), Value(val) { } @@ -1542,7 +1542,7 @@ public: } bool isExactlyValue(const APFloat& V) const; - bool isValueValidForType(MVT::ValueType VT, const APFloat& Val); + bool isValueValidForType(MVT VT, const APFloat& Val); static bool classof(const ConstantFPSDNode *) { return true; } static bool classof(const SDNode *N) { @@ -1557,8 +1557,7 @@ class GlobalAddressSDNode : public SDNode { virtual void ANCHOR(); // Out-of-line virtual method to give class a home. protected: friend class SelectionDAG; - GlobalAddressSDNode(bool isTarget, const GlobalValue *GA, MVT::ValueType VT, - int o = 0); + GlobalAddressSDNode(bool isTarget, const GlobalValue *GA, MVT VT, int o = 0); public: GlobalValue *getGlobal() const { return TheGlobal; } @@ -1578,7 +1577,7 @@ class FrameIndexSDNode : public SDNode { virtual void ANCHOR(); // Out-of-line virtual method to give class a home. protected: friend class SelectionDAG; - FrameIndexSDNode(int fi, MVT::ValueType VT, bool isTarg) + FrameIndexSDNode(int fi, MVT VT, bool isTarg) : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex, getSDVTList(VT)), FI(fi) { } @@ -1598,7 +1597,7 @@ class JumpTableSDNode : public SDNode { virtual void ANCHOR(); // Out-of-line virtual method to give class a home. protected: friend class SelectionDAG; - JumpTableSDNode(int jti, MVT::ValueType VT, bool isTarg) + JumpTableSDNode(int jti, MVT VT, bool isTarg) : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable, getSDVTList(VT)), JTI(jti) { } @@ -1623,22 +1622,20 @@ class ConstantPoolSDNode : public SDNode { virtual void ANCHOR(); // Out-of-line virtual method to give class a home. protected: friend class SelectionDAG; - ConstantPoolSDNode(bool isTarget, Constant *c, MVT::ValueType VT, - int o=0) + ConstantPoolSDNode(bool isTarget, Constant *c, MVT VT, int o=0) : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, getSDVTList(VT)), Offset(o), Alignment(0) { assert((int)Offset >= 0 && "Offset is too large"); Val.ConstVal = c; } - ConstantPoolSDNode(bool isTarget, Constant *c, MVT::ValueType VT, int o, - unsigned Align) + ConstantPoolSDNode(bool isTarget, Constant *c, MVT VT, int o, unsigned Align) : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, getSDVTList(VT)), Offset(o), Alignment(Align) { assert((int)Offset >= 0 && "Offset is too large"); Val.ConstVal = c; } ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, - MVT::ValueType VT, int o=0) + MVT VT, int o=0) : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, getSDVTList(VT)), Offset(o), Alignment(0) { assert((int)Offset >= 0 && "Offset is too large"); @@ -1646,7 +1643,7 @@ protected: Offset |= 1 << (sizeof(unsigned)*8-1); } ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, - MVT::ValueType VT, int o, unsigned Align) + MVT VT, int o, unsigned Align) : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, getSDVTList(VT)), Offset(o), Alignment(Align) { assert((int)Offset >= 0 && "Offset is too large"); @@ -1760,7 +1757,7 @@ class RegisterSDNode : public SDNode { virtual void ANCHOR(); // Out-of-line virtual method to give class a home. 
protected: friend class SelectionDAG; - RegisterSDNode(unsigned reg, MVT::ValueType VT) + RegisterSDNode(unsigned reg, MVT VT) : SDNode(ISD::Register, getSDVTList(VT)), Reg(reg) { } public: @@ -1778,7 +1775,7 @@ class ExternalSymbolSDNode : public SDNode { virtual void ANCHOR(); // Out-of-line virtual method to give class a home. protected: friend class SelectionDAG; - ExternalSymbolSDNode(bool isTarget, const char *Sym, MVT::ValueType VT) + ExternalSymbolSDNode(bool isTarget, const char *Sym, MVT VT) : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, getSDVTList(VT)), Symbol(Sym) { } @@ -1914,19 +1911,19 @@ public: } }; -/// VTSDNode - This class is used to represent MVT::ValueType's, which are used +/// VTSDNode - This class is used to represent MVT's, which are used /// to parameterize some operations. class VTSDNode : public SDNode { - MVT::ValueType ValueType; + MVT ValueType; virtual void ANCHOR(); // Out-of-line virtual method to give class a home. protected: friend class SelectionDAG; - explicit VTSDNode(MVT::ValueType VT) + explicit VTSDNode(MVT VT) : SDNode(ISD::VALUETYPE, getSDVTList(MVT::Other)), ValueType(VT) { } public: - MVT::ValueType getVT() const { return ValueType; } + MVT getVT() const { return ValueType; } static bool classof(const VTSDNode *) { return true; } static bool classof(const SDNode *N) { @@ -1942,7 +1939,7 @@ private: ISD::MemIndexedMode AddrMode; // MemoryVT - VT of in-memory value. - MVT::ValueType MemoryVT; + MVT MemoryVT; //! SrcValue - Memory location for alias analysis. const Value *SrcValue; @@ -1965,7 +1962,7 @@ protected: SDUse Ops[4]; public: LSBaseSDNode(ISD::NodeType NodeTy, SDOperand *Operands, unsigned numOperands, - SDVTList VTs, ISD::MemIndexedMode AM, MVT::ValueType VT, + SDVTList VTs, ISD::MemIndexedMode AM, MVT VT, const Value *SV, int SVO, unsigned Align, bool Vol) : SDNode(NodeTy, VTs), AddrMode(AM), MemoryVT(VT), @@ -1989,7 +1986,7 @@ public: const Value *getSrcValue() const { return SrcValue; } int getSrcValueOffset() const { return SVOffset; } unsigned getAlignment() const { return Alignment; } - MVT::ValueType getMemoryVT() const { return MemoryVT; } + MVT getMemoryVT() const { return MemoryVT; } bool isVolatile() const { return IsVolatile; } ISD::MemIndexedMode getAddressingMode() const { return AddrMode; } @@ -2022,7 +2019,7 @@ class LoadSDNode : public LSBaseSDNode { protected: friend class SelectionDAG; LoadSDNode(SDOperand *ChainPtrOff, SDVTList VTs, - ISD::MemIndexedMode AM, ISD::LoadExtType ETy, MVT::ValueType LVT, + ISD::MemIndexedMode AM, ISD::LoadExtType ETy, MVT LVT, const Value *SV, int O=0, unsigned Align=0, bool Vol=false) : LSBaseSDNode(ISD::LOAD, ChainPtrOff, 3, VTs, AM, LVT, SV, O, Align, Vol), @@ -2049,7 +2046,7 @@ class StoreSDNode : public LSBaseSDNode { protected: friend class SelectionDAG; StoreSDNode(SDOperand *ChainValuePtrOff, SDVTList VTs, - ISD::MemIndexedMode AM, bool isTrunc, MVT::ValueType SVT, + ISD::MemIndexedMode AM, bool isTrunc, MVT SVT, const Value *SV, int O=0, unsigned Align=0, bool Vol=false) : LSBaseSDNode(ISD::STORE, ChainValuePtrOff, 4, VTs, AM, SVT, SV, O, Align, Vol), diff --git a/include/llvm/CodeGen/ValueTypes.h b/include/llvm/CodeGen/ValueTypes.h index 6b20b7d..1469f81 100644 --- a/include/llvm/CodeGen/ValueTypes.h +++ b/include/llvm/CodeGen/ValueTypes.h @@ -24,368 +24,389 @@ namespace llvm { class Type; -/// MVT namespace - This namespace defines the SimpleValueType enum, which -/// contains the various low-level value types, and the ValueType typedef. 
-/// -namespace MVT { // MVT = Machine Value Types - enum SimpleValueType { - // If you change this numbering, you must change the values in ValueTypes.td - // well! - Other = 0, // This is a non-standard value - i1 = 1, // This is a 1 bit integer value - i8 = 2, // This is an 8 bit integer value - i16 = 3, // This is a 16 bit integer value - i32 = 4, // This is a 32 bit integer value - i64 = 5, // This is a 64 bit integer value - i128 = 6, // This is a 128 bit integer value - - FIRST_INTEGER_VALUETYPE = i1, - LAST_INTEGER_VALUETYPE = i128, - - f32 = 7, // This is a 32 bit floating point value - f64 = 8, // This is a 64 bit floating point value - f80 = 9, // This is a 80 bit floating point value - f128 = 10, // This is a 128 bit floating point value - ppcf128 = 11, // This is a PPC 128-bit floating point value - Flag = 12, // This is a condition code or machine flag. - - isVoid = 13, // This has no value - - v8i8 = 14, // 8 x i8 - v4i16 = 15, // 4 x i16 - v2i32 = 16, // 2 x i32 - v1i64 = 17, // 1 x i64 - v16i8 = 18, // 16 x i8 - v8i16 = 19, // 8 x i16 - v3i32 = 20, // 3 x i32 - v4i32 = 21, // 4 x i32 - v2i64 = 22, // 2 x i64 - - v2f32 = 23, // 2 x f32 - v3f32 = 24, // 3 x f32 - v4f32 = 25, // 4 x f32 - v2f64 = 26, // 2 x f64 - - FIRST_VECTOR_VALUETYPE = v8i8, - LAST_VECTOR_VALUETYPE = v2f64, - - LAST_VALUETYPE = 27, // This always remains at the end of the list. - - // fAny - Any floating-point or vector floating-point value. This is used - // for intrinsics that have overloadings based on floating-point types. - // This is only for tblgen's consumption! - fAny = 253, - - // iAny - An integer or vector integer value of any bit width. This is - // used for intrinsics that have overloadings based on integer bit widths. - // This is only for tblgen's consumption! - iAny = 254, - - // iPTR - An int value the size of the pointer of the current - // target. This should only be used internal to tblgen! - iPTR = 255 - }; + struct MVT { // MVT = Machine Value Type + + enum SimpleValueType { + // If you change this numbering, you must change the values in + // ValueTypes.td well! + Other = 0, // This is a non-standard value + i1 = 1, // This is a 1 bit integer value + i8 = 2, // This is an 8 bit integer value + i16 = 3, // This is a 16 bit integer value + i32 = 4, // This is a 32 bit integer value + i64 = 5, // This is a 64 bit integer value + i128 = 6, // This is a 128 bit integer value + + FIRST_INTEGER_VALUETYPE = i1, + LAST_INTEGER_VALUETYPE = i128, + + f32 = 7, // This is a 32 bit floating point value + f64 = 8, // This is a 64 bit floating point value + f80 = 9, // This is a 80 bit floating point value + f128 = 10, // This is a 128 bit floating point value + ppcf128 = 11, // This is a PPC 128-bit floating point value + Flag = 12, // This is a condition code or machine flag. + + isVoid = 13, // This has no value + + v8i8 = 14, // 8 x i8 + v4i16 = 15, // 4 x i16 + v2i32 = 16, // 2 x i32 + v1i64 = 17, // 1 x i64 + v16i8 = 18, // 16 x i8 + v8i16 = 19, // 8 x i16 + v3i32 = 20, // 3 x i32 + v4i32 = 21, // 4 x i32 + v2i64 = 22, // 2 x i64 + + v2f32 = 23, // 2 x f32 + v3f32 = 24, // 3 x f32 + v4f32 = 25, // 4 x f32 + v2f64 = 26, // 2 x f64 + + FIRST_VECTOR_VALUETYPE = v8i8, + LAST_VECTOR_VALUETYPE = v2f64, + + LAST_VALUETYPE = 27, // This always remains at the end of the list. + + // fAny - Any floating-point or vector floating-point value. This is used + // for intrinsics that have overloadings based on floating-point types. + // This is only for tblgen's consumption! 
+ fAny = 253, + + // iAny - An integer or vector integer value of any bit width. This is + // used for intrinsics that have overloadings based on integer bit widths. + // This is only for tblgen's consumption! + iAny = 254, + + // iPTR - An int value the size of the pointer of the current + // target. This should only be used internal to tblgen! + iPTR = 255 + }; + + /// MVT - This type holds low-level value types. Valid values include any of + /// the values in the SimpleValueType enum, or any value returned from one + /// of the MVT methods. Any value type equal to one of the SimpleValueType + /// enum values is a "simple" value type. All others are "extended". + /// + /// Note that simple doesn't necessary mean legal for the target machine. + /// All legal value types must be simple, but often there are some simple + /// value types that are not legal. + /// + /// @internal + /// Extended types are either vector types or arbitrary precision integers. + /// Arbitrary precision integers have iAny in the first SimpleTypeBits bits, + /// and the bit-width in the next PrecisionBits bits, offset by minus one. + /// Vector types are encoded by having the first SimpleTypeBits+PrecisionBits + /// bits encode the vector element type (which must be a scalar type, possibly + /// an arbitrary precision integer) and the remaining VectorBits upper bits + /// encode the vector length, offset by one. + /// + /// 31--------------16-----------8-------------0 + /// | Vector length | Precision | Simple type | + /// | | Vector element | + /// + + static const int SimpleTypeBits = 8; + static const int PrecisionBits = 8; + static const int VectorBits = 32 - SimpleTypeBits - PrecisionBits; + + static const uint32_t SimpleTypeMask = + (~uint32_t(0) << (32 - SimpleTypeBits)) >> (32 - SimpleTypeBits); + + static const uint32_t PrecisionMask = + ((~uint32_t(0) << VectorBits) >> (32 - PrecisionBits)) << SimpleTypeBits; + + static const uint32_t VectorMask = + (~uint32_t(0) >> (32 - VectorBits)) << (32 - VectorBits); + + static const uint32_t ElementMask = + (~uint32_t(0) << VectorBits) >> VectorBits; + + uint32_t V; + + MVT() {} + MVT(SimpleValueType S) { V = S; } + inline bool operator== (const MVT VT) const { return V == VT.V; } + inline bool operator!= (const MVT VT) const { return V != VT.V; } + + /// FIXME: The following comparison methods are bogus - they are only here + /// to ease the transition to a struct type. + inline bool operator< (const MVT VT) const { return V < VT.V; } + inline bool operator<= (const MVT VT) const { return V <= VT.V; } + inline bool operator> (const MVT VT) const { return V > VT.V; } + inline bool operator>= (const MVT VT) const { return V >= VT.V; } + + /// getIntegerVT - Returns the MVT that represents an integer with the given + /// number of bits. + static inline MVT getIntegerVT(unsigned BitWidth) { + switch (BitWidth) { + default: + break; + case 1: + return i1; + case 8: + return i8; + case 16: + return i16; + case 32: + return i32; + case 64: + return i64; + case 128: + return i128; + } + MVT VT; + VT.V = iAny | (((BitWidth - 1) << SimpleTypeBits) & PrecisionMask); + assert(VT.getSizeInBits() == BitWidth && "Bad bit width!"); + return VT; + } + + /// getVectorVT - Returns the MVT that represents a vector NumElements in + /// length, where each element is of type VT. 
+ static inline MVT getVectorVT(MVT VT, unsigned NumElements) { + switch (VT.V) { + default: + break; + case i8: + if (NumElements == 8) return v8i8; + if (NumElements == 16) return v16i8; + break; + case i16: + if (NumElements == 4) return v4i16; + if (NumElements == 8) return v8i16; + break; + case i32: + if (NumElements == 2) return v2i32; + if (NumElements == 3) return v3i32; + if (NumElements == 4) return v4i32; + break; + case i64: + if (NumElements == 1) return v1i64; + if (NumElements == 2) return v2i64; + break; + case f32: + if (NumElements == 2) return v2f32; + if (NumElements == 3) return v3f32; + if (NumElements == 4) return v4f32; + break; + case f64: + if (NumElements == 2) return v2f64; + break; + } + // Set the length with the top bit forced to zero (needed by the verifier). + MVT Result; + Result.V = VT.V | (((NumElements + 1) << (33 - VectorBits)) >> 1); + assert(Result.getVectorElementType() == VT && + "Bad vector element type!"); + assert(Result.getVectorNumElements() == NumElements && + "Bad vector length!"); + return Result; + } + + /// getIntVectorWithNumElements - Return any integer vector type that has + /// the specified number of elements. + static inline MVT getIntVectorWithNumElements(unsigned NumElts) { + switch (NumElts) { + default: return getVectorVT(i8, NumElts); + case 1: return v1i64; + case 2: return v2i32; + case 3: return v3i32; + case 4: return v4i16; + case 8: return v8i8; + case 16: return v16i8; + } + } + + + /// isSimple - Test if the given MVT is simple (as opposed to being + /// extended). + inline bool isSimple() const { + return V <= SimpleTypeMask; + } - /// MVT::ValueType - This type holds low-level value types. Valid values - /// include any of the values in the SimpleValueType enum, or any value - /// returned from a function in the MVT namespace that has a ValueType - /// return type. Any value type equal to one of the SimpleValueType enum - /// values is a "simple" value type. All other value types are "extended". - /// - /// Note that simple doesn't necessary mean legal for the target machine. - /// All legal value types must be simple, but often there are some simple - /// value types that are not legal. - /// - /// @internal - /// Extended types are either vector types or arbitrary precision integers. - /// Arbitrary precision integers have iAny in the first SimpleTypeBits bits, - /// and the bit-width in the next PrecisionBits bits, offset by minus one. - /// Vector types are encoded by having the first SimpleTypeBits+PrecisionBits - /// bits encode the vector element type (which must be a scalar type, possibly - /// an arbitrary precision integer) and the remaining VectorBits upper bits - /// encode the vector length, offset by one. - /// - /// 31--------------16-----------8-------------0 - /// | Vector length | Precision | Simple type | - /// | | Vector element | - /// - /// Note that the verifier currently requires the top bit to be zero. 
- - typedef uint32_t ValueType; - - static const int SimpleTypeBits = 8; - static const int PrecisionBits = 8; - static const int VectorBits = 32 - SimpleTypeBits - PrecisionBits; - - static const uint32_t SimpleTypeMask = - (~uint32_t(0) << (32 - SimpleTypeBits)) >> (32 - SimpleTypeBits); - - static const uint32_t PrecisionMask = - ((~uint32_t(0) << VectorBits) >> (32 - PrecisionBits)) << SimpleTypeBits; - - static const uint32_t VectorMask = - (~uint32_t(0) >> (32 - VectorBits)) << (32 - VectorBits); - - static const uint32_t ElementMask = - (~uint32_t(0) << VectorBits) >> VectorBits; - - /// MVT::isExtendedVT - Test if the given ValueType is extended - /// (as opposed to being simple). - static inline bool isExtendedVT(ValueType VT) { - return VT > SimpleTypeMask; - } - - /// MVT::isInteger - Return true if this is an integer, or a vector integer - /// type. - static inline bool isInteger(ValueType VT) { - ValueType SVT = VT & SimpleTypeMask; - return (SVT >= FIRST_INTEGER_VALUETYPE && SVT <= LAST_INTEGER_VALUETYPE) || - (SVT >= v8i8 && SVT <= v2i64) || (SVT == iAny && (VT & PrecisionMask)); - } - - /// MVT::isFloatingPoint - Return true if this is an FP, or a vector FP type. - static inline bool isFloatingPoint(ValueType VT) { - ValueType SVT = VT & SimpleTypeMask; - return (SVT >= f32 && SVT <= ppcf128) || (SVT >= v2f32 && SVT <= v2f64); - } - - /// MVT::isVector - Return true if this is a vector value type. - static inline bool isVector(ValueType VT) { - return (VT >= FIRST_VECTOR_VALUETYPE && VT <= LAST_VECTOR_VALUETYPE) || - (VT & VectorMask); - } - - /// MVT::getVectorElementType - Given a vector type, return the type of - /// each element. - static inline ValueType getVectorElementType(ValueType VT) { - assert(isVector(VT) && "Invalid vector type!"); - switch (VT) { - default: - assert(isExtendedVT(VT) && "Unknown simple vector type!"); - return VT & ElementMask; - case v8i8 : - case v16i8: return i8; - case v4i16: - case v8i16: return i16; - case v2i32: - case v3i32: - case v4i32: return i32; - case v1i64: - case v2i64: return i64; - case v2f32: - case v3f32: - case v4f32: return f32; - case v2f64: return f64; + /// isExtended - Test if the given MVT is extended (as opposed to + /// being simple). + inline bool isExtended() const { + return !isSimple(); } - } - - /// MVT::getVectorNumElements - Given a vector type, return the - /// number of elements it contains. - static inline unsigned getVectorNumElements(ValueType VT) { - assert(isVector(VT) && "Invalid vector type!"); - switch (VT) { - default: - assert(isExtendedVT(VT) && "Unknown simple vector type!"); - return ((VT & VectorMask) >> (32 - VectorBits)) - 1; - case v16i8: return 16; - case v8i8 : - case v8i16: return 8; - case v4i16: - case v4i32: - case v4f32: return 4; - case v3i32: - case v3f32: return 3; - case v2i32: - case v2i64: - case v2f32: - case v2f64: return 2; - case v1i64: return 1; + + /// isFloatingPoint - Return true if this is a FP, or a vector FP type. + inline bool isFloatingPoint() const { + uint32_t SVT = V & SimpleTypeMask; + return (SVT >= f32 && SVT <= ppcf128) || (SVT >= v2f32 && SVT <= v2f64); } - } - - /// MVT::getSizeInBits - Return the size of the specified value type - /// in bits. 
- /// - static inline unsigned getSizeInBits(ValueType VT) { - switch (VT) { - default: - assert(isExtendedVT(VT) && "ValueType has no known size!"); - if (isVector(VT)) - return getSizeInBits(getVectorElementType(VT)) * - getVectorNumElements(VT); - if (isInteger(VT)) - return ((VT & PrecisionMask) >> SimpleTypeBits) + 1; - assert(0 && "Unknown value type!"); - case MVT::i1 : return 1; - case MVT::i8 : return 8; - case MVT::i16 : return 16; - case MVT::f32 : - case MVT::i32 : return 32; - case MVT::f64 : - case MVT::i64 : - case MVT::v8i8: - case MVT::v4i16: - case MVT::v2i32: - case MVT::v1i64: - case MVT::v2f32: return 64; - case MVT::f80 : return 80; - case MVT::v3i32: - case MVT::v3f32: return 96; - case MVT::f128: - case MVT::ppcf128: - case MVT::i128: - case MVT::v16i8: - case MVT::v8i16: - case MVT::v4i32: - case MVT::v2i64: - case MVT::v4f32: - case MVT::v2f64: return 128; + + /// isInteger - Return true if this is an integer, or a vector integer type. + inline bool isInteger() const { + uint32_t SVT = V & SimpleTypeMask; + return (SVT >= FIRST_INTEGER_VALUETYPE && SVT <= LAST_INTEGER_VALUETYPE) || + (SVT >= v8i8 && SVT <= v2i64) || (SVT == iAny && (V & PrecisionMask)); } - } - - /// MVT::getStoreSizeInBits - Return the number of bits overwritten by a - /// store of the specified value type. - /// - static inline unsigned getStoreSizeInBits(ValueType VT) { - return (getSizeInBits(VT) + 7)/8*8; - } - - /// MVT::is64BitVector - Return true if this is a 64-bit vector type. - static inline bool is64BitVector(ValueType VT) { - return (VT==v8i8 || VT==v4i16 || VT==v2i32 || VT==v1i64 || VT==v2f32 || - (isExtendedVT(VT) && isVector(VT) && getSizeInBits(VT)==64)); - } - - /// MVT::is128BitVector - Return true if this is a 128-bit vector type. - static inline bool is128BitVector(ValueType VT) { - return (VT==v16i8 || VT==v8i16 || VT==v4i32 || VT==v2i64 || - VT==v4f32 || VT==v2f64 || - (isExtendedVT(VT) && isVector(VT) && getSizeInBits(VT)==128)); - } - - /// MVT::getIntegerType - Returns the ValueType that represents an integer - /// with the given number of bits. - /// - static inline ValueType getIntegerType(unsigned BitWidth) { - switch (BitWidth) { - default: - break; - case 1: - return MVT::i1; - case 8: - return MVT::i8; - case 16: - return MVT::i16; - case 32: - return MVT::i32; - case 64: - return MVT::i64; - case 128: - return MVT::i128; + + /// isVector - Return true if this is a vector value type. + inline bool isVector() const { + return (V >= FIRST_VECTOR_VALUETYPE && V <= LAST_VECTOR_VALUETYPE) || + (V & VectorMask); } - ValueType Result = iAny | - (((BitWidth - 1) << SimpleTypeBits) & PrecisionMask); - assert(getSizeInBits(Result) == BitWidth && "Bad bit width!"); - return Result; - } - - /// MVT::RoundIntegerType - Rounds the bit-width of the given integer - /// ValueType up to the nearest power of two (and at least to eight), - /// and returns the integer ValueType with that number of bits. - /// - static inline ValueType RoundIntegerType(ValueType VT) { - assert(isInteger(VT) && !isVector(VT) && "Invalid integer type!"); - unsigned BitWidth = getSizeInBits(VT); - if (BitWidth <= 8) - return MVT::i8; - else - return getIntegerType(1 << Log2_32_Ceil(BitWidth)); - } - - /// MVT::getVectorType - Returns the ValueType that represents a vector - /// NumElements in length, where each element is of type VT. 
- /// - static inline ValueType getVectorType(ValueType VT, unsigned NumElements) { - switch (VT) { - default: - break; - case MVT::i8: - if (NumElements == 8) return MVT::v8i8; - if (NumElements == 16) return MVT::v16i8; - break; - case MVT::i16: - if (NumElements == 4) return MVT::v4i16; - if (NumElements == 8) return MVT::v8i16; - break; - case MVT::i32: - if (NumElements == 2) return MVT::v2i32; - if (NumElements == 3) return MVT::v3i32; - if (NumElements == 4) return MVT::v4i32; - break; - case MVT::i64: - if (NumElements == 1) return MVT::v1i64; - if (NumElements == 2) return MVT::v2i64; - break; - case MVT::f32: - if (NumElements == 2) return MVT::v2f32; - if (NumElements == 3) return MVT::v3f32; - if (NumElements == 4) return MVT::v4f32; - break; - case MVT::f64: - if (NumElements == 2) return MVT::v2f64; - break; + + /// is64BitVector - Return true if this is a 64-bit vector type. + inline bool is64BitVector() const { + return (V==v8i8 || V==v4i16 || V==v2i32 || V==v1i64 || V==v2f32 || + (isExtended() && isVector() && getSizeInBits()==64)); } - // Set the length with the top bit forced to zero (needed by the verifier). - ValueType Result = VT | (((NumElements + 1) << (33 - VectorBits)) >> 1); - assert(getVectorElementType(Result) == VT && - "Bad vector element type!"); - assert(getVectorNumElements(Result) == NumElements && - "Bad vector length!"); - return Result; - } - - /// MVT::getIntVectorWithNumElements - Return any integer vector type that has - /// the specified number of elements. - static inline ValueType getIntVectorWithNumElements(unsigned NumElts) { - switch (NumElts) { - default: return getVectorType(i8, NumElts); - case 1: return v1i64; - case 2: return v2i32; - case 3: return v3i32; - case 4: return v4i16; - case 8: return v8i8; - case 16: return v16i8; + + /// is128BitVector - Return true if this is a 128-bit vector type. + inline bool is128BitVector() const { + return (V==v16i8 || V==v8i16 || V==v4i32 || V==v2i64 || + V==v4f32 || V==v2f64 || + (isExtended() && isVector() && getSizeInBits()==128)); } - } - - - /// MVT::getIntVTBitMask - Return an integer with 1's every place there are - /// bits in the specified integer value type. - static inline uint64_t getIntVTBitMask(ValueType VT) { - assert(isInteger(VT) && !isVector(VT) && "Only applies to int scalars!"); - return ~uint64_t(0UL) >> (64-getSizeInBits(VT)); - } - /// MVT::getIntVTSignBit - Return an integer with a 1 in the position of the - /// sign bit for the specified integer value type. - static inline uint64_t getIntVTSignBit(ValueType VT) { - assert(isInteger(VT) && !isVector(VT) && "Only applies to int scalars!"); - return uint64_t(1UL) << (getSizeInBits(VT)-1); - } - - /// MVT::getValueTypeString - This function returns value type as a string, - /// e.g. "i32". - std::string getValueTypeString(ValueType VT); - - /// MVT::getTypeForValueType - This method returns an LLVM type corresponding - /// to the specified ValueType. For integer types, this returns an unsigned - /// type. Note that this will abort for types that cannot be represented. - const Type *getTypeForValueType(ValueType VT); - - /// MVT::getValueType - Return the value type corresponding to the specified - /// type. This returns all pointers as MVT::iPTR. If HandleUnknown is true, - /// unknown types are returned as Other, otherwise they are invalid. - ValueType getValueType(const Type *Ty, bool HandleUnknown = false); -} + + + /// getSimpleVT - Return the SimpleValueType held in the specified + /// simple MVT. 
+ inline SimpleValueType getSimpleVT() const { + assert(isSimple() && "Expected a SimpleValueType!"); + return (SimpleValueType)V; + } + + /// getVectorElementType - Given a vector type, return the type of + /// each element. + inline MVT getVectorElementType() const { + assert(isVector() && "Invalid vector type!"); + switch (V) { + default: { + assert(isExtended() && "Unknown simple vector type!"); + MVT VT; + VT.V = V & ElementMask; + return VT; + } + case v8i8 : + case v16i8: return i8; + case v4i16: + case v8i16: return i16; + case v2i32: + case v3i32: + case v4i32: return i32; + case v1i64: + case v2i64: return i64; + case v2f32: + case v3f32: + case v4f32: return f32; + case v2f64: return f64; + } + } + + /// getVectorNumElements - Given a vector type, return the number of + /// elements it contains. + inline unsigned getVectorNumElements() const { + assert(isVector() && "Invalid vector type!"); + switch (V) { + default: + assert(isExtended() && "Unknown simple vector type!"); + return ((V & VectorMask) >> (32 - VectorBits)) - 1; + case v16i8: return 16; + case v8i8 : + case v8i16: return 8; + case v4i16: + case v4i32: + case v4f32: return 4; + case v3i32: + case v3f32: return 3; + case v2i32: + case v2i64: + case v2f32: + case v2f64: return 2; + case v1i64: return 1; + } + } + + /// getSizeInBits - Return the size of the specified value type in bits. + inline unsigned getSizeInBits() const { + switch (V) { + default: + assert(isExtended() && "MVT has no known size!"); + if (isVector()) + return getVectorElementType().getSizeInBits()*getVectorNumElements(); + if (isInteger()) + return ((V & PrecisionMask) >> SimpleTypeBits) + 1; + assert(false && "Unknown value type!"); + return 0; + case i1 : return 1; + case i8 : return 8; + case i16 : return 16; + case f32 : + case i32 : return 32; + case f64 : + case i64 : + case v8i8: + case v4i16: + case v2i32: + case v1i64: + case v2f32: return 64; + case f80 : return 80; + case v3i32: + case v3f32: return 96; + case f128: + case ppcf128: + case i128: + case v16i8: + case v8i16: + case v4i32: + case v2i64: + case v4f32: + case v2f64: return 128; + } + } + + /// getStoreSizeInBits - Return the number of bits overwritten by a store + /// of the specified value type. + inline unsigned getStoreSizeInBits() const { + return (getSizeInBits() + 7)/8*8; + } + + /// getRoundIntegerType - Rounds the bit-width of the given integer MVT up + /// to the nearest power of two (and at least to eight), and returns the + /// integer MVT with that number of bits. + inline MVT getRoundIntegerType() const { + assert(isInteger() && !isVector() && "Invalid integer type!"); + unsigned BitWidth = getSizeInBits(); + if (BitWidth <= 8) + return i8; + else + return getIntegerVT(1 << Log2_32_Ceil(BitWidth)); + } + + /// getIntegerVTBitMask - Return an integer with 1's every place there are + /// bits in the specified integer value type. FIXME: Should return an apint. + inline uint64_t getIntegerVTBitMask() const { + assert(isInteger() && !isVector() && "Only applies to int scalars!"); + return ~uint64_t(0UL) >> (64-getSizeInBits()); + } + + /// getIntegerVTSignBit - Return an integer with a 1 in the position of the + /// sign bit for the specified integer value type. FIXME: Should return an + /// apint. + inline uint64_t getIntegerVTSignBit() const { + assert(isInteger() && !isVector() && "Only applies to int scalars!"); + return uint64_t(1UL) << (getSizeInBits()-1); + } + + /// getMVTString - This function returns value type as a string, + /// e.g. "i32". 
+ std::string getMVTString() const; + + /// getTypeForMVT - This method returns an LLVM type corresponding to the + /// specified MVT. For integer types, this returns an unsigned type. Note + /// that this will abort for types that cannot be represented. + const Type *getTypeForMVT() const; + + /// getMVT - Return the value type corresponding to the specified type. + /// This returns all pointers as iPTR. If HandleUnknown is true, unknown + /// types are returned as Other, otherwise they are invalid. + static MVT getMVT(const Type *Ty, bool HandleUnknown = false); + }; } // End llvm namespace diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index 5ba1018..eb436d6 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -90,8 +90,8 @@ public: bool isBigEndian() const { return !IsLittleEndian; } bool isLittleEndian() const { return IsLittleEndian; } - MVT::ValueType getPointerTy() const { return PointerTy; } - MVT::ValueType getShiftAmountTy() const { return ShiftAmountTy; } + MVT getPointerTy() const { return PointerTy; } + MVT getShiftAmountTy() const { return ShiftAmountTy; } OutOfRangeShiftAmount getShiftAmountFlavor() const {return ShiftAmtHandling; } /// usesGlobalOffsetTable - Return true if this target uses a GOT for PIC @@ -112,7 +112,7 @@ public: /// getSetCCResultType - Return the ValueType of the result of setcc /// operations. - virtual MVT::ValueType getSetCCResultType(const SDOperand &) const; + virtual MVT getSetCCResultType(const SDOperand &) const; /// getSetCCResultContents - For targets without boolean registers, this flag /// returns information about the contents of the high-bits in the setcc @@ -126,9 +126,9 @@ public: /// getRegClassFor - Return the register class that should be used for the /// specified value type. This may only be called on legal types. - TargetRegisterClass *getRegClassFor(MVT::ValueType VT) const { - assert(VT < array_lengthof(RegClassForVT)); - TargetRegisterClass *RC = RegClassForVT[VT]; + TargetRegisterClass *getRegClassFor(MVT VT) const { + assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT)); + TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT()]; assert(RC && "This value type is not natively supported!"); return RC; } @@ -136,9 +136,10 @@ public: /// isTypeLegal - Return true if the target has native support for the /// specified value type. This means that it has a register that directly /// holds it without promotions or expansions. - bool isTypeLegal(MVT::ValueType VT) const { - assert(MVT::isExtendedVT(VT) || VT < array_lengthof(RegClassForVT)); - return !MVT::isExtendedVT(VT) && RegClassForVT[VT] != 0; + bool isTypeLegal(MVT VT) const { + assert(!VT.isSimple() || + (unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT)); + return VT.isSimple() && RegClassForVT[VT.getSimpleVT()] != 0; } class ValueTypeActionImpl { @@ -155,20 +156,23 @@ public: ValueTypeActions[1] = RHS.ValueTypeActions[1]; } - LegalizeAction getTypeAction(MVT::ValueType VT) const { - if (MVT::isExtendedVT(VT)) { - if (MVT::isVector(VT)) return Expand; - if (MVT::isInteger(VT)) + LegalizeAction getTypeAction(MVT VT) const { + if (VT.isExtended()) { + if (VT.isVector()) return Expand; + if (VT.isInteger()) // First promote to a power-of-two size, then expand if necessary. - return VT == MVT::RoundIntegerType(VT) ? Expand : Promote; + return VT == VT.getRoundIntegerType() ? 
Expand : Promote; assert(0 && "Unsupported extended type!"); + return Legal; } - assert(VT<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0])); - return (LegalizeAction)((ValueTypeActions[VT>>4] >> ((2*VT) & 31)) & 3); + unsigned I = VT.getSimpleVT(); + assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0])); + return (LegalizeAction)((ValueTypeActions[I>>4] >> ((2*I) & 31)) & 3); } - void setTypeAction(MVT::ValueType VT, LegalizeAction Action) { - assert(VT<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0])); - ValueTypeActions[VT>>4] |= Action << ((VT*2) & 31); + void setTypeAction(MVT VT, LegalizeAction Action) { + unsigned I = VT.getSimpleVT(); + assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0])); + ValueTypeActions[I>>4] |= Action << ((I*2) & 31); } }; @@ -180,7 +184,7 @@ public: /// it is already legal (return 'Legal') or we need to promote it to a larger /// type (return 'Promote'), or we need to expand it into multiple registers /// of smaller integer type (return 'Expand'). 'Custom' is not an option. - LegalizeAction getTypeAction(MVT::ValueType VT) const { + LegalizeAction getTypeAction(MVT VT) const { return ValueTypeActions.getTypeAction(VT); } @@ -190,37 +194,37 @@ public: /// than the largest integer register, this contains one step in the expansion /// to get to the smaller register. For illegal floating point types, this /// returns the integer type to transform to. - MVT::ValueType getTypeToTransformTo(MVT::ValueType VT) const { - if (!MVT::isExtendedVT(VT)) { - assert(VT < array_lengthof(TransformToType)); - MVT::ValueType NVT = TransformToType[VT]; + MVT getTypeToTransformTo(MVT VT) const { + if (VT.isSimple()) { + assert((unsigned)VT.getSimpleVT() < array_lengthof(TransformToType)); + MVT NVT = TransformToType[VT.getSimpleVT()]; assert(getTypeAction(NVT) != Promote && "Promote may not follow Expand or Promote"); return NVT; } - if (MVT::isVector(VT)) - return MVT::getVectorType(MVT::getVectorElementType(VT), - MVT::getVectorNumElements(VT) / 2); - if (MVT::isInteger(VT)) { - MVT::ValueType NVT = MVT::RoundIntegerType(VT); + if (VT.isVector()) + return MVT::getVectorVT(VT.getVectorElementType(), + VT.getVectorNumElements() / 2); + if (VT.isInteger()) { + MVT NVT = VT.getRoundIntegerType(); if (NVT == VT) // Size is a power of two - expand to half the size. - return MVT::getIntegerType(MVT::getSizeInBits(VT) / 2); + return MVT::getIntegerVT(VT.getSizeInBits() / 2); else // Promote to a power of two size, avoiding multi-step promotion. return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT; } assert(0 && "Unsupported extended type!"); - return MVT::ValueType(); // Not reached + return MVT(); // Not reached } /// getTypeToExpandTo - For types supported by the target, this is an /// identity function. For types that must be expanded (i.e. integer types /// that are larger than the largest integer register or illegal floating /// point types), this returns the largest legal type it will be expanded to. - MVT::ValueType getTypeToExpandTo(MVT::ValueType VT) const { - assert(!MVT::isVector(VT)); + MVT getTypeToExpandTo(MVT VT) const { + assert(!VT.isVector()); while (true) { switch (getTypeAction(VT)) { case Legal: @@ -245,10 +249,10 @@ public: /// register. It also returns the VT and quantity of the intermediate values /// before they are promoted/expanded. 
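
// A standalone illustrative sketch (the array size and enum values are chosen
// for the example). It mirrors how ValueTypeActionImpl above packs one 2-bit
// LegalizeAction per simple value type into 32-bit words: type I lives in word
// I>>4 at bit offset (2*I) & 31.
#include <cassert>
#include <cstdint>

enum Action { Legal = 0, Promote = 1, Expand = 2, Custom = 3 };

struct PackedActions {
  uint32_t Words[2] = {0, 0};               // room for 32 types in this sketch
  void set(unsigned I, Action A) {
    Words[I >> 4] &= ~(uint32_t(3) << ((2 * I) & 31));   // clear the 2-bit slot
    Words[I >> 4] |= uint32_t(A) << ((2 * I) & 31);      // then store the action
  }
  Action get(unsigned I) const {
    return Action((Words[I >> 4] >> ((2 * I) & 31)) & 3);
  }
};

int main() {
  PackedActions P;
  P.set(3, Expand);    // e.g. the 4th simple type
  P.set(17, Promote);  // lands in the second 32-bit word
  assert(P.get(3) == Expand && P.get(17) == Promote && P.get(0) == Legal);
  return 0;
}
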
/// - unsigned getVectorTypeBreakdown(MVT::ValueType VT, - MVT::ValueType &IntermediateVT, + unsigned getVectorTypeBreakdown(MVT VT, + MVT &IntermediateVT, unsigned &NumIntermediates, - MVT::ValueType &RegisterVT) const; + MVT &RegisterVT) const; typedef std::vector<APFloat>::const_iterator legal_fpimm_iterator; legal_fpimm_iterator legal_fpimm_begin() const { @@ -262,7 +266,7 @@ public: /// support *some* VECTOR_SHUFFLE operations, those with specific masks. /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values /// are assumed to be legal. - virtual bool isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { + virtual bool isShuffleMaskLegal(SDOperand Mask, MVT VT) const { return true; } @@ -271,7 +275,7 @@ public: /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant /// pool entry. virtual bool isVectorClearMaskLegal(const std::vector<SDOperand> &BVOps, - MVT::ValueType EVT, + MVT EVT, SelectionDAG &DAG) const { return false; } @@ -280,16 +284,17 @@ public: /// it is legal, needs to be promoted to a larger size, needs to be /// expanded to some other code sequence, or the target has a custom expander /// for it. - LegalizeAction getOperationAction(unsigned Op, MVT::ValueType VT) const { - if (MVT::isExtendedVT(VT)) return Expand; + LegalizeAction getOperationAction(unsigned Op, MVT VT) const { + if (VT.isExtended()) return Expand; assert(Op < array_lengthof(OpActions) && - VT < sizeof(OpActions[0])*4 && "Table isn't big enough!"); - return (LegalizeAction)((OpActions[Op] >> (2*VT)) & 3); + (unsigned)VT.getSimpleVT() < sizeof(OpActions[0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((OpActions[Op] >> (2*VT.getSimpleVT())) & 3); } /// isOperationLegal - Return true if the specified operation is legal on this /// target. - bool isOperationLegal(unsigned Op, MVT::ValueType VT) const { + bool isOperationLegal(unsigned Op, MVT VT) const { return getOperationAction(Op, VT) == Legal || getOperationAction(Op, VT) == Custom; } @@ -298,16 +303,17 @@ public: /// either it is legal, needs to be promoted to a larger size, needs to be /// expanded to some other code sequence, or the target has a custom expander /// for it. - LegalizeAction getLoadXAction(unsigned LType, MVT::ValueType VT) const { + LegalizeAction getLoadXAction(unsigned LType, MVT VT) const { assert(LType < array_lengthof(LoadXActions) && - VT < sizeof(LoadXActions[0])*4 && "Table isn't big enough!"); - return (LegalizeAction)((LoadXActions[LType] >> (2*VT)) & 3); + (unsigned)VT.getSimpleVT() < sizeof(LoadXActions[0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((LoadXActions[LType] >> (2*VT.getSimpleVT())) & 3); } /// isLoadXLegal - Return true if the specified load with extension is legal /// on this target. - bool isLoadXLegal(unsigned LType, MVT::ValueType VT) const { - return !MVT::isExtendedVT(VT) && + bool isLoadXLegal(unsigned LType, MVT VT) const { + return VT.isSimple() && (getLoadXAction(LType, VT) == Legal || getLoadXAction(LType, VT) == Custom); } @@ -316,17 +322,19 @@ public: /// treated: either it is legal, needs to be promoted to a larger size, needs /// to be expanded to some other code sequence, or the target has a custom /// expander for it. 
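
// A standalone illustrative sketch (the 128-bit register width and the helper
// below are assumptions for the example, not the actual implementation). It
// shows the kind of answer getVectorTypeBreakdown above produces: an illegal
// wide vector is split into N intermediate parts of a legal narrower vector.
#include <cassert>

struct Breakdown { unsigned PartElems; unsigned NumParts; };

// Split a vector of NumElems x ElemBits into parts no wider than RegBits.
static Breakdown breakDown(unsigned ElemBits, unsigned NumElems,
                           unsigned RegBits) {
  unsigned PartElems = NumElems;
  while (PartElems * ElemBits > RegBits && (PartElems & 1) == 0)
    PartElems /= 2;                           // halve until it fits
  return Breakdown{PartElems, NumElems / PartElems};
}

int main() {
  // e.g. a v8f32 on a target with 128-bit vector registers -> 2 x v4f32.
  Breakdown B = breakDown(32, 8, 128);
  assert(B.PartElems == 4 && B.NumParts == 2);
  return 0;
}
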
- LegalizeAction getTruncStoreAction(MVT::ValueType ValVT, - MVT::ValueType MemVT) const { - assert(ValVT < array_lengthof(TruncStoreActions) && - MemVT < sizeof(TruncStoreActions[0])*4 && "Table isn't big enough!"); - return (LegalizeAction)((TruncStoreActions[ValVT] >> (2*MemVT)) & 3); + LegalizeAction getTruncStoreAction(MVT ValVT, + MVT MemVT) const { + assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) && + (unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((TruncStoreActions[ValVT.getSimpleVT()] >> + (2*MemVT.getSimpleVT())) & 3); } /// isTruncStoreLegal - Return true if the specified store with truncation is /// legal on this target. - bool isTruncStoreLegal(MVT::ValueType ValVT, MVT::ValueType MemVT) const { - return !MVT::isExtendedVT(MemVT) && + bool isTruncStoreLegal(MVT ValVT, MVT MemVT) const { + return MemVT.isSimple() && (getTruncStoreAction(ValVT, MemVT) == Legal || getTruncStoreAction(ValVT, MemVT) == Custom); } @@ -336,16 +344,17 @@ public: /// expanded to some other code sequence, or the target has a custom expander /// for it. LegalizeAction - getIndexedLoadAction(unsigned IdxMode, MVT::ValueType VT) const { + getIndexedLoadAction(unsigned IdxMode, MVT VT) const { assert(IdxMode < array_lengthof(IndexedModeActions[0]) && - VT < sizeof(IndexedModeActions[0][0])*4 && + (unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[0][0])*4 && "Table isn't big enough!"); - return (LegalizeAction)((IndexedModeActions[0][IdxMode] >> (2*VT)) & 3); + return (LegalizeAction)((IndexedModeActions[0][IdxMode] >> + (2*VT.getSimpleVT())) & 3); } /// isIndexedLoadLegal - Return true if the specified indexed load is legal /// on this target. - bool isIndexedLoadLegal(unsigned IdxMode, MVT::ValueType VT) const { + bool isIndexedLoadLegal(unsigned IdxMode, MVT VT) const { return getIndexedLoadAction(IdxMode, VT) == Legal || getIndexedLoadAction(IdxMode, VT) == Custom; } @@ -355,16 +364,17 @@ public: /// expanded to some other code sequence, or the target has a custom expander /// for it. LegalizeAction - getIndexedStoreAction(unsigned IdxMode, MVT::ValueType VT) const { + getIndexedStoreAction(unsigned IdxMode, MVT VT) const { assert(IdxMode < array_lengthof(IndexedModeActions[1]) && - VT < sizeof(IndexedModeActions[1][0])*4 && + (unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[1][0])*4 && "Table isn't big enough!"); - return (LegalizeAction)((IndexedModeActions[1][IdxMode] >> (2*VT)) & 3); + return (LegalizeAction)((IndexedModeActions[1][IdxMode] >> + (2*VT.getSimpleVT())) & 3); } /// isIndexedStoreLegal - Return true if the specified indexed load is legal /// on this target. - bool isIndexedStoreLegal(unsigned IdxMode, MVT::ValueType VT) const { + bool isIndexedStoreLegal(unsigned IdxMode, MVT VT) const { return getIndexedStoreAction(IdxMode, VT) == Legal || getIndexedStoreAction(IdxMode, VT) == Custom; } @@ -374,50 +384,52 @@ public: /// expanded to some other code sequence, or the target has a custom expander /// for it. 
LegalizeAction - getConvertAction(MVT::ValueType FromVT, MVT::ValueType ToVT) const { - assert(FromVT < array_lengthof(ConvertActions) && - ToVT < sizeof(ConvertActions[0])*4 && "Table isn't big enough!"); - return (LegalizeAction)((ConvertActions[FromVT] >> (2*ToVT)) & 3); + getConvertAction(MVT FromVT, MVT ToVT) const { + assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) && + (unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((ConvertActions[FromVT.getSimpleVT()] >> + (2*ToVT.getSimpleVT())) & 3); } /// isConvertLegal - Return true if the specified conversion is legal /// on this target. - bool isConvertLegal(MVT::ValueType FromVT, MVT::ValueType ToVT) const { + bool isConvertLegal(MVT FromVT, MVT ToVT) const { return getConvertAction(FromVT, ToVT) == Legal || getConvertAction(FromVT, ToVT) == Custom; } /// getTypeToPromoteTo - If the action for this operation is to promote, this /// method returns the ValueType to promote to. - MVT::ValueType getTypeToPromoteTo(unsigned Op, MVT::ValueType VT) const { + MVT getTypeToPromoteTo(unsigned Op, MVT VT) const { assert(getOperationAction(Op, VT) == Promote && "This operation isn't promoted!"); // See if this has an explicit type specified. - std::map<std::pair<unsigned, MVT::ValueType>, - MVT::ValueType>::const_iterator PTTI = + std::map<std::pair<unsigned, MVT>, + MVT>::const_iterator PTTI = PromoteToType.find(std::make_pair(Op, VT)); if (PTTI != PromoteToType.end()) return PTTI->second; - - assert((MVT::isInteger(VT) || MVT::isFloatingPoint(VT)) && + + assert((VT.isInteger() || VT.isFloatingPoint()) && "Cannot autopromote this type, add it with AddPromotedToType."); - MVT::ValueType NVT = VT; + MVT NVT = VT; do { - NVT = (MVT::ValueType)(NVT+1); - assert(MVT::isInteger(NVT) == MVT::isInteger(VT) && NVT != MVT::isVoid && + NVT = (MVT::SimpleValueType)(NVT.getSimpleVT()+1); + assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid && "Didn't find type to promote to!"); } while (!isTypeLegal(NVT) || getOperationAction(Op, NVT) == Promote); return NVT; } - /// getValueType - Return the MVT::ValueType corresponding to this LLVM type. + /// getValueType - Return the MVT corresponding to this LLVM type. /// This is fixed by the LLVM operations except for the pointer size. If /// AllowUnknown is true, this will return MVT::Other for types with no MVT /// counterpart (e.g. structs), otherwise it will assert. - MVT::ValueType getValueType(const Type *Ty, bool AllowUnknown = false) const { - MVT::ValueType VT = MVT::getValueType(Ty, AllowUnknown); + MVT getValueType(const Type *Ty, bool AllowUnknown = false) const { + MVT VT = MVT::getMVT(Ty, AllowUnknown); return VT == MVT::iPTR ? PointerTy : VT; } @@ -428,22 +440,22 @@ public: /// getRegisterType - Return the type of registers that this ValueType will /// eventually require. 
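
// A standalone illustrative sketch (the width table and legality flags are
// invented for the example). It mirrors the search in getTypeToPromoteTo
// above: step to the next wider integer type until one is legal and not itself
// marked Promote.
#include <cassert>

int main() {
  const unsigned Widths[] = {1, 8, 16, 32, 64};
  const bool     Legal [] = {false, false, false, true, true}; // i32/i64 legal
  unsigned I = 1;                       // start at i8, ask what it promotes to
  while (!Legal[I]) ++I;                // walk to the next legal width
  assert(Widths[I] == 32);              // i8 would promote to i32 here
  return 0;
}
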
- MVT::ValueType getRegisterType(MVT::ValueType VT) const { - if (!MVT::isExtendedVT(VT)) { - assert(VT < array_lengthof(RegisterTypeForVT)); - return RegisterTypeForVT[VT]; + MVT getRegisterType(MVT VT) const { + if (VT.isSimple()) { + assert((unsigned)VT.getSimpleVT() < array_lengthof(RegisterTypeForVT)); + return RegisterTypeForVT[VT.getSimpleVT()]; } - if (MVT::isVector(VT)) { - MVT::ValueType VT1, RegisterVT; + if (VT.isVector()) { + MVT VT1, RegisterVT; unsigned NumIntermediates; (void)getVectorTypeBreakdown(VT, VT1, NumIntermediates, RegisterVT); return RegisterVT; } - if (MVT::isInteger(VT)) { + if (VT.isInteger()) { return getRegisterType(getTypeToTransformTo(VT)); } assert(0 && "Unsupported extended type!"); - return MVT::ValueType(); // Not reached + return MVT(); // Not reached } /// getNumRegisters - Return the number of registers that this ValueType will @@ -452,19 +464,19 @@ public: /// into pieces. For types like i140, which are first promoted then expanded, /// it is the number of registers needed to hold all the bits of the original /// type. For an i140 on a 32 bit machine this means 5 registers. - unsigned getNumRegisters(MVT::ValueType VT) const { - if (!MVT::isExtendedVT(VT)) { - assert(VT < array_lengthof(NumRegistersForVT)); - return NumRegistersForVT[VT]; + unsigned getNumRegisters(MVT VT) const { + if (VT.isSimple()) { + assert((unsigned)VT.getSimpleVT() < array_lengthof(NumRegistersForVT)); + return NumRegistersForVT[VT.getSimpleVT()]; } - if (MVT::isVector(VT)) { - MVT::ValueType VT1, VT2; + if (VT.isVector()) { + MVT VT1, VT2; unsigned NumIntermediates; return getVectorTypeBreakdown(VT, VT1, NumIntermediates, VT2); } - if (MVT::isInteger(VT)) { - unsigned BitWidth = MVT::getSizeInBits(VT); - unsigned RegWidth = MVT::getSizeInBits(getRegisterType(VT)); + if (VT.isInteger()) { + unsigned BitWidth = VT.getSizeInBits(); + unsigned RegWidth = getRegisterType(VT).getSizeInBits(); return (BitWidth + RegWidth - 1) / RegWidth; } assert(0 && "Unsupported extended type!"); @@ -474,7 +486,7 @@ public: /// ShouldShrinkFPConstant - If true, then instruction selection should /// seek to shrink the FP constant of the specified type to a smaller type /// in order to save space and / or reduce runtime. - virtual bool ShouldShrinkFPConstant(MVT::ValueType VT) const { return true; } + virtual bool ShouldShrinkFPConstant(MVT VT) const { return true; } /// hasTargetDAGCombine - If true, the target has custom DAG combine /// transformations that it can perform for the specified node. @@ -515,8 +527,8 @@ public: /// and store operations as a result of memset, memcpy, and memmove lowering. /// It returns MVT::iAny if SelectionDAG should be responsible for /// determining it. - virtual MVT::ValueType getOptimalMemOpType(uint64_t Size, unsigned Align, - bool isSrcConst, bool isSrcStr) const { + virtual MVT getOptimalMemOpType(uint64_t Size, unsigned Align, + bool isSrcConst, bool isSrcStr) const { return MVT::iAny; } @@ -687,7 +699,7 @@ public: /// SimplifySetCC - Try to simplify a setcc built with the specified operands /// and cc. If it is unable to simplify it, return a null SDOperand. - SDOperand SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1, + SDOperand SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI) const; @@ -729,7 +741,7 @@ protected: /// setShiftAmountType - Describe the type that should be used for shift /// amounts. This type defaults to the pointer type. 
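
// A standalone illustrative sketch. It reproduces the arithmetic from
// getNumRegisters above: an expanded integer needs ceil(BitWidth / RegWidth)
// registers, e.g. the i140-on-a-32-bit-machine case mentioned in its comment.
#include <cassert>

static unsigned numRegisters(unsigned BitWidth, unsigned RegWidth) {
  return (BitWidth + RegWidth - 1) / RegWidth;   // round up
}

int main() {
  assert(numRegisters(140, 32) == 5);   // i140 -> 5 x i32
  assert(numRegisters(64, 32)  == 2);   // i64  -> 2 x i32
  assert(numRegisters(32, 32)  == 1);
  return 0;
}
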
- void setShiftAmountType(MVT::ValueType VT) { ShiftAmountTy = VT; } + void setShiftAmountType(MVT VT) { ShiftAmountTy = VT; } /// setSetCCResultContents - Specify how the target extends the result of a /// setcc operation in a register. @@ -798,10 +810,10 @@ protected: /// addRegisterClass - Add the specified register class as an available /// regclass for the specified value type. This indicates the selector can /// handle values of that class natively. - void addRegisterClass(MVT::ValueType VT, TargetRegisterClass *RC) { - assert(VT < array_lengthof(RegClassForVT)); + void addRegisterClass(MVT VT, TargetRegisterClass *RC) { + assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT)); AvailableRegClasses.push_back(std::make_pair(VT, RC)); - RegClassForVT[VT] = RC; + RegClassForVT[VT.getSimpleVT()] = RC; } /// computeRegisterProperties - Once all of the register classes are added, @@ -810,77 +822,82 @@ protected: /// setOperationAction - Indicate that the specified operation does not work /// with the specified type and indicate what to do about it. - void setOperationAction(unsigned Op, MVT::ValueType VT, + void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) { - assert(VT < sizeof(OpActions[0])*4 && Op < array_lengthof(OpActions) && - "Table isn't big enough!"); - OpActions[Op] &= ~(uint64_t(3UL) << VT*2); - OpActions[Op] |= (uint64_t)Action << VT*2; + assert((unsigned)VT.getSimpleVT() < sizeof(OpActions[0])*4 && + Op < array_lengthof(OpActions) && "Table isn't big enough!"); + OpActions[Op] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); + OpActions[Op] |= (uint64_t)Action << VT.getSimpleVT()*2; } /// setLoadXAction - Indicate that the specified load with extension does not /// work with the with specified type and indicate what to do about it. - void setLoadXAction(unsigned ExtType, MVT::ValueType VT, + void setLoadXAction(unsigned ExtType, MVT VT, LegalizeAction Action) { - assert(VT < sizeof(LoadXActions[0])*4 && + assert((unsigned)VT.getSimpleVT() < sizeof(LoadXActions[0])*4 && ExtType < array_lengthof(LoadXActions) && "Table isn't big enough!"); - LoadXActions[ExtType] &= ~(uint64_t(3UL) << VT*2); - LoadXActions[ExtType] |= (uint64_t)Action << VT*2; + LoadXActions[ExtType] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); + LoadXActions[ExtType] |= (uint64_t)Action << VT.getSimpleVT()*2; } /// setTruncStoreAction - Indicate that the specified truncating store does /// not work with the with specified type and indicate what to do about it. - void setTruncStoreAction(MVT::ValueType ValVT, MVT::ValueType MemVT, + void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) { - assert(ValVT < array_lengthof(TruncStoreActions) && - MemVT < sizeof(TruncStoreActions[0])*4 && "Table isn't big enough!"); - TruncStoreActions[ValVT] &= ~(uint64_t(3UL) << MemVT*2); - TruncStoreActions[ValVT] |= (uint64_t)Action << MemVT*2; + assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) && + (unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 && + "Table isn't big enough!"); + TruncStoreActions[ValVT.getSimpleVT()] &= ~(uint64_t(3UL) << + MemVT.getSimpleVT()*2); + TruncStoreActions[ValVT.getSimpleVT()] |= (uint64_t)Action << + MemVT.getSimpleVT()*2; } /// setIndexedLoadAction - Indicate that the specified indexed load does or /// does not work with the with specified type and indicate what to do abort /// it. 
NOTE: All indexed mode loads are initialized to Expand in /// TargetLowering.cpp - void setIndexedLoadAction(unsigned IdxMode, MVT::ValueType VT, + void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action) { - assert(VT < sizeof(IndexedModeActions[0])*4 && IdxMode < - array_lengthof(IndexedModeActions[0]) && + assert((unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[0])*4 && + IdxMode < array_lengthof(IndexedModeActions[0]) && "Table isn't big enough!"); - IndexedModeActions[0][IdxMode] &= ~(uint64_t(3UL) << VT*2); - IndexedModeActions[0][IdxMode] |= (uint64_t)Action << VT*2; + IndexedModeActions[0][IdxMode] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); + IndexedModeActions[0][IdxMode] |= (uint64_t)Action << VT.getSimpleVT()*2; } /// setIndexedStoreAction - Indicate that the specified indexed store does or /// does not work with the with specified type and indicate what to do about /// it. NOTE: All indexed mode stores are initialized to Expand in /// TargetLowering.cpp - void setIndexedStoreAction(unsigned IdxMode, MVT::ValueType VT, + void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action) { - assert(VT < sizeof(IndexedModeActions[1][0])*4 && + assert((unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[1][0])*4 && IdxMode < array_lengthof(IndexedModeActions[1]) && "Table isn't big enough!"); - IndexedModeActions[1][IdxMode] &= ~(uint64_t(3UL) << VT*2); - IndexedModeActions[1][IdxMode] |= (uint64_t)Action << VT*2; + IndexedModeActions[1][IdxMode] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); + IndexedModeActions[1][IdxMode] |= (uint64_t)Action << VT.getSimpleVT()*2; } /// setConvertAction - Indicate that the specified conversion does or does /// not work with the with specified type and indicate what to do about it. - void setConvertAction(MVT::ValueType FromVT, MVT::ValueType ToVT, + void setConvertAction(MVT FromVT, MVT ToVT, LegalizeAction Action) { - assert(FromVT < array_lengthof(ConvertActions) && - ToVT < sizeof(ConvertActions[0])*4 && "Table isn't big enough!"); - ConvertActions[FromVT] &= ~(uint64_t(3UL) << ToVT*2); - ConvertActions[FromVT] |= (uint64_t)Action << ToVT*2; + assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) && + (unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 && + "Table isn't big enough!"); + ConvertActions[FromVT.getSimpleVT()] &= ~(uint64_t(3UL) << + ToVT.getSimpleVT()*2); + ConvertActions[FromVT.getSimpleVT()] |= (uint64_t)Action << + ToVT.getSimpleVT()*2; } /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the /// promotion code defaults to trying a larger integer/fp until it can find /// one that works. If that default is insufficient, this method can be used /// by the target to override the default. - void AddPromotedToType(unsigned Opc, MVT::ValueType OrigVT, - MVT::ValueType DestVT) { + void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) { PromoteToType[std::make_pair(Opc, OrigVT)] = DestVT; } @@ -1121,7 +1138,7 @@ public: Value *CallOperandVal; /// ConstraintVT - The ValueType for the operand value. - MVT::ValueType ConstraintVT; + MVT ConstraintVT; AsmOperandInfo(const InlineAsm::ConstraintInfo &info) : InlineAsm::ConstraintInfo(info), @@ -1148,7 +1165,7 @@ public: /// This should only be used for C_RegisterClass constraints. virtual std::vector<unsigned> getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g. 
/// {edx}), return the register number and the register class for the @@ -1162,13 +1179,13 @@ public: /// this returns a register number of 0 and a null register class pointer.. virtual std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; /// LowerXConstraint - try to replace an X constraint, which matches anything, /// with another that has more specific requirements based on the type of the /// corresponding operand. This returns null if there is no replacement to /// make. - virtual const char *LowerXConstraint(MVT::ValueType ConstraintVT) const; + virtual const char *LowerXConstraint(MVT ConstraintVT) const; /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops /// vector. If it is invalid, don't add anything to Ops. @@ -1220,7 +1237,7 @@ public: return false; } - virtual bool isTruncateFree(MVT::ValueType VT1, MVT::ValueType VT2) const { + virtual bool isTruncateFree(MVT VT1, MVT VT2) const { return false; } @@ -1271,7 +1288,7 @@ private: /// PointerTy - The type to use for pointers, usually i32 or i64. /// - MVT::ValueType PointerTy; + MVT PointerTy; /// UsesGlobalOffsetTable - True if this target uses a GOT for PIC codegen. /// @@ -1279,7 +1296,7 @@ private: /// ShiftAmountTy - The type to use for shift amounts, usually i8 or whatever /// PointerTy is. - MVT::ValueType ShiftAmountTy; + MVT ShiftAmountTy; OutOfRangeShiftAmount ShiftAmtHandling; @@ -1352,14 +1369,14 @@ private: /// each ValueType the target supports natively. TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE]; unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE]; - MVT::ValueType RegisterTypeForVT[MVT::LAST_VALUETYPE]; + MVT RegisterTypeForVT[MVT::LAST_VALUETYPE]; /// TransformToType - For any value types we are promoting or expanding, this /// contains the value type that we are changing to. For Expanded types, this /// contains one step of the expand (e.g. i64 -> i32), even if there are /// multiple steps required (e.g. i64 -> i16). For types natively supported /// by the system, this holds the same type (e.g. i32 -> i32). - MVT::ValueType TransformToType[MVT::LAST_VALUETYPE]; + MVT TransformToType[MVT::LAST_VALUETYPE]; // Defines the capacity of the TargetLowering::OpActions table static const int OpActionsCapacity = 176; @@ -1396,8 +1413,7 @@ private: std::vector<APFloat> LegalFPImmediates; - std::vector<std::pair<MVT::ValueType, - TargetRegisterClass*> > AvailableRegClasses; + std::vector<std::pair<MVT, TargetRegisterClass*> > AvailableRegClasses; /// TargetDAGCombineArray - Targets can specify ISD nodes that they would /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(), @@ -1411,7 +1427,7 @@ private: /// /// Targets add entries to this map with AddPromotedToType(..), clients access /// this with getTypeToPromoteTo(..). - std::map<std::pair<unsigned, MVT::ValueType>, MVT::ValueType> PromoteToType; + std::map<std::pair<unsigned, MVT>, MVT> PromoteToType; /// LibcallRoutineNames - Stores the name each libcall. 
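
// A standalone illustrative sketch (the table below is an assumption for the
// example). It mirrors the TransformToType idea described above: each entry
// records one step, and an illegal type is walked step by step until a legal
// one is reached, e.g. i64 -> i32 on a hypothetical 32-bit target.
#include <cassert>
#include <map>

int main() {
  // width -> the width it transforms to in one step (legal widths map to self)
  std::map<unsigned, unsigned> TransformTo = {{64, 32}, {32, 32}, {16, 32}};
  unsigned W = 64;
  while (TransformTo[W] != W)       // follow one step at a time
    W = TransformTo[W];
  assert(W == 32);                  // i64 ends up handled as i32 pieces
  return 0;
}
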
/// diff --git a/include/llvm/Target/TargetRegisterInfo.h b/include/llvm/Target/TargetRegisterInfo.h index f606c88..08b9bfc 100644 --- a/include/llvm/Target/TargetRegisterInfo.h +++ b/include/llvm/Target/TargetRegisterInfo.h @@ -61,7 +61,7 @@ public: typedef const unsigned* iterator; typedef const unsigned* const_iterator; - typedef const MVT::ValueType* vt_iterator; + typedef const MVT* vt_iterator; typedef const TargetRegisterClass* const * sc_iterator; private: unsigned ID; @@ -76,7 +76,7 @@ private: const iterator RegsBegin, RegsEnd; public: TargetRegisterClass(unsigned id, - const MVT::ValueType *vts, + const MVT *vts, const TargetRegisterClass * const *subcs, const TargetRegisterClass * const *supcs, const TargetRegisterClass * const *subregcs, @@ -118,7 +118,7 @@ public: /// hasType - return true if this TargetRegisterClass has the ValueType vt. /// - bool hasType(MVT::ValueType vt) const { + bool hasType(MVT vt) const { for(int i = 0; VTs[i] != MVT::Other; ++i) if (VTs[i] == vt) return true; @@ -324,7 +324,7 @@ public: /// register of the given type. If type is MVT::Other, then just return any /// register class the register belongs to. const TargetRegisterClass *getPhysicalRegisterRegClass(unsigned Reg, - MVT::ValueType VT = MVT::Other) const; + MVT VT = MVT::Other) const; /// getAllocatableSet - Returns a bitset indexed by register number /// indicating if a register is allocatable or not. If a register class is diff --git a/lib/CodeGen/SelectionDAG/CallingConvLower.cpp b/lib/CodeGen/SelectionDAG/CallingConvLower.cpp index 667870d..5dcf4ac 100644 --- a/lib/CodeGen/SelectionDAG/CallingConvLower.cpp +++ b/lib/CodeGen/SelectionDAG/CallingConvLower.cpp @@ -32,8 +32,8 @@ CCState::CCState(unsigned CC, bool isVarArg, const TargetMachine &tm, // HandleByVal - Allocate a stack slot large enough to pass an argument by // value. The size and alignment information of the argument is encoded in its // parameter attribute. -void CCState::HandleByVal(unsigned ValNo, MVT::ValueType ValVT, - MVT::ValueType LocVT, CCValAssign::LocInfo LocInfo, +void CCState::HandleByVal(unsigned ValNo, MVT ValVT, + MVT LocVT, CCValAssign::LocInfo LocInfo, int MinSize, int MinAlign, ISD::ArgFlagsTy ArgFlags) { unsigned Align = ArgFlags.getByValAlign(); @@ -62,12 +62,12 @@ void CCState::AnalyzeFormalArguments(SDNode *TheArgs, CCAssignFn Fn) { unsigned NumArgs = TheArgs->getNumValues()-1; for (unsigned i = 0; i != NumArgs; ++i) { - MVT::ValueType ArgVT = TheArgs->getValueType(i); + MVT ArgVT = TheArgs->getValueType(i); ISD::ArgFlagsTy ArgFlags = cast<ARG_FLAGSSDNode>(TheArgs->getOperand(3+i))->getArgFlags(); if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) { cerr << "Formal argument #" << i << " has unhandled type " - << MVT::getValueTypeString(ArgVT) << "\n"; + << ArgVT.getMVTString() << "\n"; abort(); } } @@ -78,12 +78,12 @@ void CCState::AnalyzeFormalArguments(SDNode *TheArgs, CCAssignFn Fn) { void CCState::AnalyzeReturn(SDNode *TheRet, CCAssignFn Fn) { // Determine which register each value should be copied into. 
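
// A standalone illustrative sketch (AssignFn and the widths are invented for
// the example). It mirrors the shape of the CCState::Analyze* loops here: each
// value's type is fed to an assignment callback, and an unhandled type is a
// hard error.
#include <cstdio>
#include <cstdlib>

// Returns false on success (like a CCAssignFn), true if the type is unhandled.
static bool AssignFn(unsigned ValNo, unsigned Bits) {
  (void)ValNo;                         // unused in this sketch
  return Bits != 32 && Bits != 64;     // only 32- and 64-bit values "handled"
}

int main() {
  const unsigned ArgBits[] = {32, 64, 32};
  for (unsigned i = 0; i != 3; ++i)
    if (AssignFn(i, ArgBits[i])) {
      std::fprintf(stderr, "Formal argument #%u has unhandled type\n", i);
      std::abort();
    }
  return 0;
}
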
for (unsigned i = 0, e = TheRet->getNumOperands() / 2; i != e; ++i) { - MVT::ValueType VT = TheRet->getOperand(i*2+1).getValueType(); + MVT VT = TheRet->getOperand(i*2+1).getValueType(); ISD::ArgFlagsTy ArgFlags = cast<ARG_FLAGSSDNode>(TheRet->getOperand(i*2+2))->getArgFlags(); if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)){ cerr << "Return operand #" << i << " has unhandled type " - << MVT::getValueTypeString(VT) << "\n"; + << VT.getMVTString() << "\n"; abort(); } } @@ -95,12 +95,12 @@ void CCState::AnalyzeReturn(SDNode *TheRet, CCAssignFn Fn) { void CCState::AnalyzeCallOperands(SDNode *TheCall, CCAssignFn Fn) { unsigned NumOps = (TheCall->getNumOperands() - 5) / 2; for (unsigned i = 0; i != NumOps; ++i) { - MVT::ValueType ArgVT = TheCall->getOperand(5+2*i).getValueType(); + MVT ArgVT = TheCall->getOperand(5+2*i).getValueType(); ISD::ArgFlagsTy ArgFlags = cast<ARG_FLAGSSDNode>(TheCall->getOperand(5+2*i+1))->getArgFlags(); if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) { cerr << "Call operand #" << i << " has unhandled type " - << MVT::getValueTypeString(ArgVT) << "\n"; + << ArgVT.getMVTString() << "\n"; abort(); } } @@ -110,10 +110,10 @@ void CCState::AnalyzeCallOperands(SDNode *TheCall, CCAssignFn Fn) { /// incorporating info about the passed values into this state. void CCState::AnalyzeCallResult(SDNode *TheCall, CCAssignFn Fn) { for (unsigned i = 0, e = TheCall->getNumValues() - 1; i != e; ++i) { - MVT::ValueType VT = TheCall->getValueType(i); + MVT VT = TheCall->getValueType(i); if (Fn(i, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) { cerr << "Call result #" << i << " has unhandled type " - << MVT::getValueTypeString(VT) << "\n"; + << VT.getMVTString() << "\n"; abort(); } } diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 31cbdb9..d957bf9 100644 --- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -215,12 +215,12 @@ namespace { SDOperand SimplifySelectCC(SDOperand N0, SDOperand N1, SDOperand N2, SDOperand N3, ISD::CondCode CC, bool NotExtCompare = false); - SDOperand SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1, + SDOperand SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, ISD::CondCode Cond, bool foldBooleans = true); SDOperand SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp, unsigned HiOp); - SDOperand CombineConsecutiveLoads(SDNode *N, MVT::ValueType VT); - SDOperand ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *, MVT::ValueType); + SDOperand CombineConsecutiveLoads(SDNode *N, MVT VT); + SDOperand ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *, MVT); SDOperand BuildSDIV(SDNode *N); SDOperand BuildUDIV(SDNode *N); SDNode *MatchRotate(SDOperand LHS, SDOperand RHS); @@ -482,7 +482,7 @@ static bool isOneUseSetCC(SDOperand N) { } SDOperand DAGCombiner::ReassociateOps(unsigned Opc, SDOperand N0, SDOperand N1){ - MVT::ValueType VT = N0.getValueType(); + MVT VT = N0.getValueType(); // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one use // reassoc. 
(op (op x, c1), c2) -> (op x, (op c1, c2)) if (N0.getOpcode() == Opc && isa<ConstantSDNode>(N0.getOperand(1))) { @@ -887,7 +887,7 @@ SDOperand DAGCombiner::visitMERGE_VALUES(SDNode *N) { static SDOperand combineShlAddConstant(SDOperand N0, SDOperand N1, SelectionDAG &DAG) { - MVT::ValueType VT = N0.getValueType(); + MVT VT = N0.getValueType(); SDOperand N00 = N0.getOperand(0); SDOperand N01 = N0.getOperand(1); ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N01); @@ -904,7 +904,7 @@ SDOperand combineShlAddConstant(SDOperand N0, SDOperand N1, SelectionDAG &DAG) { static SDOperand combineSelectAndUse(SDNode *N, SDOperand Slct, SDOperand OtherOp, SelectionDAG &DAG) { - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); unsigned Opc = N->getOpcode(); bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; SDOperand LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); @@ -930,8 +930,8 @@ SDOperand combineSelectAndUse(SDNode *N, SDOperand Slct, SDOperand OtherOp, cast<ConstantSDNode>(RHS)->isNullValue()) { std::swap(LHS, RHS); SDOperand Op0 = Slct.getOperand(0); - bool isInt = MVT::isInteger(isSlctCC ? Op0.getValueType() - : Op0.getOperand(0).getValueType()); + bool isInt = (isSlctCC ? Op0.getValueType() : + Op0.getOperand(0).getValueType()).isInteger(); CC = ISD::getSetCCInverse(CC, isInt); DoXform = true; InvCC = true; @@ -956,10 +956,10 @@ SDOperand DAGCombiner::visitADD(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); - MVT::ValueType VT = N0.getValueType(); + MVT VT = N0.getValueType(); // fold vector ops - if (MVT::isVector(VT)) { + if (VT.isVector()) { SDOperand FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -1001,14 +1001,14 @@ SDOperand DAGCombiner::visitADD(SDNode *N) { if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1)) return N1.getOperand(0); - if (!MVT::isVector(VT) && SimplifyDemandedBits(SDOperand(N, 0))) + if (!VT.isVector() && SimplifyDemandedBits(SDOperand(N, 0))) return SDOperand(N, 0); // fold (a+b) -> (a|b) iff a and b share no bits. - if (MVT::isInteger(VT) && !MVT::isVector(VT)) { + if (VT.isInteger() && !VT.isVector()) { APInt LHSZero, LHSOne; APInt RHSZero, RHSOne; - APInt Mask = APInt::getAllOnesValue(MVT::getSizeInBits(VT)); + APInt Mask = APInt::getAllOnesValue(VT.getSizeInBits()); DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne); if (LHSZero.getBoolValue()) { DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne); @@ -1049,7 +1049,7 @@ SDOperand DAGCombiner::visitADDC(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); - MVT::ValueType VT = N0.getValueType(); + MVT VT = N0.getValueType(); // If the flag result is dead, turn this into an ADD. if (N->hasNUsesOfValue(0, 1)) @@ -1069,7 +1069,7 @@ SDOperand DAGCombiner::visitADDC(SDNode *N) { // fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits. 
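
// A standalone illustrative sketch. It demonstrates the fold named in the
// comment just above: when two values provably share no set bits, a + b and
// a | b produce the same result, so the add can be rewritten as an or.
#include <cassert>
#include <cstdint>

int main() {
  uint32_t KnownZeroA = 0xFFFF0000u;   // a's high 16 bits are known zero
  uint32_t KnownZeroB = 0x0000FFFFu;   // b's low 16 bits are known zero
  // The fold applies when every bit is known zero in at least one operand:
  bool disjoint = (~KnownZeroA & ~KnownZeroB) == 0;
  assert(disjoint);
  uint32_t a = 0x00001234u, b = 0x56780000u;  // values consistent with that
  assert((a + b) == (a | b));
  return 0;
}
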
APInt LHSZero, LHSOne; APInt RHSZero, RHSOne; - APInt Mask = APInt::getAllOnesValue(MVT::getSizeInBits(VT)); + APInt Mask = APInt::getAllOnesValue(VT.getSizeInBits()); DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne); if (LHSZero.getBoolValue()) { DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne); @@ -1091,7 +1091,7 @@ SDOperand DAGCombiner::visitADDE(SDNode *N) { SDOperand CarryIn = N->getOperand(2); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); - //MVT::ValueType VT = N0.getValueType(); + //MVT VT = N0.getValueType(); // canonicalize constant to RHS if (N0C && !N1C) { @@ -1115,10 +1115,10 @@ SDOperand DAGCombiner::visitSUB(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.Val); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val); - MVT::ValueType VT = N0.getValueType(); + MVT VT = N0.getValueType(); // fold vector ops - if (MVT::isVector(VT)) { + if (VT.isVector()) { SDOperand FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -1158,10 +1158,10 @@ SDOperand DAGCombiner::visitMUL(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); - MVT::ValueType VT = N0.getValueType(); + MVT VT = N0.getValueType(); // fold vector ops - if (MVT::isVector(VT)) { + if (VT.isVector()) { SDOperand FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -1242,10 +1242,10 @@ SDOperand DAGCombiner::visitSDIV(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.Val); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold vector ops - if (MVT::isVector(VT)) { + if (VT.isVector()) { SDOperand FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -1261,7 +1261,7 @@ SDOperand DAGCombiner::visitSDIV(SDNode *N) { return DAG.getNode(ISD::SUB, VT, DAG.getConstant(0, VT), N0); // If we know the sign bits of both operands are zero, strength reduce to a // udiv instead. Handles (X&15) /s 4 -> X&15 >> 2 - if (!MVT::isVector(VT)) { + if (!VT.isVector()) { if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0)) return DAG.getNode(ISD::UDIV, N1.getValueType(), N0, N1); } @@ -1278,12 +1278,12 @@ SDOperand DAGCombiner::visitSDIV(SDNode *N) { unsigned lg2 = Log2_64(abs2); // Splat the sign bit into the register SDOperand SGN = DAG.getNode(ISD::SRA, VT, N0, - DAG.getConstant(MVT::getSizeInBits(VT)-1, + DAG.getConstant(VT.getSizeInBits()-1, TLI.getShiftAmountTy())); AddToWorkList(SGN.Val); // Add (N0 < 0) ? 
abs2 - 1 : 0; SDOperand SRL = DAG.getNode(ISD::SRL, VT, SGN, - DAG.getConstant(MVT::getSizeInBits(VT)-lg2, + DAG.getConstant(VT.getSizeInBits()-lg2, TLI.getShiftAmountTy())); SDOperand ADD = DAG.getNode(ISD::ADD, VT, N0, SRL); AddToWorkList(SRL.Val); @@ -1320,10 +1320,10 @@ SDOperand DAGCombiner::visitUDIV(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.Val); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold vector ops - if (MVT::isVector(VT)) { + if (VT.isVector()) { SDOperand FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -1340,7 +1340,7 @@ SDOperand DAGCombiner::visitUDIV(SDNode *N) { if (N1.getOpcode() == ISD::SHL) { if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) { if (SHC->getAPIntValue().isPowerOf2()) { - MVT::ValueType ADDVT = N1.getOperand(1).getValueType(); + MVT ADDVT = N1.getOperand(1).getValueType(); SDOperand Add = DAG.getNode(ISD::ADD, ADDVT, N1.getOperand(1), DAG.getConstant(SHC->getAPIntValue() .logBase2(), @@ -1371,14 +1371,14 @@ SDOperand DAGCombiner::visitSREM(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (srem c1, c2) -> c1%c2 if (N0C && N1C && !N1C->isNullValue()) return DAG.getNode(ISD::SREM, VT, N0, N1); // If we know the sign bits of both operands are zero, strength reduce to a // urem instead. Handles (X & 0x0FFFFFFF) %s 16 -> X&15 - if (!MVT::isVector(VT)) { + if (!VT.isVector()) { if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0)) return DAG.getNode(ISD::UREM, VT, N0, N1); } @@ -1412,7 +1412,7 @@ SDOperand DAGCombiner::visitUREM(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (urem c1, c2) -> c1%c2 if (N0C && N1C && !N1C->isNullValue()) @@ -1427,7 +1427,7 @@ SDOperand DAGCombiner::visitUREM(SDNode *N) { if (SHC->getAPIntValue().isPowerOf2()) { SDOperand Add = DAG.getNode(ISD::ADD, VT, N1, - DAG.getConstant(APInt::getAllOnesValue(MVT::getSizeInBits(VT)), + DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT)); AddToWorkList(Add.Val); return DAG.getNode(ISD::AND, VT, N0, Add); @@ -1462,7 +1462,7 @@ SDOperand DAGCombiner::visitMULHS(SDNode *N) { SDOperand N0 = N->getOperand(0); SDOperand N1 = N->getOperand(1); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (mulhs x, 0) -> 0 if (N1C && N1C->isNullValue()) @@ -1470,7 +1470,7 @@ SDOperand DAGCombiner::visitMULHS(SDNode *N) { // fold (mulhs x, 1) -> (sra x, size(x)-1) if (N1C && N1C->getAPIntValue() == 1) return DAG.getNode(ISD::SRA, N0.getValueType(), N0, - DAG.getConstant(MVT::getSizeInBits(N0.getValueType())-1, + DAG.getConstant(N0.getValueType().getSizeInBits()-1, TLI.getShiftAmountTy())); // fold (mulhs x, undef) -> 0 if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) @@ -1483,7 +1483,7 @@ SDOperand DAGCombiner::visitMULHU(SDNode *N) { SDOperand N0 = N->getOperand(0); SDOperand N1 = N->getOperand(1); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (mulhu x, 0) -> 0 if (N1C && 
N1C->isNullValue()) @@ -1583,7 +1583,7 @@ SDOperand DAGCombiner::visitUDIVREM(SDNode *N) { /// two operands of the same opcode, try to simplify it. SDOperand DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) { SDOperand N0 = N->getOperand(0), N1 = N->getOperand(1); - MVT::ValueType VT = N0.getValueType(); + MVT VT = N0.getValueType(); assert(N0.getOpcode() == N1.getOpcode() && "Bad input!"); // For each of OP in AND/OR/XOR: @@ -1624,11 +1624,11 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { SDOperand LL, LR, RL, RR, CC0, CC1; ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); - MVT::ValueType VT = N1.getValueType(); - unsigned BitWidth = MVT::getSizeInBits(VT); + MVT VT = N1.getValueType(); + unsigned BitWidth = VT.getSizeInBits(); // fold vector ops - if (MVT::isVector(VT)) { + if (VT.isVector()) { SDOperand FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -1683,7 +1683,7 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get(); if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 && - MVT::isInteger(LL.getValueType())) { + LL.getValueType().isInteger()) { // fold (X == 0) & (Y == 0) -> (X|Y == 0) if (cast<ConstantSDNode>(LR)->isNullValue() && Op1 == ISD::SETEQ) { SDOperand ORNode = DAG.getNode(ISD::OR, LR.getValueType(), LL, RL); @@ -1709,7 +1709,7 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { std::swap(RL, RR); } if (LL == RL && LR == RR) { - bool isInteger = MVT::isInteger(LL.getValueType()); + bool isInteger = LL.getValueType().isInteger(); ISD::CondCode Result = ISD::getSetCCAndOperation(Op0, Op1, isInteger); if (Result != ISD::SETCC_INVALID) return DAG.getSetCC(N0.getValueType(), LL, LR, Result); @@ -1724,18 +1724,18 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1) // fold (and (sra)) -> (and (srl)) when possible. - if (!MVT::isVector(VT) && + if (!VT.isVector() && SimplifyDemandedBits(SDOperand(N, 0))) return SDOperand(N, 0); // fold (zext_inreg (extload x)) -> (zextload x) if (ISD::isEXTLoad(N0.Val) && ISD::isUNINDEXEDLoad(N0.Val)) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); - MVT::ValueType EVT = LN0->getMemoryVT(); + MVT EVT = LN0->getMemoryVT(); // If we zero all the possible extended bits, then we can turn this into // a zextload if we are running before legalize or the operation is legal. unsigned BitWidth = N1.getValueSizeInBits(); if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth, - BitWidth - MVT::getSizeInBits(EVT))) && + BitWidth - EVT.getSizeInBits())) && (!AfterLegalize || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) { SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), @@ -1751,12 +1751,12 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { if (ISD::isSEXTLoad(N0.Val) && ISD::isUNINDEXEDLoad(N0.Val) && N0.hasOneUse()) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); - MVT::ValueType EVT = LN0->getMemoryVT(); + MVT EVT = LN0->getMemoryVT(); // If we zero all the possible extended bits, then we can turn this into // a zextload if we are running before legalize or the operation is legal. 
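
// A standalone illustrative sketch. It spells out one of the setcc folds from
// visitAND above: (X == 0) & (Y == 0) is equivalent to ((X | Y) == 0), since
// an or is zero exactly when both inputs are zero.
#include <cassert>
#include <cstdint>
#include <initializer_list>

static bool foldedForm(uint32_t X, uint32_t Y) { return (X | Y) == 0; }

int main() {
  for (uint32_t X : {0u, 1u, 0x80000000u})
    for (uint32_t Y : {0u, 7u})
      assert(((X == 0) && (Y == 0)) == foldedForm(X, Y));
  return 0;
}
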
unsigned BitWidth = N1.getValueSizeInBits(); if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth, - BitWidth - MVT::getSizeInBits(EVT))) && + BitWidth - EVT.getSizeInBits())) && (!AfterLegalize || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) { SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), @@ -1775,7 +1775,7 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); if (LN0->getExtensionType() != ISD::SEXTLOAD && LN0->isUnindexed() && N0.hasOneUse()) { - MVT::ValueType EVT, LoadedVT; + MVT EVT, LoadedVT; if (N1C->getAPIntValue() == 255) EVT = MVT::i8; else if (N1C->getAPIntValue() == 65535) @@ -1788,12 +1788,12 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { LoadedVT = LN0->getMemoryVT(); if (EVT != MVT::Other && LoadedVT > EVT && (!AfterLegalize || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) { - MVT::ValueType PtrType = N0.getOperand(1).getValueType(); + MVT PtrType = N0.getOperand(1).getValueType(); // For big endian targets, we need to add an offset to the pointer to // load the correct bytes. For little endian systems, we merely need to // read fewer bytes from the same pointer. - unsigned LVTStoreBytes = MVT::getStoreSizeInBits(LoadedVT)/8; - unsigned EVTStoreBytes = MVT::getStoreSizeInBits(EVT)/8; + unsigned LVTStoreBytes = LoadedVT.getStoreSizeInBits()/8; + unsigned EVTStoreBytes = EVT.getStoreSizeInBits()/8; unsigned PtrOff = LVTStoreBytes - EVTStoreBytes; unsigned Alignment = LN0->getAlignment(); SDOperand NewPtr = LN0->getBasePtr(); @@ -1823,10 +1823,10 @@ SDOperand DAGCombiner::visitOR(SDNode *N) { SDOperand LL, LR, RL, RR, CC0, CC1; ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); - MVT::ValueType VT = N1.getValueType(); + MVT VT = N1.getValueType(); // fold vector ops - if (MVT::isVector(VT)) { + if (VT.isVector()) { SDOperand FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -1868,7 +1868,7 @@ SDOperand DAGCombiner::visitOR(SDNode *N) { ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get(); if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 && - MVT::isInteger(LL.getValueType())) { + LL.getValueType().isInteger()) { // fold (X != 0) | (Y != 0) -> (X|Y != 0) // fold (X < 0) | (Y < 0) -> (X|Y < 0) if (cast<ConstantSDNode>(LR)->isNullValue() && @@ -1892,7 +1892,7 @@ SDOperand DAGCombiner::visitOR(SDNode *N) { std::swap(RL, RR); } if (LL == RL && LR == RR) { - bool isInteger = MVT::isInteger(LL.getValueType()); + bool isInteger = LL.getValueType().isInteger(); ISD::CondCode Result = ISD::getSetCCOrOperation(Op0, Op1, isInteger); if (Result != ISD::SETCC_INVALID) return DAG.getSetCC(N0.getValueType(), LL, LR, Result); @@ -1959,7 +1959,7 @@ static bool MatchRotateHalf(SDOperand Op, SDOperand &Shift, SDOperand &Mask) { // a rot[lr]. SDNode *DAGCombiner::MatchRotate(SDOperand LHS, SDOperand RHS) { // Must be a legal type. Expanded an promoted things won't work with rotates. - MVT::ValueType VT = LHS.getValueType(); + MVT VT = LHS.getValueType(); if (!TLI.isTypeLegal(VT)) return 0; // The target must have at least one rotate flavor. 
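
// A standalone illustrative sketch. It shows the identity MatchRotate is
// looking for in the surrounding code: an OR of a left shift and a right
// shift of the same value, whose shift amounts sum to the bit width, is a
// rotate.
#include <cassert>
#include <cstdint>

static uint32_t rotl32(uint32_t X, unsigned C) {
  C &= 31;
  return C ? (X << C) | (X >> (32 - C)) : X;
}

int main() {
  uint32_t X = 0x12345678u;
  unsigned C = 8;
  assert(((X << C) | (X >> (32 - C))) == rotl32(X, C));
  assert(rotl32(X, 8) == 0x34567812u);
  return 0;
}
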
@@ -1991,7 +1991,7 @@ SDNode *DAGCombiner::MatchRotate(SDOperand LHS, SDOperand RHS) { std::swap(LHSMask , RHSMask ); } - unsigned OpSizeInBits = MVT::getSizeInBits(VT); + unsigned OpSizeInBits = VT.getSizeInBits(); SDOperand LHSShiftArg = LHSShift.getOperand(0); SDOperand LHSShiftAmt = LHSShift.getOperand(1); SDOperand RHSShiftAmt = RHSShift.getOperand(1); @@ -2115,10 +2115,10 @@ SDOperand DAGCombiner::visitXOR(SDNode *N) { SDOperand LHS, RHS, CC; ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); - MVT::ValueType VT = N0.getValueType(); + MVT VT = N0.getValueType(); // fold vector ops - if (MVT::isVector(VT)) { + if (VT.isVector()) { SDOperand FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -2146,7 +2146,7 @@ SDOperand DAGCombiner::visitXOR(SDNode *N) { return RXOR; // fold !(x cc y) -> (x !cc y) if (N1C && N1C->getAPIntValue() == 1 && isSetCCEquivalent(N0, LHS, RHS, CC)) { - bool isInt = MVT::isInteger(LHS.getValueType()); + bool isInt = LHS.getValueType().isInteger(); ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), isInt); if (N0.getOpcode() == ISD::SETCC) @@ -2205,12 +2205,12 @@ SDOperand DAGCombiner::visitXOR(SDNode *N) { } // fold (xor x, x) -> 0 if (N0 == N1) { - if (!MVT::isVector(VT)) { + if (!VT.isVector()) { return DAG.getConstant(0, VT); } else if (!AfterLegalize || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) { // Produce a vector of zeros. - SDOperand El = DAG.getConstant(0, MVT::getVectorElementType(VT)); - std::vector<SDOperand> Ops(MVT::getVectorNumElements(VT), El); + SDOperand El = DAG.getConstant(0, VT.getVectorElementType()); + std::vector<SDOperand> Ops(VT.getVectorNumElements(), El); return DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size()); } } @@ -2222,7 +2222,7 @@ SDOperand DAGCombiner::visitXOR(SDNode *N) { } // Simplify the expression using non-local knowledge. - if (!MVT::isVector(VT) && + if (!VT.isVector() && SimplifyDemandedBits(SDOperand(N, 0))) return SDOperand(N, 0); @@ -2274,7 +2274,7 @@ SDOperand DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) { !isa<ConstantSDNode>(BinOpLHSVal->getOperand(1))) return SDOperand(); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // If this is a signed shift right, and the high bit is modified // by the logical operation, do not perform the transformation. 
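
// A standalone illustrative sketch. It illustrates the visitXOR fold above:
// xor-ing an i1 comparison result with 1 is the same as inverting the
// comparison's condition code (here, < becomes >=).
#include <cassert>
#include <initializer_list>

int main() {
  for (int x : {-3, 0, 4})
    for (int y : {-1, 2}) {
      bool setcc = (x < y);
      assert((setcc ^ true) == (x >= y));   // !(x < y)  ==  (x >= y)
    }
  return 0;
}
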
@@ -2305,8 +2305,8 @@ SDOperand DAGCombiner::visitSHL(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); - MVT::ValueType VT = N0.getValueType(); - unsigned OpSizeInBits = MVT::getSizeInBits(VT); + MVT VT = N0.getValueType(); + unsigned OpSizeInBits = VT.getSizeInBits(); // fold (shl c1, c2) -> c1<<c2 if (N0C && N1C) @@ -2322,7 +2322,7 @@ SDOperand DAGCombiner::visitSHL(SDNode *N) { return N0; // if (shl x, c) is known to be zero, return 0 if (DAG.MaskedValueIsZero(SDOperand(N, 0), - APInt::getAllOnesValue(MVT::getSizeInBits(VT)))) + APInt::getAllOnesValue(VT.getSizeInBits()))) return DAG.getConstant(0, VT); if (N1C && SimplifyDemandedBits(SDOperand(N, 0))) return SDOperand(N, 0); @@ -2364,7 +2364,7 @@ SDOperand DAGCombiner::visitSRA(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); - MVT::ValueType VT = N0.getValueType(); + MVT VT = N0.getValueType(); // fold (sra c1, c2) -> c1>>c2 if (N0C && N1C) @@ -2376,7 +2376,7 @@ SDOperand DAGCombiner::visitSRA(SDNode *N) { if (N0C && N0C->isAllOnesValue()) return N0; // fold (sra x, c >= size(x)) -> undef - if (N1C && N1C->getValue() >= MVT::getSizeInBits(VT)) + if (N1C && N1C->getValue() >= VT.getSizeInBits()) return DAG.getNode(ISD::UNDEF, VT); // fold (sra x, 0) -> x if (N1C && N1C->isNullValue()) @@ -2384,8 +2384,8 @@ SDOperand DAGCombiner::visitSRA(SDNode *N) { // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports // sext_inreg. if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) { - unsigned LowBits = MVT::getSizeInBits(VT) - (unsigned)N1C->getValue(); - MVT::ValueType EVT; + unsigned LowBits = VT.getSizeInBits() - (unsigned)N1C->getValue(); + MVT EVT; switch (LowBits) { default: EVT = MVT::Other; break; case 1: EVT = MVT::i1; break; @@ -2402,7 +2402,7 @@ SDOperand DAGCombiner::visitSRA(SDNode *N) { if (N1C && N0.getOpcode() == ISD::SRA) { if (ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { unsigned Sum = N1C->getValue() + C1->getValue(); - if (Sum >= MVT::getSizeInBits(VT)) Sum = MVT::getSizeInBits(VT)-1; + if (Sum >= VT.getSizeInBits()) Sum = VT.getSizeInBits()-1; return DAG.getNode(ISD::SRA, VT, N0.getOperand(0), DAG.getConstant(Sum, N1C->getValueType(0))); } @@ -2418,8 +2418,9 @@ SDOperand DAGCombiner::visitSRA(SDNode *N) { const ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); if (N01C && N1C) { // Determine what the truncate's result bitsize and type would be. - unsigned VTValSize = MVT::getSizeInBits(VT); - MVT::ValueType TruncVT = MVT::getIntegerType(VTValSize - N1C->getValue()); + unsigned VTValSize = VT.getSizeInBits(); + MVT TruncVT = + MVT::getIntegerVT(VTValSize - N1C->getValue()); // Determine the residual right-shift amount. 
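
// A standalone illustrative sketch. It checks the visitSRA fold above:
// shifting left by C and arithmetic-shifting right by C sign-extends the
// value from its low (width - C) bits, which is what sext_inreg does. The
// sketch relies on the usual two's-complement, arithmetic-shift behavior.
#include <cassert>
#include <cstdint>

static int32_t sextInReg(int32_t X, unsigned FromBits) {
  unsigned C = 32 - FromBits;
  return (int32_t)((uint32_t)X << C) >> C;   // the (sra (shl x, C), C) form
}

int main() {
  // Sign-extend from the low 8 bits of a 32-bit value:
  assert(sextInReg(0x000000FF, 8) == -1);
  assert(sextInReg(0x0000007F, 8) == 127);
  return 0;
}
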
unsigned ShiftAmt = N1C->getValue() - N01C->getValue(); @@ -2458,8 +2459,8 @@ SDOperand DAGCombiner::visitSRL(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); - MVT::ValueType VT = N0.getValueType(); - unsigned OpSizeInBits = MVT::getSizeInBits(VT); + MVT VT = N0.getValueType(); + unsigned OpSizeInBits = VT.getSizeInBits(); // fold (srl c1, c2) -> c1 >>u c2 if (N0C && N1C) @@ -2492,8 +2493,8 @@ SDOperand DAGCombiner::visitSRL(SDNode *N) { // fold (srl (anyextend x), c) -> (anyextend (srl x, c)) if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) { // Shifting in all undef bits? - MVT::ValueType SmallVT = N0.getOperand(0).getValueType(); - if (N1C->getValue() >= MVT::getSizeInBits(SmallVT)) + MVT SmallVT = N0.getOperand(0).getValueType(); + if (N1C->getValue() >= SmallVT.getSizeInBits()) return DAG.getNode(ISD::UNDEF, VT); SDOperand SmallShift = DAG.getNode(ISD::SRL, SmallVT, N0.getOperand(0), N1); @@ -2503,16 +2504,16 @@ SDOperand DAGCombiner::visitSRL(SDNode *N) { // fold (srl (sra X, Y), 31) -> (srl X, 31). This srl only looks at the sign // bit, which is unmodified by sra. - if (N1C && N1C->getValue()+1 == MVT::getSizeInBits(VT)) { + if (N1C && N1C->getValue()+1 == VT.getSizeInBits()) { if (N0.getOpcode() == ISD::SRA) return DAG.getNode(ISD::SRL, VT, N0.getOperand(0), N1); } // fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit). if (N1C && N0.getOpcode() == ISD::CTLZ && - N1C->getAPIntValue() == Log2_32(MVT::getSizeInBits(VT))) { + N1C->getAPIntValue() == Log2_32(VT.getSizeInBits())) { APInt KnownZero, KnownOne; - APInt Mask = APInt::getAllOnesValue(MVT::getSizeInBits(VT)); + APInt Mask = APInt::getAllOnesValue(VT.getSizeInBits()); DAG.ComputeMaskedBits(N0.getOperand(0), Mask, KnownZero, KnownOne); // If any of the input bits are KnownOne, then the input couldn't be all @@ -2551,7 +2552,7 @@ SDOperand DAGCombiner::visitSRL(SDNode *N) { SDOperand DAGCombiner::visitCTLZ(SDNode *N) { SDOperand N0 = N->getOperand(0); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (ctlz c1) -> c2 if (isa<ConstantSDNode>(N0)) @@ -2561,7 +2562,7 @@ SDOperand DAGCombiner::visitCTLZ(SDNode *N) { SDOperand DAGCombiner::visitCTTZ(SDNode *N) { SDOperand N0 = N->getOperand(0); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (cttz c1) -> c2 if (isa<ConstantSDNode>(N0)) @@ -2571,7 +2572,7 @@ SDOperand DAGCombiner::visitCTTZ(SDNode *N) { SDOperand DAGCombiner::visitCTPOP(SDNode *N) { SDOperand N0 = N->getOperand(0); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (ctpop c1) -> c2 if (isa<ConstantSDNode>(N0)) @@ -2586,8 +2587,8 @@ SDOperand DAGCombiner::visitSELECT(SDNode *N) { ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); - MVT::ValueType VT = N->getValueType(0); - MVT::ValueType VT0 = N0.getValueType(); + MVT VT = N->getValueType(0); + MVT VT0 = N0.getValueType(); // fold select C, X, X -> X if (N1 == N2) @@ -2599,16 +2600,16 @@ SDOperand DAGCombiner::visitSELECT(SDNode *N) { if (N0C && N0C->isNullValue()) return N2; // fold select C, 1, X -> C | X - if (MVT::i1 == VT && N1C && N1C->getAPIntValue() == 1) + if (VT == MVT::i1 && N1C && N1C->getAPIntValue() == 1) return DAG.getNode(ISD::OR, VT, N0, N2); // fold select C, 0, 1 -> ~C - if (MVT::isInteger(VT) && MVT::isInteger(VT0) && + if (VT.isInteger() && 
VT0.isInteger() && N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) { SDOperand XORNode = DAG.getNode(ISD::XOR, VT0, N0, DAG.getConstant(1, VT0)); if (VT == VT0) return XORNode; AddToWorkList(XORNode.Val); - if (MVT::getSizeInBits(VT) > MVT::getSizeInBits(VT0)) + if (VT.getSizeInBits() > VT0.getSizeInBits()) return DAG.getNode(ISD::ZERO_EXTEND, VT, XORNode); return DAG.getNode(ISD::TRUNCATE, VT, XORNode); } @@ -2626,13 +2627,13 @@ SDOperand DAGCombiner::visitSELECT(SDNode *N) { } // fold select C, X, 0 -> C & X // FIXME: this should check for C type == X type, not i1? - if (MVT::i1 == VT && N2C && N2C->isNullValue()) + if (VT == MVT::i1 && N2C && N2C->isNullValue()) return DAG.getNode(ISD::AND, VT, N0, N1); // fold X ? X : Y --> X ? 1 : Y --> X | Y - if (MVT::i1 == VT && N0 == N1) + if (VT == MVT::i1 && N0 == N1) return DAG.getNode(ISD::OR, VT, N0, N2); // fold X ? Y : X --> X ? Y : 0 --> X & Y - if (MVT::i1 == VT && N0 == N2) + if (VT == MVT::i1 && N0 == N2) return DAG.getNode(ISD::AND, VT, N0, N1); // If we can fold this based on the true/false value, do so. @@ -2766,7 +2767,7 @@ static bool ExtendUsesToFormExtLoad(SDNode *N, SDOperand N0, SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) { SDOperand N0 = N->getOperand(0); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (sext c1) -> c1 if (isa<ConstantSDNode>(N0)) @@ -2790,9 +2791,9 @@ SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) { // See if the value being truncated is already sign extended. If so, just // eliminate the trunc/sext pair. SDOperand Op = N0.getOperand(0); - unsigned OpBits = MVT::getSizeInBits(Op.getValueType()); - unsigned MidBits = MVT::getSizeInBits(N0.getValueType()); - unsigned DestBits = MVT::getSizeInBits(VT); + unsigned OpBits = Op.getValueType().getSizeInBits(); + unsigned MidBits = N0.getValueType().getSizeInBits(); + unsigned DestBits = VT.getSizeInBits(); unsigned NumSignBits = DAG.ComputeNumSignBits(Op); if (OpBits == DestBits) { @@ -2866,7 +2867,7 @@ SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) { if ((ISD::isSEXTLoad(N0.Val) || ISD::isEXTLoad(N0.Val)) && ISD::isUNINDEXEDLoad(N0.Val) && N0.hasOneUse()) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); - MVT::ValueType EVT = LN0->getMemoryVT(); + MVT EVT = LN0->getMemoryVT(); if (!AfterLegalize || TLI.isLoadXLegal(ISD::SEXTLOAD, EVT)) { SDOperand ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), @@ -2899,7 +2900,7 @@ SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) { SDOperand DAGCombiner::visitZERO_EXTEND(SDNode *N) { SDOperand N0 = N->getOperand(0); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (zext c1) -> c1 if (isa<ConstantSDNode>(N0)) @@ -2943,7 +2944,7 @@ SDOperand DAGCombiner::visitZERO_EXTEND(SDNode *N) { X = DAG.getNode(ISD::TRUNCATE, VT, X); } APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); - Mask.zext(MVT::getSizeInBits(VT)); + Mask.zext(VT.getSizeInBits()); return DAG.getNode(ISD::AND, VT, X, DAG.getConstant(Mask, VT)); } @@ -2989,7 +2990,7 @@ SDOperand DAGCombiner::visitZERO_EXTEND(SDNode *N) { if ((ISD::isZEXTLoad(N0.Val) || ISD::isEXTLoad(N0.Val)) && ISD::isUNINDEXEDLoad(N0.Val) && N0.hasOneUse()) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); - MVT::ValueType EVT = LN0->getMemoryVT(); + MVT EVT = LN0->getMemoryVT(); SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), LN0->getSrcValueOffset(), EVT, @@ -3015,7 +3016,7 @@ SDOperand 
DAGCombiner::visitZERO_EXTEND(SDNode *N) { SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) { SDOperand N0 = N->getOperand(0); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (aext c1) -> c1 if (isa<ConstantSDNode>(N0)) @@ -3060,7 +3061,7 @@ SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) { X = DAG.getNode(ISD::TRUNCATE, VT, X); } APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); - Mask.zext(MVT::getSizeInBits(VT)); + Mask.zext(VT.getSizeInBits()); return DAG.getNode(ISD::AND, VT, X, DAG.getConstant(Mask, VT)); } @@ -3087,7 +3088,7 @@ SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) { !ISD::isNON_EXTLoad(N0.Val) && ISD::isUNINDEXEDLoad(N0.Val) && N0.hasOneUse()) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); - MVT::ValueType EVT = LN0->getMemoryVT(); + MVT EVT = LN0->getMemoryVT(); SDOperand ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), @@ -3154,8 +3155,8 @@ SDOperand DAGCombiner::ReduceLoadWidth(SDNode *N) { unsigned Opc = N->getOpcode(); ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; SDOperand N0 = N->getOperand(0); - MVT::ValueType VT = N->getValueType(0); - MVT::ValueType EVT = N->getValueType(0); + MVT VT = N->getValueType(0); + MVT EVT = N->getValueType(0); // Special case: SIGN_EXTEND_INREG is basically truncating to EVT then // extended to VT. @@ -3166,7 +3167,7 @@ SDOperand DAGCombiner::ReduceLoadWidth(SDNode *N) { return SDOperand(); } - unsigned EVTBits = MVT::getSizeInBits(EVT); + unsigned EVTBits = EVT.getSizeInBits(); unsigned ShAmt = 0; bool CombineSRL = false; if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) { @@ -3175,7 +3176,7 @@ SDOperand DAGCombiner::ReduceLoadWidth(SDNode *N) { // Is the shift amount a multiple of size of VT? if ((ShAmt & (EVTBits-1)) == 0) { N0 = N0.getOperand(0); - if (MVT::getSizeInBits(N0.getValueType()) <= EVTBits) + if (N0.getValueType().getSizeInBits() <= EVTBits) return SDOperand(); CombineSRL = true; } @@ -3188,15 +3189,15 @@ SDOperand DAGCombiner::ReduceLoadWidth(SDNode *N) { // that it is already zero extended. // FIXME: This should be reevaluated. VT != MVT::i1) { - assert(MVT::getSizeInBits(N0.getValueType()) > EVTBits && + assert(N0.getValueType().getSizeInBits() > EVTBits && "Cannot truncate to larger type!"); LoadSDNode *LN0 = cast<LoadSDNode>(N0); - MVT::ValueType PtrType = N0.getOperand(1).getValueType(); + MVT PtrType = N0.getOperand(1).getValueType(); // For big endian targets, we need to adjust the offset to the pointer to // load the correct bytes. 
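ReduceLoadWidth's big-endian adjustment, implemented in the lines just below, is easier to follow with numbers plugged in. A small sketch (illustrative, not taken from the patch; the helper name is made up):

    // Byte offset of a narrowed load, mirroring the computation below: on
    // big-endian targets the shift amount is recounted from the other end.
    uint64_t narrowedLoadOffset(unsigned LVTStoreBits, unsigned EVTStoreBits,
                                unsigned ShAmt, bool BigEndian) {
      if (BigEndian)
        ShAmt = LVTStoreBits - EVTStoreBits - ShAmt;
      return ShAmt / 8;
    }
    // e.g. narrowing an i32 load to an i8 use after a shift by 16:
    //   little-endian: 16 / 8            == 2
    //   big-endian:    (32 - 8 - 16) / 8 == 1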
if (TLI.isBigEndian()) { - unsigned LVTStoreBits = MVT::getStoreSizeInBits(N0.getValueType()); - unsigned EVTStoreBits = MVT::getStoreSizeInBits(EVT); + unsigned LVTStoreBits = N0.getValueType().getStoreSizeInBits(); + unsigned EVTStoreBits = EVT.getStoreSizeInBits(); ShAmt = LVTStoreBits - EVTStoreBits - ShAmt; } uint64_t PtrOff = ShAmt / 8; @@ -3235,17 +3236,17 @@ SDOperand DAGCombiner::ReduceLoadWidth(SDNode *N) { SDOperand DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) { SDOperand N0 = N->getOperand(0); SDOperand N1 = N->getOperand(1); - MVT::ValueType VT = N->getValueType(0); - MVT::ValueType EVT = cast<VTSDNode>(N1)->getVT(); - unsigned VTBits = MVT::getSizeInBits(VT); - unsigned EVTBits = MVT::getSizeInBits(EVT); + MVT VT = N->getValueType(0); + MVT EVT = cast<VTSDNode>(N1)->getVT(); + unsigned VTBits = VT.getSizeInBits(); + unsigned EVTBits = EVT.getSizeInBits(); // fold (sext_in_reg c1) -> c1 if (isa<ConstantSDNode>(N0) || N0.getOpcode() == ISD::UNDEF) return DAG.getNode(ISD::SIGN_EXTEND_INREG, VT, N0, N1); // If the input is already sign extended, just drop the extension. - if (DAG.ComputeNumSignBits(N0) >= MVT::getSizeInBits(VT)-EVTBits+1) + if (DAG.ComputeNumSignBits(N0) >= VT.getSizeInBits()-EVTBits+1) return N0; // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2 @@ -3274,11 +3275,11 @@ SDOperand DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) { // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above. if (N0.getOpcode() == ISD::SRL) { if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1))) - if (ShAmt->getValue()+EVTBits <= MVT::getSizeInBits(VT)) { + if (ShAmt->getValue()+EVTBits <= VT.getSizeInBits()) { // We can turn this into an SRA iff the input to the SRL is already sign // extended enough. unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0)); - if (MVT::getSizeInBits(VT)-(ShAmt->getValue()+EVTBits) < InSignBits) + if (VT.getSizeInBits()-(ShAmt->getValue()+EVTBits) < InSignBits) return DAG.getNode(ISD::SRA, VT, N0.getOperand(0), N0.getOperand(1)); } } @@ -3318,7 +3319,7 @@ SDOperand DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) { SDOperand DAGCombiner::visitTRUNCATE(SDNode *N) { SDOperand N0 = N->getOperand(0); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // noop truncate if (N0.getValueType() == N->getValueType(0)) @@ -3349,7 +3350,7 @@ SDOperand DAGCombiner::visitTRUNCATE(SDNode *N) { // -> trunc y SDOperand Shorter = GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(), - MVT::getSizeInBits(VT))); + VT.getSizeInBits())); if (Shorter.Val) return DAG.getNode(ISD::TRUNCATE, VT, Shorter); @@ -3367,22 +3368,22 @@ static SDNode *getBuildPairElt(SDNode *N, unsigned i) { /// CombineConsecutiveLoads - build_pair (load, load) -> load /// if load locations are consecutive. 
-SDOperand DAGCombiner::CombineConsecutiveLoads(SDNode *N, MVT::ValueType VT) { +SDOperand DAGCombiner::CombineConsecutiveLoads(SDNode *N, MVT VT) { assert(N->getOpcode() == ISD::BUILD_PAIR); SDNode *LD1 = getBuildPairElt(N, 0); if (!ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse()) return SDOperand(); - MVT::ValueType LD1VT = LD1->getValueType(0); + MVT LD1VT = LD1->getValueType(0); SDNode *LD2 = getBuildPairElt(N, 1); const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); if (ISD::isNON_EXTLoad(LD2) && LD2->hasOneUse() && - TLI.isConsecutiveLoad(LD2, LD1, MVT::getSizeInBits(LD1VT)/8, 1, MFI)) { + TLI.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1, MFI)) { LoadSDNode *LD = cast<LoadSDNode>(LD1); unsigned Align = LD->getAlignment(); unsigned NewAlign = TLI.getTargetMachine().getTargetData()-> - getABITypeAlignment(MVT::getTypeForValueType(VT)); + getABITypeAlignment(VT.getTypeForMVT()); if ((!AfterLegalize || TLI.isTypeLegal(VT)) && TLI.isOperationLegal(ISD::LOAD, VT) && NewAlign <= Align) return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), @@ -3394,7 +3395,7 @@ SDOperand DAGCombiner::CombineConsecutiveLoads(SDNode *N, MVT::ValueType VT) { SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { SDOperand N0 = N->getOperand(0); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // If the input is a BUILD_VECTOR with all constant elements, fold this now. // Only do this before legalize, since afterward the target may be depending @@ -3402,7 +3403,7 @@ SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { // First check to see if this is all constant. if (!AfterLegalize && N0.getOpcode() == ISD::BUILD_VECTOR && N0.Val->hasOneUse() && - MVT::isVector(VT)) { + VT.isVector()) { bool isSimple = true; for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) if (N0.getOperand(i).getOpcode() != ISD::UNDEF && @@ -3412,8 +3413,8 @@ SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { break; } - MVT::ValueType DestEltVT = MVT::getVectorElementType(N->getValueType(0)); - assert(!MVT::isVector(DestEltVT) && + MVT DestEltVT = N->getValueType(0).getVectorElementType(); + assert(!DestEltVT.isVector() && "Element type of vector ValueType must not be vector!"); if (isSimple) { return ConstantFoldBIT_CONVERTofBUILD_VECTOR(N0.Val, DestEltVT); @@ -3435,7 +3436,7 @@ SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { TLI.isOperationLegal(ISD::LOAD, VT)) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); unsigned Align = TLI.getTargetMachine().getTargetData()-> - getABITypeAlignment(MVT::getTypeForValueType(VT)); + getABITypeAlignment(VT.getTypeForMVT()); unsigned OrigAlign = LN0->getAlignment(); if (Align <= OrigAlign) { SDOperand Load = DAG.getLoad(VT, LN0->getChain(), LN0->getBasePtr(), @@ -3452,11 +3453,11 @@ SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { // Fold bitconvert(fabs(x)) -> and(bitconvert(x), ~signbit) // This often reduces constant pool loads. 
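The fold named in the comment above and implemented in the next lines builds its masks straight from the type's width. A sketch of the two masks involved (illustrative only; the helper names are invented):

    // fneg(bitconvert x) -> xor(bitconvert x,  signbit)
    // fabs(bitconvert x) -> and(bitconvert x, ~signbit)
    APInt signBitMask(MVT VT) {
      return APInt::getSignBit(VT.getSizeInBits());  // was MVT::getSizeInBits(VT)
    }
    APInt clearSignBitMask(MVT VT) {
      return ~signBitMask(VT);                       // every bit except the sign bit
    }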
if ((N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FABS) && - N0.Val->hasOneUse() && MVT::isInteger(VT) && !MVT::isVector(VT)) { + N0.Val->hasOneUse() && VT.isInteger() && !VT.isVector()) { SDOperand NewConv = DAG.getNode(ISD::BIT_CONVERT, VT, N0.getOperand(0)); AddToWorkList(NewConv.Val); - APInt SignBit = APInt::getSignBit(MVT::getSizeInBits(VT)); + APInt SignBit = APInt::getSignBit(VT.getSizeInBits()); if (N0.getOpcode() == ISD::FNEG) return DAG.getNode(ISD::XOR, VT, NewConv, DAG.getConstant(SignBit, VT)); assert(N0.getOpcode() == ISD::FABS); @@ -3468,14 +3469,15 @@ SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { // to an fneg or fabs. if (N0.getOpcode() == ISD::FCOPYSIGN && N0.Val->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(0)) && - MVT::isInteger(VT) && !MVT::isVector(VT)) { - unsigned OrigXWidth = MVT::getSizeInBits(N0.getOperand(1).getValueType()); - SDOperand X = DAG.getNode(ISD::BIT_CONVERT, MVT::getIntegerType(OrigXWidth), + VT.isInteger() && !VT.isVector()) { + unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits(); + SDOperand X = DAG.getNode(ISD::BIT_CONVERT, + MVT::getIntegerVT(OrigXWidth), N0.getOperand(1)); AddToWorkList(X.Val); // If X has a different width than the result/lhs, sext it or truncate it. - unsigned VTWidth = MVT::getSizeInBits(VT); + unsigned VTWidth = VT.getSizeInBits(); if (OrigXWidth < VTWidth) { X = DAG.getNode(ISD::SIGN_EXTEND, VT, X); AddToWorkList(X.Val); @@ -3489,7 +3491,7 @@ SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { AddToWorkList(X.Val); } - APInt SignBit = APInt::getSignBit(MVT::getSizeInBits(VT)); + APInt SignBit = APInt::getSignBit(VT.getSizeInBits()); X = DAG.getNode(ISD::AND, VT, X, DAG.getConstant(SignBit, VT)); AddToWorkList(X.Val); @@ -3511,7 +3513,7 @@ SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { } SDOperand DAGCombiner::visitBUILD_PAIR(SDNode *N) { - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); return CombineConsecutiveLoads(N, VT); } @@ -3519,14 +3521,14 @@ SDOperand DAGCombiner::visitBUILD_PAIR(SDNode *N) { /// node with Constant, ConstantFP or Undef operands. DstEltVT indicates the /// destination element value type. SDOperand DAGCombiner:: -ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT::ValueType DstEltVT) { - MVT::ValueType SrcEltVT = BV->getOperand(0).getValueType(); +ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) { + MVT SrcEltVT = BV->getOperand(0).getValueType(); // If this is already the right type, we're done. if (SrcEltVT == DstEltVT) return SDOperand(BV, 0); - unsigned SrcBitSize = MVT::getSizeInBits(SrcEltVT); - unsigned DstBitSize = MVT::getSizeInBits(DstEltVT); + unsigned SrcBitSize = SrcEltVT.getSizeInBits(); + unsigned DstBitSize = DstEltVT.getSizeInBits(); // If this is a conversion of N elements of one type to N elements of another // type, convert each element. This handles FP<->INT cases. @@ -3536,29 +3538,28 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT::ValueType DstEltVT) { Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, DstEltVT, BV->getOperand(i))); AddToWorkList(Ops.back().Val); } - MVT::ValueType VT = - MVT::getVectorType(DstEltVT, - MVT::getVectorNumElements(BV->getValueType(0))); + MVT VT = MVT::getVectorVT(DstEltVT, + BV->getValueType(0).getVectorNumElements()); return DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size()); } // Otherwise, we're growing or shrinking the elements. To avoid having to // handle annoying details of growing/shrinking FP values, we convert them to // int first. 
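For the growing/shrinking case mentioned in the comment above (handled after the int conversion just below), a worked example may help (illustrative; the element counts are made up):

    // Bitconverting a BUILD_VECTOR of two i64 constants into i32 elements:
    //   SrcBitSize = 64, DstBitSize = 32  ->  shrinking,
    //   NumOutputsPerInput = SrcBitSize / DstBitSize = 2,
    //   result type = MVT::getVectorVT(MVT::i32, 2 * 2), i.e. v4i32.
    // Growing is the mirror image: NumInputsPerOutput = DstBitSize / SrcBitSize.
    MVT shrunkVectorVT(MVT DstEltVT, unsigned SrcBitSize, unsigned DstBitSize,
                       unsigned NumOperands) {
      unsigned NumOutputsPerInput = SrcBitSize / DstBitSize;
      return MVT::getVectorVT(DstEltVT, NumOutputsPerInput * NumOperands);
    }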
- if (MVT::isFloatingPoint(SrcEltVT)) { + if (SrcEltVT.isFloatingPoint()) { // Convert the input float vector to a int vector where the elements are the // same sizes. assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!"); - MVT::ValueType IntVT = SrcEltVT == MVT::f32 ? MVT::i32 : MVT::i64; + MVT IntVT = SrcEltVT == MVT::f32 ? MVT::i32 : MVT::i64; BV = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, IntVT).Val; SrcEltVT = IntVT; } // Now we know the input is an integer vector. If the output is a FP type, // convert to integer first, then to FP of the right size. - if (MVT::isFloatingPoint(DstEltVT)) { + if (DstEltVT.isFloatingPoint()) { assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!"); - MVT::ValueType TmpVT = DstEltVT == MVT::f32 ? MVT::i32 : MVT::i64; + MVT TmpVT = DstEltVT == MVT::f32 ? MVT::i32 : MVT::i64; SDNode *Tmp = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, TmpVT).Val; // Next, convert to FP elements of the same size. @@ -3567,7 +3568,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT::ValueType DstEltVT) { // Okay, we know the src/dst types are both integers of differing types. // Handling growing first. - assert(MVT::isInteger(SrcEltVT) && MVT::isInteger(DstEltVT)); + assert(SrcEltVT.isInteger() && DstEltVT.isInteger()); if (SrcBitSize < DstBitSize) { unsigned NumInputsPerOutput = DstBitSize/SrcBitSize; @@ -3594,7 +3595,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT::ValueType DstEltVT) { Ops.push_back(DAG.getConstant(NewBits, DstEltVT)); } - MVT::ValueType VT = MVT::getVectorType(DstEltVT, Ops.size()); + MVT VT = MVT::getVectorVT(DstEltVT, Ops.size()); return DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size()); } @@ -3602,8 +3603,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT::ValueType DstEltVT) { // turns into multiple outputs. 
bool isS2V = ISD::isScalarToVector(BV); unsigned NumOutputsPerInput = SrcBitSize/DstBitSize; - MVT::ValueType VT = MVT::getVectorType(DstEltVT, - NumOutputsPerInput * BV->getNumOperands()); + MVT VT = MVT::getVectorVT(DstEltVT, NumOutputsPerInput*BV->getNumOperands()); SmallVector<SDOperand, 8> Ops; for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { if (BV->getOperand(i).getOpcode() == ISD::UNDEF) { @@ -3635,10 +3635,10 @@ SDOperand DAGCombiner::visitFADD(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold vector ops - if (MVT::isVector(VT)) { + if (VT.isVector()) { SDOperand FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -3672,10 +3672,10 @@ SDOperand DAGCombiner::visitFSUB(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold vector ops - if (MVT::isVector(VT)) { + if (VT.isVector()) { SDOperand FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -3702,10 +3702,10 @@ SDOperand DAGCombiner::visitFMUL(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold vector ops - if (MVT::isVector(VT)) { + if (VT.isVector()) { SDOperand FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -3749,10 +3749,10 @@ SDOperand DAGCombiner::visitFDIV(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold vector ops - if (MVT::isVector(VT)) { + if (VT.isVector()) { SDOperand FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -3782,7 +3782,7 @@ SDOperand DAGCombiner::visitFREM(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (frem c1, c2) -> fmod(c1,c2) if (N0CFP && N1CFP && VT != MVT::ppcf128) @@ -3796,7 +3796,7 @@ SDOperand DAGCombiner::visitFCOPYSIGN(SDNode *N) { SDOperand N1 = N->getOperand(1); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); if (N0CFP && N1CFP && VT != MVT::ppcf128) // Constant fold return DAG.getNode(ISD::FCOPYSIGN, VT, N0, N1); @@ -3839,7 +3839,7 @@ SDOperand DAGCombiner::visitFCOPYSIGN(SDNode *N) { SDOperand DAGCombiner::visitSINT_TO_FP(SDNode *N) { SDOperand N0 = N->getOperand(0); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (sint_to_fp c1) -> c1fp if (N0C && N0.getValueType() != MVT::ppcf128) @@ -3850,7 +3850,7 @@ SDOperand DAGCombiner::visitSINT_TO_FP(SDNode *N) { SDOperand DAGCombiner::visitUINT_TO_FP(SDNode *N) { SDOperand N0 = N->getOperand(0); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); - MVT::ValueType VT = 
N->getValueType(0); + MVT VT = N->getValueType(0); // fold (uint_to_fp c1) -> c1fp if (N0C && N0.getValueType() != MVT::ppcf128) @@ -3861,7 +3861,7 @@ SDOperand DAGCombiner::visitUINT_TO_FP(SDNode *N) { SDOperand DAGCombiner::visitFP_TO_SINT(SDNode *N) { SDOperand N0 = N->getOperand(0); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (fp_to_sint c1fp) -> c1 if (N0CFP) @@ -3872,7 +3872,7 @@ SDOperand DAGCombiner::visitFP_TO_SINT(SDNode *N) { SDOperand DAGCombiner::visitFP_TO_UINT(SDNode *N) { SDOperand N0 = N->getOperand(0); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (fp_to_uint c1fp) -> c1 if (N0CFP && VT != MVT::ppcf128) @@ -3884,7 +3884,7 @@ SDOperand DAGCombiner::visitFP_ROUND(SDNode *N) { SDOperand N0 = N->getOperand(0); SDOperand N1 = N->getOperand(1); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (fp_round c1fp) -> c1fp if (N0CFP && N0.getValueType() != MVT::ppcf128) @@ -3915,8 +3915,8 @@ SDOperand DAGCombiner::visitFP_ROUND(SDNode *N) { SDOperand DAGCombiner::visitFP_ROUND_INREG(SDNode *N) { SDOperand N0 = N->getOperand(0); - MVT::ValueType VT = N->getValueType(0); - MVT::ValueType EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); + MVT VT = N->getValueType(0); + MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); // fold (fp_round_inreg c1fp) -> c1fp @@ -3930,7 +3930,7 @@ SDOperand DAGCombiner::visitFP_ROUND_INREG(SDNode *N) { SDOperand DAGCombiner::visitFP_EXTEND(SDNode *N) { SDOperand N0 = N->getOperand(0); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded. if (N->hasOneUse() && @@ -3981,13 +3981,13 @@ SDOperand DAGCombiner::visitFNEG(SDNode *N) { // Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading // constant pool values. if (N0.getOpcode() == ISD::BIT_CONVERT && N0.Val->hasOneUse() && - MVT::isInteger(N0.getOperand(0).getValueType()) && - !MVT::isVector(N0.getOperand(0).getValueType())) { + N0.getOperand(0).getValueType().isInteger() && + !N0.getOperand(0).getValueType().isVector()) { SDOperand Int = N0.getOperand(0); - MVT::ValueType IntVT = Int.getValueType(); - if (MVT::isInteger(IntVT) && !MVT::isVector(IntVT)) { + MVT IntVT = Int.getValueType(); + if (IntVT.isInteger() && !IntVT.isVector()) { Int = DAG.getNode(ISD::XOR, IntVT, Int, - DAG.getConstant(MVT::getIntVTSignBit(IntVT), IntVT)); + DAG.getConstant(IntVT.getIntegerVTSignBit(), IntVT)); AddToWorkList(Int.Val); return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0), Int); } @@ -3999,7 +3999,7 @@ SDOperand DAGCombiner::visitFNEG(SDNode *N) { SDOperand DAGCombiner::visitFABS(SDNode *N) { SDOperand N0 = N->getOperand(0); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // fold (fabs c1) -> fabs(c1) if (N0CFP && VT != MVT::ppcf128) @@ -4015,13 +4015,13 @@ SDOperand DAGCombiner::visitFABS(SDNode *N) { // Transform fabs(bitconvert(x)) -> bitconvert(x&~sign) to avoid loading // constant pool values. 
if (N0.getOpcode() == ISD::BIT_CONVERT && N0.Val->hasOneUse() && - MVT::isInteger(N0.getOperand(0).getValueType()) && - !MVT::isVector(N0.getOperand(0).getValueType())) { + N0.getOperand(0).getValueType().isInteger() && + !N0.getOperand(0).getValueType().isVector()) { SDOperand Int = N0.getOperand(0); - MVT::ValueType IntVT = Int.getValueType(); - if (MVT::isInteger(IntVT) && !MVT::isVector(IntVT)) { + MVT IntVT = Int.getValueType(); + if (IntVT.isInteger() && !IntVT.isVector()) { Int = DAG.getNode(ISD::AND, IntVT, Int, - DAG.getConstant(~MVT::getIntVTSignBit(IntVT), IntVT)); + DAG.getConstant(~IntVT.getIntegerVTSignBit(), IntVT)); AddToWorkList(Int.Val); return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0), Int); } @@ -4093,7 +4093,7 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) { bool isLoad = true; SDOperand Ptr; - MVT::ValueType VT; + MVT VT; if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { if (LD->isIndexed()) return false; @@ -4214,7 +4214,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) { bool isLoad = true; SDOperand Ptr; - MVT::ValueType VT; + MVT VT; if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { if (LD->isIndexed()) return false; @@ -4513,9 +4513,9 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) { if (Value.getOpcode() == ISD::BIT_CONVERT && !ST->isTruncatingStore() && ST->isUnindexed()) { unsigned Align = ST->getAlignment(); - MVT::ValueType SVT = Value.getOperand(0).getValueType(); + MVT SVT = Value.getOperand(0).getValueType(); unsigned OrigAlign = TLI.getTargetMachine().getTargetData()-> - getABITypeAlignment(MVT::getTypeForValueType(SVT)); + getABITypeAlignment(SVT.getTypeForMVT()); if (Align <= OrigAlign && TLI.isOperationLegal(ISD::STORE, SVT)) return DAG.getStore(Chain, Value.getOperand(0), Ptr, ST->getSrcValue(), ST->getSrcValueOffset(), ST->isVolatile(), Align); @@ -4525,7 +4525,7 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) { if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Value)) { if (Value.getOpcode() != ISD::TargetConstantFP) { SDOperand Tmp; - switch (CFP->getValueType(0)) { + switch (CFP->getValueType(0).getSimpleVT()) { default: assert(0 && "Unknown FP type"); case MVT::f80: // We don't do this for these yet. case MVT::f128: @@ -4610,14 +4610,14 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) { // FIXME: is there such a thing as a truncating indexed store? if (ST->isTruncatingStore() && ST->isUnindexed() && - MVT::isInteger(Value.getValueType())) { + Value.getValueType().isInteger()) { // See if we can simplify the input to this truncstore with knowledge that // only the low bits are being used. 
For example: // "truncstore (or (shl x, 8), y), i8" -> "truncstore y, i8" SDOperand Shorter = GetDemandedBits(Value, APInt::getLowBitsSet(Value.getValueSizeInBits(), - MVT::getSizeInBits(ST->getMemoryVT()))); + ST->getMemoryVT().getSizeInBits())); AddToWorkList(Value.Val); if (Shorter.Val) return DAG.getTruncStore(Chain, Shorter, Ptr, ST->getSrcValue(), @@ -4629,7 +4629,7 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) { if (SimplifyDemandedBits(Value, APInt::getLowBitsSet( Value.getValueSizeInBits(), - MVT::getSizeInBits(ST->getMemoryVT())))) + ST->getMemoryVT().getSizeInBits()))) return SDOperand(N, 0); } @@ -4695,17 +4695,17 @@ SDOperand DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { if (isa<ConstantSDNode>(EltNo)) { unsigned Elt = cast<ConstantSDNode>(EltNo)->getValue(); bool NewLoad = false; - MVT::ValueType VT = InVec.getValueType(); - MVT::ValueType EVT = MVT::getVectorElementType(VT); - MVT::ValueType LVT = EVT; + MVT VT = InVec.getValueType(); + MVT EVT = VT.getVectorElementType(); + MVT LVT = EVT; if (InVec.getOpcode() == ISD::BIT_CONVERT) { - MVT::ValueType BCVT = InVec.getOperand(0).getValueType(); - if (!MVT::isVector(BCVT) - || (MVT::getSizeInBits(EVT) > - MVT::getSizeInBits(MVT::getVectorElementType(BCVT)))) + MVT BCVT = InVec.getOperand(0).getValueType(); + if (!BCVT.isVector() + || (EVT.getSizeInBits() > + BCVT.getVectorElementType().getSizeInBits())) return SDOperand(); InVec = InVec.getOperand(0); - EVT = MVT::getVectorElementType(BCVT); + EVT = BCVT.getVectorElementType(); NewLoad = true; } @@ -4739,7 +4739,7 @@ SDOperand DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { // Check the resultant load doesn't need a higher alignment than the // original load. unsigned NewAlign = TLI.getTargetMachine().getTargetData()-> - getABITypeAlignment(MVT::getTypeForValueType(LVT)); + getABITypeAlignment(LVT.getTypeForMVT()); if (!TLI.isOperationLegal(ISD::LOAD, LVT) || NewAlign > Align) return SDOperand(); Align = NewAlign; @@ -4747,10 +4747,10 @@ SDOperand DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { SDOperand NewPtr = LN0->getBasePtr(); if (Elt) { - unsigned PtrOff = MVT::getSizeInBits(LVT) * Elt / 8; - MVT::ValueType PtrType = NewPtr.getValueType(); + unsigned PtrOff = LVT.getSizeInBits() * Elt / 8; + MVT PtrType = NewPtr.getValueType(); if (TLI.isBigEndian()) - PtrOff = MVT::getSizeInBits(VT) / 8 - PtrOff; + PtrOff = VT.getSizeInBits() / 8 - PtrOff; NewPtr = DAG.getNode(ISD::ADD, PtrType, NewPtr, DAG.getConstant(PtrOff, PtrType)); } @@ -4764,9 +4764,9 @@ SDOperand DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { SDOperand DAGCombiner::visitBUILD_VECTOR(SDNode *N) { unsigned NumInScalars = N->getNumOperands(); - MVT::ValueType VT = N->getValueType(0); - unsigned NumElts = MVT::getVectorNumElements(VT); - MVT::ValueType EltType = MVT::getVectorElementType(VT); + MVT VT = N->getValueType(0); + unsigned NumElts = VT.getVectorNumElements(); + MVT EltType = VT.getVectorElementType(); // Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT // operations. If so, and if the EXTRACT_VECTOR_ELT vector inputs come from @@ -4830,7 +4830,7 @@ SDOperand DAGCombiner::visitBUILD_VECTOR(SDNode *N) { } // Add count and size info. - MVT::ValueType BuildVecVT = MVT::getVectorType(TLI.getPointerTy(), NumElts); + MVT BuildVecVT = MVT::getVectorVT(TLI.getPointerTy(), NumElts); // Return the new VECTOR_SHUFFLE node. SDOperand Ops[5]; @@ -4933,7 +4933,7 @@ SDOperand DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { // look though conversions that change things like v4f32 to v2f64. 
if (V->getOpcode() == ISD::BIT_CONVERT) { SDOperand ConvInput = V->getOperand(0); - if (MVT::getVectorNumElements(ConvInput.getValueType()) == NumElts) + if (ConvInput.getValueType().getVectorNumElements() == NumElts) V = ConvInput.Val; } @@ -5006,7 +5006,7 @@ SDOperand DAGCombiner::XformToShuffleWithZero(SDNode *N) { std::vector<SDOperand> IdxOps; unsigned NumOps = RHS.getNumOperands(); unsigned NumElts = NumOps; - MVT::ValueType EVT = MVT::getVectorElementType(RHS.getValueType()); + MVT EVT = RHS.getValueType().getVectorElementType(); for (unsigned i = 0; i != NumElts; ++i) { SDOperand Elt = RHS.getOperand(i); if (!isa<ConstantSDNode>(Elt)) @@ -5024,7 +5024,7 @@ SDOperand DAGCombiner::XformToShuffleWithZero(SDNode *N) { return SDOperand(); // Return the new VECTOR_SHUFFLE node. - MVT::ValueType VT = MVT::getVectorType(EVT, NumElts); + MVT VT = MVT::getVectorVT(EVT, NumElts); std::vector<SDOperand> Ops; LHS = DAG.getNode(ISD::BIT_CONVERT, VT, LHS); Ops.push_back(LHS); @@ -5052,10 +5052,10 @@ SDOperand DAGCombiner::SimplifyVBinOp(SDNode *N) { // things. Simplifying them may result in a loss of legality. if (AfterLegalize) return SDOperand(); - MVT::ValueType VT = N->getValueType(0); - assert(MVT::isVector(VT) && "SimplifyVBinOp only works on vectors!"); + MVT VT = N->getValueType(0); + assert(VT.isVector() && "SimplifyVBinOp only works on vectors!"); - MVT::ValueType EltType = MVT::getVectorElementType(VT); + MVT EltType = VT.getVectorElementType(); SDOperand LHS = N->getOperand(0); SDOperand RHS = N->getOperand(1); SDOperand Shuffle = XformToShuffleWithZero(N); @@ -5095,7 +5095,7 @@ SDOperand DAGCombiner::SimplifyVBinOp(SDNode *N) { } if (Ops.size() == LHS.getNumOperands()) { - MVT::ValueType VT = LHS.getValueType(); + MVT VT = LHS.getValueType(); return DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size()); } } @@ -5217,7 +5217,7 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, SDOperand N2, SDOperand N3, ISD::CondCode CC, bool NotExtCompare) { - MVT::ValueType VT = N2.getValueType(); + MVT VT = N2.getValueType(); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val); ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.Val); ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3.Val); @@ -5255,18 +5255,18 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, // Check to see if we can perform the "gzip trick", transforming // select_cc setlt X, 0, A, 0 -> and (sra X, size(X)-1), A if (N1C && N3C && N3C->isNullValue() && CC == ISD::SETLT && - MVT::isInteger(N0.getValueType()) && - MVT::isInteger(N2.getValueType()) && + N0.getValueType().isInteger() && + N2.getValueType().isInteger() && (N1C->isNullValue() || // (a < 0) ? b : 0 (N1C->getAPIntValue() == 1 && N0 == N2))) { // (a < 1) ? a : 0 - MVT::ValueType XType = N0.getValueType(); - MVT::ValueType AType = N2.getValueType(); + MVT XType = N0.getValueType(); + MVT AType = N2.getValueType(); if (XType >= AType) { // and (sra X, size(X)-1, A) -> "and (srl X, C2), A" iff A is a // single-bit constant. 
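The single-bit-constant case of this "gzip trick", handled in the lines that follow, is easiest to check with concrete numbers (illustrative sketch; the helper name is invented):

    // select_cc setlt X, 0, A, 0 with A a single-bit constant (A == 1 << Log2A):
    // shift X right logically so its sign bit lands on A's bit position.
    unsigned gzipShiftCount(unsigned XBits, unsigned Log2A) {
      return XBits - Log2A - 1;   // e.g. X : i32, A == 8  ->  32 - 3 - 1 == 28
    }
    // "and (srl X, 28), 8" then yields 8 exactly when X is negative, matching
    // the general "and (sra X, size(X)-1), A" form used further down.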
if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue()-1)) == 0)) { unsigned ShCtV = N2C->getAPIntValue().logBase2(); - ShCtV = MVT::getSizeInBits(XType)-ShCtV-1; + ShCtV = XType.getSizeInBits()-ShCtV-1; SDOperand ShCt = DAG.getConstant(ShCtV, TLI.getShiftAmountTy()); SDOperand Shift = DAG.getNode(ISD::SRL, XType, N0, ShCt); AddToWorkList(Shift.Val); @@ -5277,7 +5277,7 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, return DAG.getNode(ISD::AND, AType, Shift, N2); } SDOperand Shift = DAG.getNode(ISD::SRA, XType, N0, - DAG.getConstant(MVT::getSizeInBits(XType)-1, + DAG.getConstant(XType.getSizeInBits()-1, TLI.getShiftAmountTy())); AddToWorkList(Shift.Val); if (XType > AType) { @@ -5327,7 +5327,7 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, // FIXME: Turn all of these into setcc if setcc if setcc is legal // otherwise, go ahead with the folds. if (0 && N3C && N3C->isNullValue() && N2C && (N2C->getAPIntValue() == 1ULL)) { - MVT::ValueType XType = N0.getValueType(); + MVT XType = N0.getValueType(); if (TLI.isOperationLegal(ISD::SETCC, TLI.getSetCCResultType(N0))) { SDOperand Res = DAG.getSetCC(TLI.getSetCCResultType(N0), N0, N1, CC); if (Res.getValueType() != VT) @@ -5340,7 +5340,7 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, TLI.isOperationLegal(ISD::CTLZ, XType)) { SDOperand Ctlz = DAG.getNode(ISD::CTLZ, XType, N0); return DAG.getNode(ISD::SRL, XType, Ctlz, - DAG.getConstant(Log2_32(MVT::getSizeInBits(XType)), + DAG.getConstant(Log2_32(XType.getSizeInBits()), TLI.getShiftAmountTy())); } // setgt X, 0 -> srl (and (-X, ~X), size(X)-1) @@ -5351,13 +5351,13 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, DAG.getConstant(~0ULL, XType)); return DAG.getNode(ISD::SRL, XType, DAG.getNode(ISD::AND, XType, NegN0, NotN0), - DAG.getConstant(MVT::getSizeInBits(XType)-1, + DAG.getConstant(XType.getSizeInBits()-1, TLI.getShiftAmountTy())); } // setgt X, -1 -> xor (srl (X, size(X)-1), 1) if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT) { SDOperand Sign = DAG.getNode(ISD::SRL, XType, N0, - DAG.getConstant(MVT::getSizeInBits(XType)-1, + DAG.getConstant(XType.getSizeInBits()-1, TLI.getShiftAmountTy())); return DAG.getNode(ISD::XOR, XType, Sign, DAG.getConstant(1, XType)); } @@ -5367,10 +5367,10 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, // Y = sra (X, size(X)-1); xor (add (X, Y), Y) if (N1C && N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE) && N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1) && - N2.getOperand(0) == N1 && MVT::isInteger(N0.getValueType())) { - MVT::ValueType XType = N0.getValueType(); + N2.getOperand(0) == N1 && N0.getValueType().isInteger()) { + MVT XType = N0.getValueType(); SDOperand Shift = DAG.getNode(ISD::SRA, XType, N0, - DAG.getConstant(MVT::getSizeInBits(XType)-1, + DAG.getConstant(XType.getSizeInBits()-1, TLI.getShiftAmountTy())); SDOperand Add = DAG.getNode(ISD::ADD, XType, N0, Shift); AddToWorkList(Shift.Val); @@ -5382,10 +5382,10 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT && N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1)) { if (ConstantSDNode *SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0))) { - MVT::ValueType XType = N0.getValueType(); - if (SubC->isNullValue() && MVT::isInteger(XType)) { + MVT XType = N0.getValueType(); + if (SubC->isNullValue() && XType.isInteger()) { SDOperand Shift = DAG.getNode(ISD::SRA, XType, N0, - 
DAG.getConstant(MVT::getSizeInBits(XType)-1, + DAG.getConstant(XType.getSizeInBits()-1, TLI.getShiftAmountTy())); SDOperand Add = DAG.getNode(ISD::ADD, XType, N0, Shift); AddToWorkList(Shift.Val); @@ -5399,7 +5399,7 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, } /// SimplifySetCC - This is a stub for TargetLowering::SimplifySetCC. -SDOperand DAGCombiner::SimplifySetCC(MVT::ValueType VT, SDOperand N0, +SDOperand DAGCombiner::SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, ISD::CondCode Cond, bool foldBooleans) { TargetLowering::DAGCombinerInfo @@ -5502,13 +5502,13 @@ bool DAGCombiner::FindAliasInfo(SDNode *N, const Value *&SrcValue, int &SrcValueOffset) { if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { Ptr = LD->getBasePtr(); - Size = MVT::getSizeInBits(LD->getMemoryVT()) >> 3; + Size = LD->getMemoryVT().getSizeInBits() >> 3; SrcValue = LD->getSrcValue(); SrcValueOffset = LD->getSrcValueOffset(); return true; } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { Ptr = ST->getBasePtr(); - Size = MVT::getSizeInBits(ST->getMemoryVT()) >> 3; + Size = ST->getMemoryVT().getSizeInBits() >> 3; SrcValue = ST->getSrcValue(); SrcValueOffset = ST->getSrcValueOffset(); } else { diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index 99f1a03..83ba08d 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -128,13 +128,13 @@ public: /// getTypeAction - Return how we should legalize values of this type, either /// it is already legal or we need to expand it into multiple registers of /// smaller integer type, or we need to promote it to a larger type. - LegalizeAction getTypeAction(MVT::ValueType VT) const { + LegalizeAction getTypeAction(MVT VT) const { return (LegalizeAction)ValueTypeActions.getTypeAction(VT); } /// isTypeLegal - Return true if this type is legal on this target. /// - bool isTypeLegal(MVT::ValueType VT) const { + bool isTypeLegal(MVT VT) const { return getTypeAction(VT) == Legal; } @@ -196,7 +196,7 @@ private: /// /// If this is a legal shuffle, this method returns the (possibly promoted) /// build_vector Mask. If it's not a legal shuffle, it returns null. 
- SDNode *isShuffleLegal(MVT::ValueType VT, SDOperand Mask) const; + SDNode *isShuffleLegal(MVT VT, SDOperand Mask) const; bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest, SmallPtrSet<SDNode*, 32> &NodesLeadingTo); @@ -205,20 +205,14 @@ private: SDOperand ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned, SDOperand &Hi); - SDOperand ExpandIntToFP(bool isSigned, MVT::ValueType DestTy, - SDOperand Source); + SDOperand ExpandIntToFP(bool isSigned, MVT DestTy, SDOperand Source); - SDOperand EmitStackConvert(SDOperand SrcOp, MVT::ValueType SlotVT, - MVT::ValueType DestVT); + SDOperand EmitStackConvert(SDOperand SrcOp, MVT SlotVT, MVT DestVT); SDOperand ExpandBUILD_VECTOR(SDNode *Node); SDOperand ExpandSCALAR_TO_VECTOR(SDNode *Node); - SDOperand ExpandLegalINT_TO_FP(bool isSigned, - SDOperand LegalOp, - MVT::ValueType DestVT); - SDOperand PromoteLegalINT_TO_FP(SDOperand LegalOp, MVT::ValueType DestVT, - bool isSigned); - SDOperand PromoteLegalFP_TO_INT(SDOperand LegalOp, MVT::ValueType DestVT, - bool isSigned); + SDOperand ExpandLegalINT_TO_FP(bool isSigned, SDOperand LegalOp, MVT DestVT); + SDOperand PromoteLegalINT_TO_FP(SDOperand LegalOp, MVT DestVT, bool isSigned); + SDOperand PromoteLegalFP_TO_INT(SDOperand LegalOp, MVT DestVT, bool isSigned); SDOperand ExpandBSWAP(SDOperand Op); SDOperand ExpandBitCount(unsigned Opc, SDOperand Op); @@ -238,8 +232,7 @@ private: /// /// Note that this will also return true for shuffles that are promoted to a /// different type. -SDNode *SelectionDAGLegalize::isShuffleLegal(MVT::ValueType VT, - SDOperand Mask) const { +SDNode *SelectionDAGLegalize::isShuffleLegal(MVT VT, SDOperand Mask) const { switch (TLI.getOperationAction(ISD::VECTOR_SHUFFLE, VT)) { default: return 0; case TargetLowering::Legal: @@ -248,11 +241,11 @@ SDNode *SelectionDAGLegalize::isShuffleLegal(MVT::ValueType VT, case TargetLowering::Promote: { // If this is promoted to a different type, convert the shuffle mask and // ask if it is legal in the promoted type! - MVT::ValueType NVT = TLI.getTypeToPromoteTo(ISD::VECTOR_SHUFFLE, VT); + MVT NVT = TLI.getTypeToPromoteTo(ISD::VECTOR_SHUFFLE, VT); // If we changed # elements, change the shuffle mask. unsigned NumEltsGrowth = - MVT::getVectorNumElements(NVT) / MVT::getVectorNumElements(VT); + NVT.getVectorNumElements() / VT.getVectorNumElements(); assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!"); if (NumEltsGrowth > 1) { // Renumber the elements. @@ -458,20 +451,20 @@ bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest, /// HandleOp - Legalize, Promote, or Expand the specified operand as /// appropriate for its type. void SelectionDAGLegalize::HandleOp(SDOperand Op) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); switch (getTypeAction(VT)) { default: assert(0 && "Bad type action!"); case Legal: (void)LegalizeOp(Op); break; case Promote: (void)PromoteOp(Op); break; case Expand: - if (!MVT::isVector(VT)) { + if (!VT.isVector()) { // If this is an illegal scalar, expand it into its two component // pieces. SDOperand X, Y; if (Op.getOpcode() == ISD::TargetConstant) break; // Allow illegal target nodes. ExpandOp(Op, X, Y); - } else if (MVT::getVectorNumElements(VT) == 1) { + } else if (VT.getVectorNumElements() == 1) { // If this is an illegal single element vector, convert it to a // scalar operation. (void)ScalarizeVectorOp(Op); @@ -497,7 +490,7 @@ static SDOperand ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP, // double. 
This shrinks FP constants and canonicalizes them for targets where // an FP extending load is the same cost as a normal load (such as on the x87 // fp stack or PPC FP unit). - MVT::ValueType VT = CFP->getValueType(0); + MVT VT = CFP->getValueType(0); ConstantFP *LLVMC = ConstantFP::get(CFP->getValueAPF()); if (!UseCP) { if (VT!=MVT::f64 && VT!=MVT::f32) @@ -506,16 +499,16 @@ static SDOperand ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP, (VT == MVT::f64) ? MVT::i64 : MVT::i32); } - MVT::ValueType OrigVT = VT; - MVT::ValueType SVT = VT; + MVT OrigVT = VT; + MVT SVT = VT; while (SVT != MVT::f32) { - SVT = (unsigned)SVT - 1; + SVT = (MVT::SimpleValueType)(SVT.getSimpleVT() - 1); if (CFP->isValueValidForType(SVT, CFP->getValueAPF()) && // Only do this if the target has a native EXTLOAD instruction from // smaller type. TLI.isLoadXLegal(ISD::EXTLOAD, SVT) && TLI.ShouldShrinkFPConstant(OrigVT)) { - const Type *SType = MVT::getTypeForValueType(SVT); + const Type *SType = SVT.getTypeForMVT(); LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType)); VT = SVT; Extend = true; @@ -535,13 +528,13 @@ static SDOperand ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP, /// ExpandFCOPYSIGNToBitwiseOps - Expands fcopysign to a series of bitwise /// operations. static -SDOperand ExpandFCOPYSIGNToBitwiseOps(SDNode *Node, MVT::ValueType NVT, +SDOperand ExpandFCOPYSIGNToBitwiseOps(SDNode *Node, MVT NVT, SelectionDAG &DAG, TargetLowering &TLI) { - MVT::ValueType VT = Node->getValueType(0); - MVT::ValueType SrcVT = Node->getOperand(1).getValueType(); + MVT VT = Node->getValueType(0); + MVT SrcVT = Node->getOperand(1).getValueType(); assert((SrcVT == MVT::f32 || SrcVT == MVT::f64) && "fcopysign expansion only supported for f32 and f64"); - MVT::ValueType SrcNVT = (SrcVT == MVT::f64) ? MVT::i64 : MVT::i32; + MVT SrcNVT = (SrcVT == MVT::f64) ? MVT::i64 : MVT::i32; // First get the sign bit of second operand. SDOperand Mask1 = (SrcVT == MVT::f64) @@ -551,7 +544,7 @@ SDOperand ExpandFCOPYSIGNToBitwiseOps(SDNode *Node, MVT::ValueType NVT, SDOperand SignBit= DAG.getNode(ISD::BIT_CONVERT, SrcNVT, Node->getOperand(1)); SignBit = DAG.getNode(ISD::AND, SrcNVT, SignBit, Mask1); // Shift right or sign-extend it if the two operands have different types. - int SizeDiff = MVT::getSizeInBits(SrcNVT) - MVT::getSizeInBits(NVT); + int SizeDiff = SrcNVT.getSizeInBits() - NVT.getSizeInBits(); if (SizeDiff > 0) { SignBit = DAG.getNode(ISD::SRL, SrcNVT, SignBit, DAG.getConstant(SizeDiff, TLI.getShiftAmountTy())); @@ -579,17 +572,17 @@ SDOperand ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, SDOperand Chain = ST->getChain(); SDOperand Ptr = ST->getBasePtr(); SDOperand Val = ST->getValue(); - MVT::ValueType VT = Val.getValueType(); + MVT VT = Val.getValueType(); int Alignment = ST->getAlignment(); int SVOffset = ST->getSrcValueOffset(); - if (MVT::isFloatingPoint(ST->getMemoryVT()) || - MVT::isVector(ST->getMemoryVT())) { + if (ST->getMemoryVT().isFloatingPoint() || + ST->getMemoryVT().isVector()) { // Expand to a bitconvert of the value to the integer type of the // same size, then a (misaligned) int store. 
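The expansion described in the comment above picks an integer type with the same size as the floating-point or vector value being stored; the cases are spelled out in the lines that follow. Condensed as a sketch (illustrative; the fallback return is this sketch's own, not the code's):

    // Same-sized integer type for an unaligned FP/vector store, per the cases below.
    MVT sameSizeIntVT(MVT VT) {
      if (VT.is128BitVector() || VT == MVT::ppcf128 || VT == MVT::f128)
        return MVT::i128;
      if (VT.is64BitVector() || VT == MVT::f64)
        return MVT::i64;
      if (VT == MVT::f32)
        return MVT::i32;
      return MVT::Other;   // remaining cases are not handled by this sketch
    }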
- MVT::ValueType intVT; - if (MVT::is128BitVector(VT) || VT == MVT::ppcf128 || VT == MVT::f128) + MVT intVT; + if (VT.is128BitVector() || VT == MVT::ppcf128 || VT == MVT::f128) intVT = MVT::i128; - else if (MVT::is64BitVector(VT) || VT==MVT::f64) + else if (VT.is64BitVector() || VT==MVT::f64) intVT = MVT::i64; else if (VT==MVT::f32) intVT = MVT::i32; @@ -600,12 +593,13 @@ SDOperand ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, return DAG.getStore(Chain, Result, Ptr, ST->getSrcValue(), SVOffset, ST->isVolatile(), Alignment); } - assert(MVT::isInteger(ST->getMemoryVT()) && - !MVT::isVector(ST->getMemoryVT()) && + assert(ST->getMemoryVT().isInteger() && + !ST->getMemoryVT().isVector() && "Unaligned store of unknown type."); // Get the half-size VT - MVT::ValueType NewStoredVT = ST->getMemoryVT() - 1; - int NumBits = MVT::getSizeInBits(NewStoredVT); + MVT NewStoredVT = + (MVT::SimpleValueType)(ST->getMemoryVT().getSimpleVT() - 1); + int NumBits = NewStoredVT.getSizeInBits(); int IncrementSize = NumBits / 8; // Divide the stored value in two parts. @@ -635,16 +629,16 @@ SDOperand ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, int SVOffset = LD->getSrcValueOffset(); SDOperand Chain = LD->getChain(); SDOperand Ptr = LD->getBasePtr(); - MVT::ValueType VT = LD->getValueType(0); - MVT::ValueType LoadedVT = LD->getMemoryVT(); - if (MVT::isFloatingPoint(VT) || MVT::isVector(VT)) { + MVT VT = LD->getValueType(0); + MVT LoadedVT = LD->getMemoryVT(); + if (VT.isFloatingPoint() || VT.isVector()) { // Expand to a (misaligned) integer load of the same size, // then bitconvert to floating point or vector. - MVT::ValueType intVT; - if (MVT::is128BitVector(LoadedVT) || + MVT intVT; + if (LoadedVT.is128BitVector() || LoadedVT == MVT::ppcf128 || LoadedVT == MVT::f128) intVT = MVT::i128; - else if (MVT::is64BitVector(LoadedVT) || LoadedVT == MVT::f64) + else if (LoadedVT.is64BitVector() || LoadedVT == MVT::f64) intVT = MVT::i64; else if (LoadedVT == MVT::f32) intVT = MVT::i32; @@ -655,21 +649,21 @@ SDOperand ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, SVOffset, LD->isVolatile(), LD->getAlignment()); SDOperand Result = DAG.getNode(ISD::BIT_CONVERT, LoadedVT, newLoad); - if (MVT::isFloatingPoint(VT) && LoadedVT != VT) + if (VT.isFloatingPoint() && LoadedVT != VT) Result = DAG.getNode(ISD::FP_EXTEND, VT, Result); SDOperand Ops[] = { Result, Chain }; return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, MVT::Other), Ops, 2); } - assert(MVT::isInteger(LoadedVT) && !MVT::isVector(LoadedVT) && + assert(LoadedVT.isInteger() && !LoadedVT.isVector() && "Unaligned load of unsupported type."); // Compute the new VT that is half the size of the old one. This is an // integer MVT. - unsigned NumBits = MVT::getSizeInBits(LoadedVT); - MVT::ValueType NewLoadedVT; - NewLoadedVT = MVT::getIntegerType(NumBits/2); + unsigned NumBits = LoadedVT.getSizeInBits(); + MVT NewLoadedVT; + NewLoadedVT = MVT::getIntegerVT(NumBits/2); NumBits >>= 1; unsigned Alignment = LD->getAlignment(); @@ -717,23 +711,23 @@ SDOperand ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, /// no way of lowering. "Unroll" the vector, splitting out the scalars and /// operating on each element individually. 
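A condensed picture of what the unrolling defined below does, phrased with the new MVT calls (an illustrative sketch, not the real implementation: it assumes a two-operand, one-result node with vector operands, which the function handles more generally, and the index type is chosen arbitrarily):

    // Split a one-result binary vector op into NE scalar ops and rebuild the vector.
    // Assumes the usual SelectionDAG headers are available.
    SDOperand unrollBinaryVectorOp(SelectionDAG &DAG, SDOperand Op) {
      MVT VT      = Op.getValueType();
      unsigned NE = VT.getVectorNumElements();   // was MVT::getVectorNumElements(VT)
      MVT EltVT   = VT.getVectorElementType();   // was MVT::getVectorElementType(VT)
      SmallVector<SDOperand, 8> Scalars;
      for (unsigned i = 0; i != NE; ++i) {
        SDOperand LHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, EltVT,
                                    Op.getOperand(0), DAG.getConstant(i, MVT::i32));
        SDOperand RHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, EltVT,
                                    Op.getOperand(1), DAG.getConstant(i, MVT::i32));
        Scalars.push_back(DAG.getNode(Op.getOpcode(), EltVT, LHS, RHS));
      }
      return DAG.getNode(ISD::BUILD_VECTOR, VT, &Scalars[0], Scalars.size());
    }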
SDOperand SelectionDAGLegalize::UnrollVectorOp(SDOperand Op) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); assert(isTypeLegal(VT) && "Caller should expand or promote operands that are not legal!"); assert(Op.Val->getNumValues() == 1 && "Can't unroll a vector with multiple results!"); - unsigned NE = MVT::getVectorNumElements(VT); - MVT::ValueType EltVT = MVT::getVectorElementType(VT); + unsigned NE = VT.getVectorNumElements(); + MVT EltVT = VT.getVectorElementType(); SmallVector<SDOperand, 8> Scalars; SmallVector<SDOperand, 4> Operands(Op.getNumOperands()); for (unsigned i = 0; i != NE; ++i) { for (unsigned j = 0; j != Op.getNumOperands(); ++j) { SDOperand Operand = Op.getOperand(j); - MVT::ValueType OperandVT = Operand.getValueType(); - if (MVT::isVector(OperandVT)) { + MVT OperandVT = Operand.getValueType(); + if (OperandVT.isVector()) { // A vector operand; extract a single element. - MVT::ValueType OperandEltVT = MVT::getVectorElementType(OperandVT); + MVT OperandEltVT = OperandVT.getVectorElementType(); Operands[j] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, OperandEltVT, Operand, @@ -751,7 +745,7 @@ SDOperand SelectionDAGLegalize::UnrollVectorOp(SDOperand Op) { } /// GetFPLibCall - Return the right libcall for the given floating point type. -static RTLIB::Libcall GetFPLibCall(MVT::ValueType VT, +static RTLIB::Libcall GetFPLibCall(MVT VT, RTLIB::Libcall Call_F32, RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80, @@ -780,10 +774,10 @@ PerformInsertVectorEltInMemory(SDOperand Vec, SDOperand Val, SDOperand Idx) { // with a "move to register" or "extload into register" instruction, then // permute it into place, if the idx is a constant and if the idx is // supported by the target. - MVT::ValueType VT = Tmp1.getValueType(); - MVT::ValueType EltVT = MVT::getVectorElementType(VT); - MVT::ValueType IdxVT = Tmp3.getValueType(); - MVT::ValueType PtrVT = TLI.getPointerTy(); + MVT VT = Tmp1.getValueType(); + MVT EltVT = VT.getVectorElementType(); + MVT IdxVT = Tmp3.getValueType(); + MVT PtrVT = TLI.getPointerTy(); SDOperand StackPtr = DAG.CreateStackTemporary(VT); FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr.Val); @@ -798,7 +792,7 @@ PerformInsertVectorEltInMemory(SDOperand Vec, SDOperand Val, SDOperand Idx) { unsigned CastOpc = (IdxVT > PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND; Tmp3 = DAG.getNode(CastOpc, PtrVT, Tmp3); // Add the offset to the index. - unsigned EltSize = MVT::getSizeInBits(EltVT)/8; + unsigned EltSize = EltVT.getSizeInBits()/8; Tmp3 = DAG.getNode(ISD::MUL, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT)); SDOperand StackPtr2 = DAG.getNode(ISD::ADD, IdxVT, Tmp3, StackPtr); // Store the scalar value. 
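The stack-slot insert just above scales the element index by the element's size in bytes, which is now taken from the MVT itself. A tiny worked sketch (the helper name is invented):

    // Byte offset of element Idx within a vector spilled to a stack slot.
    unsigned eltByteOffset(MVT EltVT, unsigned Idx) {
      unsigned EltSize = EltVT.getSizeInBits() / 8;  // was MVT::getSizeInBits(EltVT)/8
      return Idx * EltSize;                          // e.g. i32 elements, Idx == 2 -> 8
    }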
@@ -911,7 +905,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Result = DAG.getConstant(0, TLI.getPointerTy()); break; case ISD::FRAME_TO_ARGS_OFFSET: { - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); switch (TLI.getOperationAction(Node->getOpcode(), VT)) { default: assert(0 && "This action is not supported yet!"); case TargetLowering::Custom: @@ -926,7 +920,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; case ISD::EXCEPTIONADDR: { Tmp1 = LegalizeOp(Node->getOperand(0)); - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); switch (TLI.getOperationAction(Node->getOpcode(), VT)) { default: assert(0 && "This action is not supported yet!"); case TargetLowering::Expand: { @@ -961,7 +955,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case ISD::EHSELECTION: { Tmp1 = LegalizeOp(Node->getOperand(0)); Tmp2 = LegalizeOp(Node->getOperand(1)); - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); switch (TLI.getOperationAction(Node->getOpcode(), VT)) { default: assert(0 && "This action is not supported yet!"); case TargetLowering::Expand: { @@ -994,7 +988,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { AddLegalizedOperand(Op.getValue(1), Tmp2); return Op.ResNo ? Tmp2 : Tmp1; case ISD::EH_RETURN: { - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); // The only "good" option for this node is to custom lower it. switch (TLI.getOperationAction(Node->getOpcode(), VT)) { default: assert(0 && "This action is not supported at all!"); @@ -1039,14 +1033,14 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { AddLegalizedOperand(Op.getValue(1), Result.getValue(1)); return Result.getValue(Op.ResNo); case ISD::UNDEF: { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); switch (TLI.getOperationAction(ISD::UNDEF, VT)) { default: assert(0 && "This action is not supported yet!"); case TargetLowering::Expand: - if (MVT::isInteger(VT)) + if (VT.isInteger()) Result = DAG.getConstant(0, VT); - else if (MVT::isFloatingPoint(VT)) - Result = DAG.getConstantFP(APFloat(APInt(MVT::getSizeInBits(VT), 0)), + else if (VT.isFloatingPoint()) + Result = DAG.getConstantFP(APFloat(APInt(VT.getSizeInBits(), 0)), VT); else assert(0 && "Unknown value type!"); @@ -1454,13 +1448,14 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // SCALAR_TO_VECTOR requires that the type of the value being inserted // match the element type of the vector being created. if (Tmp2.getValueType() == - MVT::getVectorElementType(Op.getValueType())) { + Op.getValueType().getVectorElementType()) { SDOperand ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, Tmp1.getValueType(), Tmp2); - unsigned NumElts = MVT::getVectorNumElements(Tmp1.getValueType()); - MVT::ValueType ShufMaskVT = MVT::getIntVectorWithNumElements(NumElts); - MVT::ValueType ShufMaskEltVT = MVT::getVectorElementType(ShufMaskVT); + unsigned NumElts = Tmp1.getValueType().getVectorNumElements(); + MVT ShufMaskVT = + MVT::getIntVectorWithNumElements(NumElts); + MVT ShufMaskEltVT = ShufMaskVT.getVectorElementType(); // We generate a shuffle of InVec and ScVec, so the shuffle mask // should be 0,1,2,3,4,5... 
with the appropriate element replaced with @@ -1531,9 +1526,9 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { } // FALLTHROUGH case TargetLowering::Expand: { - MVT::ValueType VT = Node->getValueType(0); - MVT::ValueType EltVT = MVT::getVectorElementType(VT); - MVT::ValueType PtrVT = TLI.getPointerTy(); + MVT VT = Node->getValueType(0); + MVT EltVT = VT.getVectorElementType(); + MVT PtrVT = TLI.getPointerTy(); SDOperand Mask = Node->getOperand(2); unsigned NumElems = Mask.getNumOperands(); SmallVector<SDOperand,8> Ops; @@ -1557,8 +1552,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { } case TargetLowering::Promote: { // Change base type to a different vector type. - MVT::ValueType OVT = Node->getValueType(0); - MVT::ValueType NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT); + MVT OVT = Node->getValueType(0); + MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT); // Cast the two input vectors. Tmp1 = DAG.getNode(ISD::BIT_CONVERT, NVT, Tmp1); @@ -1679,7 +1674,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { AddLegalizedOperand(SDOperand(Node, 1), Result.getValue(1)); return Result.getValue(Op.ResNo); case ISD::DYNAMIC_STACKALLOC: { - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain. Tmp2 = LegalizeOp(Node->getOperand(1)); // Legalize the size. Tmp3 = LegalizeOp(Node->getOperand(2)); // Legalize the alignment. @@ -1820,7 +1815,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { SDOperand Table = Result.getOperand(1); SDOperand Index = Result.getOperand(2); - MVT::ValueType PTy = TLI.getPointerTy(); + MVT PTy = TLI.getPointerTy(); MachineFunction &MF = DAG.getMachineFunction(); unsigned EntrySize = MF.getJumpTableInfo()->getEntrySize(); Index= DAG.getNode(ISD::MUL, PTy, Index, DAG.getConstant(EntrySize, PTy)); @@ -1939,7 +1934,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { ISD::LoadExtType ExtType = LD->getExtensionType(); if (ExtType == ISD::NON_EXTLOAD) { - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset()); Tmp3 = Result.getValue(0); Tmp4 = Result.getValue(1); @@ -1951,7 +1946,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // expand it. if (!TLI.allowsUnalignedMemoryAccesses()) { unsigned ABIAlignment = TLI.getTargetData()-> - getABITypeAlignment(MVT::getTypeForValueType(LD->getMemoryVT())); + getABITypeAlignment(LD->getMemoryVT().getTypeForMVT()); if (LD->getAlignment() < ABIAlignment){ Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.Val), DAG, TLI); @@ -1971,9 +1966,9 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; case TargetLowering::Promote: { // Only promote a load of vector type to another. - assert(MVT::isVector(VT) && "Cannot promote this load!"); + assert(VT.isVector() && "Cannot promote this load!"); // Change base type to a different vector type. - MVT::ValueType NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT); + MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT); Tmp1 = DAG.getLoad(NVT, Tmp1, Tmp2, LD->getSrcValue(), LD->getSrcValueOffset(), @@ -1989,13 +1984,13 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { AddLegalizedOperand(SDOperand(Node, 1), Tmp4); return Op.ResNo ? 
Tmp4 : Tmp3; } else { - MVT::ValueType SrcVT = LD->getMemoryVT(); - unsigned SrcWidth = MVT::getSizeInBits(SrcVT); + MVT SrcVT = LD->getMemoryVT(); + unsigned SrcWidth = SrcVT.getSizeInBits(); int SVOffset = LD->getSrcValueOffset(); unsigned Alignment = LD->getAlignment(); bool isVolatile = LD->isVolatile(); - if (SrcWidth != MVT::getStoreSizeInBits(SrcVT) && + if (SrcWidth != SrcVT.getStoreSizeInBits() && // Some targets pretend to have an i1 loading operation, and actually // load an i8. This trick is correct for ZEXTLOAD because the top 7 // bits are guaranteed to be zero; it helps the optimizers understand @@ -2007,8 +2002,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { TLI.getLoadXAction(ExtType, MVT::i1) == TargetLowering::Promote)) { // Promote to a byte-sized load if not loading an integral number of // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24. - unsigned NewWidth = MVT::getStoreSizeInBits(SrcVT); - MVT::ValueType NVT = MVT::getIntegerType(NewWidth); + unsigned NewWidth = SrcVT.getStoreSizeInBits(); + MVT NVT = MVT::getIntegerVT(NewWidth); SDOperand Ch; // The extra bits are guaranteed to be zero, since we stored them that @@ -2036,7 +2031,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Tmp2 = LegalizeOp(Ch); } else if (SrcWidth & (SrcWidth - 1)) { // If not loading a power-of-2 number of bits, expand as two loads. - assert(MVT::isExtendedVT(SrcVT) && !MVT::isVector(SrcVT) && + assert(SrcVT.isExtended() && !SrcVT.isVector() && "Unsupported extload!"); unsigned RoundWidth = 1 << Log2_32(SrcWidth); assert(RoundWidth < SrcWidth); @@ -2044,8 +2039,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { assert(ExtraWidth < RoundWidth); assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && "Load size not an integral number of bytes!"); - MVT::ValueType RoundVT = MVT::getIntegerType(RoundWidth); - MVT::ValueType ExtraVT = MVT::getIntegerType(ExtraWidth); + MVT RoundVT = MVT::getIntegerVT(RoundWidth); + MVT ExtraVT = MVT::getIntegerVT(ExtraWidth); SDOperand Lo, Hi, Ch; unsigned IncrementSize; @@ -2130,7 +2125,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // expand it. 
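// A minimal sketch of the width-query rewrite running through the extload
// hunks above, assuming a hypothetical memory type SrcVT (e.g. taken from
// LD->getMemoryVT()): the old MVT:: free functions become members.
  unsigned SrcWidth   = SrcVT.getSizeInBits();       // was MVT::getSizeInBits(SrcVT)
  unsigned StoreWidth = SrcVT.getStoreSizeInBits();  // was MVT::getStoreSizeInBits(SrcVT)
  bool NeedsBytePromotion = SrcWidth != StoreWidth;  // e.g. EXTLOAD:i20 becomes EXTLOAD:i24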
if (!TLI.allowsUnalignedMemoryAccesses()) { unsigned ABIAlignment = TLI.getTargetData()-> - getABITypeAlignment(MVT::getTypeForValueType(LD->getMemoryVT())); + getABITypeAlignment(LD->getMemoryVT().getTypeForMVT()); if (LD->getAlignment() < ABIAlignment){ Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.Val), DAG, TLI); @@ -2180,14 +2175,14 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { } } case ISD::EXTRACT_ELEMENT: { - MVT::ValueType OpTy = Node->getOperand(0).getValueType(); + MVT OpTy = Node->getOperand(0).getValueType(); switch (getTypeAction(OpTy)) { default: assert(0 && "EXTRACT_ELEMENT action for type unimplemented!"); case Legal: if (cast<ConstantSDNode>(Node->getOperand(1))->getValue()) { // 1 -> Hi Result = DAG.getNode(ISD::SRL, OpTy, Node->getOperand(0), - DAG.getConstant(MVT::getSizeInBits(OpTy)/2, + DAG.getConstant(OpTy.getSizeInBits()/2, TLI.getShiftAmountTy())); Result = DAG.getNode(ISD::TRUNCATE, Node->getValueType(0), Result); } else { @@ -2252,7 +2247,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Result = DAG.UpdateNodeOperands(Result, Tmp1, LegalizeOp(Tmp2), Tmp3); break; case Expand: - if (!MVT::isVector(Tmp2.getValueType())) { + if (!Tmp2.getValueType().isVector()) { SDOperand Lo, Hi; ExpandOp(Tmp2, Lo, Hi); @@ -2268,12 +2263,12 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { } else { SDNode *InVal = Tmp2.Val; int InIx = Tmp2.ResNo; - unsigned NumElems = MVT::getVectorNumElements(InVal->getValueType(InIx)); - MVT::ValueType EVT = MVT::getVectorElementType(InVal->getValueType(InIx)); + unsigned NumElems = InVal->getValueType(InIx).getVectorNumElements(); + MVT EVT = InVal->getValueType(InIx).getVectorElementType(); // Figure out if there is a simple type corresponding to this Vector // type. If so, convert to the vector type. - MVT::ValueType TVT = MVT::getVectorType(EVT, NumElems); + MVT TVT = MVT::getVectorVT(EVT, NumElems); if (TLI.isTypeLegal(TVT)) { // Turn this into a return of the vector type. Tmp2 = LegalizeOp(Tmp2); @@ -2321,7 +2316,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; case Expand: { SDOperand Lo, Hi; - assert(!MVT::isExtendedVT(Node->getOperand(i).getValueType()) && + assert(!Node->getOperand(i).getValueType().isExtended() && "FIXME: TODO: implement returning non-legal vector types!"); ExpandOp(Node->getOperand(i), Lo, Hi); NewValues.push_back(Lo); @@ -2417,7 +2412,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2, ST->getOffset()); - MVT::ValueType VT = Tmp3.getValueType(); + MVT VT = Tmp3.getValueType(); switch (TLI.getOperationAction(ISD::STORE, VT)) { default: assert(0 && "This action is not supported yet!"); case TargetLowering::Legal: @@ -2425,7 +2420,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // expand it. 
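// The ABI-alignment checks in these load/store hunks all use the same
// MVT-to-IR-type conversion, which also moves onto the class. A sketch using
// the surrounding function's TLI and a hypothetical MemVT from the node:
  const Type *Ty = MemVT.getTypeForMVT();  // was MVT::getTypeForValueType(MemVT)
  unsigned ABIAlign = TLI.getTargetData()->getABITypeAlignment(Ty);
  // expansion is only needed when the node's alignment is below ABIAlign and
  // the target does not allow unaligned accesses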
if (!TLI.allowsUnalignedMemoryAccesses()) { unsigned ABIAlignment = TLI.getTargetData()-> - getABITypeAlignment(MVT::getTypeForValueType(ST->getMemoryVT())); + getABITypeAlignment(ST->getMemoryVT().getTypeForMVT()); if (ST->getAlignment() < ABIAlignment) Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.Val), DAG, TLI); @@ -2436,7 +2431,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { if (Tmp1.Val) Result = Tmp1; break; case TargetLowering::Promote: - assert(MVT::isVector(VT) && "Unknown legal promote case!"); + assert(VT.isVector() && "Unknown legal promote case!"); Tmp3 = DAG.getNode(ISD::BIT_CONVERT, TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3); Result = DAG.getStore(Tmp1, Tmp3, Tmp2, @@ -2461,16 +2456,16 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // If this is a vector type, then we have to calculate the increment as // the product of the element size in bytes, and the number of elements // in the high half of the vector. - if (MVT::isVector(ST->getValue().getValueType())) { + if (ST->getValue().getValueType().isVector()) { SDNode *InVal = ST->getValue().Val; int InIx = ST->getValue().ResNo; - MVT::ValueType InVT = InVal->getValueType(InIx); - unsigned NumElems = MVT::getVectorNumElements(InVT); - MVT::ValueType EVT = MVT::getVectorElementType(InVT); + MVT InVT = InVal->getValueType(InIx); + unsigned NumElems = InVT.getVectorNumElements(); + MVT EVT = InVT.getVectorElementType(); // Figure out if there is a simple type corresponding to this Vector // type. If so, convert to the vector type. - MVT::ValueType TVT = MVT::getVectorType(EVT, NumElems); + MVT TVT = MVT::getVectorVT(EVT, NumElems); if (TLI.isTypeLegal(TVT)) { // Turn this into a normal store of the vector type. Tmp3 = LegalizeOp(ST->getValue()); @@ -2489,12 +2484,12 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; } else { SplitVectorOp(ST->getValue(), Lo, Hi); - IncrementSize = MVT::getVectorNumElements(Lo.Val->getValueType(0)) * - MVT::getSizeInBits(EVT)/8; + IncrementSize = Lo.Val->getValueType(0).getVectorNumElements() * + EVT.getSizeInBits()/8; } } else { ExpandOp(ST->getValue(), Lo, Hi); - IncrementSize = Hi.Val ? MVT::getSizeInBits(Hi.getValueType())/8 : 0; + IncrementSize = Hi.Val ? Hi.getValueType().getSizeInBits()/8 : 0; if (TLI.isBigEndian()) std::swap(Lo, Hi); @@ -2537,20 +2532,20 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { SVOffset, MVT::i8, isVolatile, Alignment); } - MVT::ValueType StVT = ST->getMemoryVT(); - unsigned StWidth = MVT::getSizeInBits(StVT); + MVT StVT = ST->getMemoryVT(); + unsigned StWidth = StVT.getSizeInBits(); - if (StWidth != MVT::getStoreSizeInBits(StVT)) { + if (StWidth != StVT.getStoreSizeInBits()) { // Promote to a byte-sized store with upper bits zero if not // storing an integral number of bytes. For example, promote // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1) - MVT::ValueType NVT = MVT::getIntegerType(MVT::getStoreSizeInBits(StVT)); + MVT NVT = MVT::getIntegerVT(StVT.getStoreSizeInBits()); Tmp3 = DAG.getZeroExtendInReg(Tmp3, StVT); Result = DAG.getTruncStore(Tmp1, Tmp3, Tmp2, ST->getSrcValue(), SVOffset, NVT, isVolatile, Alignment); } else if (StWidth & (StWidth - 1)) { // If not storing a power-of-2 number of bits, expand as two stores. 
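// The vector-store hunk above shows the vector half of the renaming: element
// and element-count accessors become members, and the factory is now
// MVT::getVectorVT. A sketch for a hypothetical vector type InVT:
  MVT EltVT      = InVT.getVectorElementType();    // was MVT::getVectorElementType(InVT)
  unsigned NElts = InVT.getVectorNumElements();    // was MVT::getVectorNumElements(InVT)
  MVT TVT        = MVT::getVectorVT(EltVT, NElts); // was MVT::getVectorType(EltVT, NElts)
  bool StoreWhole = TLI.isTypeLegal(TVT);          // store as one vector if the type is legal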
- assert(MVT::isExtendedVT(StVT) && !MVT::isVector(StVT) && + assert(StVT.isExtended() && !StVT.isVector() && "Unsupported truncstore!"); unsigned RoundWidth = 1 << Log2_32(StWidth); assert(RoundWidth < StWidth); @@ -2558,8 +2553,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { assert(ExtraWidth < RoundWidth); assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && "Store size not an integral number of bytes!"); - MVT::ValueType RoundVT = MVT::getIntegerType(RoundWidth); - MVT::ValueType ExtraVT = MVT::getIntegerType(ExtraWidth); + MVT RoundVT = MVT::getIntegerVT(RoundWidth); + MVT ExtraVT = MVT::getIntegerVT(ExtraWidth); SDOperand Lo, Hi; unsigned IncrementSize; @@ -2612,7 +2607,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // expand it. if (!TLI.allowsUnalignedMemoryAccesses()) { unsigned ABIAlignment = TLI.getTargetData()-> - getABITypeAlignment(MVT::getTypeForValueType(ST->getMemoryVT())); + getABITypeAlignment(ST->getMemoryVT().getTypeForMVT()); if (ST->getAlignment() < ABIAlignment) Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.Val), DAG, TLI); @@ -2761,13 +2756,13 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { } break; case TargetLowering::Promote: { - MVT::ValueType NVT = + MVT NVT = TLI.getTypeToPromoteTo(ISD::SELECT, Tmp2.getValueType()); unsigned ExtOp, TruncOp; - if (MVT::isVector(Tmp2.getValueType())) { + if (Tmp2.getValueType().isVector()) { ExtOp = ISD::BIT_CONVERT; TruncOp = ISD::BIT_CONVERT; - } else if (MVT::isInteger(Tmp2.getValueType())) { + } else if (Tmp2.getValueType().isInteger()) { ExtOp = ISD::ANY_EXTEND; TruncOp = ISD::TRUNCATE; } else { @@ -2847,23 +2842,23 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // First step, figure out the appropriate operation to use. // Allow SETCC to not be supported for all legal data types // Mostly this targets FP - MVT::ValueType NewInTy = Node->getOperand(0).getValueType(); - MVT::ValueType OldVT = NewInTy; OldVT = OldVT; + MVT NewInTy = Node->getOperand(0).getValueType(); + MVT OldVT = NewInTy; OldVT = OldVT; // Scan for the appropriate larger type to use. while (1) { - NewInTy = (MVT::ValueType)(NewInTy+1); + NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT()+1); - assert(MVT::isInteger(NewInTy) == MVT::isInteger(OldVT) && + assert(NewInTy.isInteger() == OldVT.isInteger() && "Fell off of the edge of the integer world"); - assert(MVT::isFloatingPoint(NewInTy) == MVT::isFloatingPoint(OldVT) && + assert(NewInTy.isFloatingPoint() == OldVT.isFloatingPoint() && "Fell off of the edge of the floating point world"); // If the target supports SETCC of this type, use it. if (TLI.isOperationLegal(ISD::SETCC, NewInTy)) break; } - if (MVT::isInteger(NewInTy)) + if (NewInTy.isInteger()) assert(0 && "Cannot promote Legal Integer SETCC yet"); else { Tmp1 = DAG.getNode(ISD::FP_EXTEND, NewInTy, Tmp1); @@ -2878,7 +2873,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case TargetLowering::Expand: // Expand a setcc node into a select_cc of the same condition, lhs, and // rhs that selects between const 1 (true) and const 0 (false). 
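// The truncstore split above builds its two narrower integer types with the
// renamed factory (MVT::getIntegerVT, formerly MVT::getIntegerType), and the
// extended-type predicate becomes a member. A sketch for a non-power-of-2
// store width, e.g. an i24 truncstore split into i16 + i8 pieces:
  assert(StVT.isExtended() && !StVT.isVector());  // was MVT::isExtendedVT / MVT::isVector
  unsigned RoundWidth = 1 << Log2_32(StWidth);    // largest power of 2 below StWidth
  unsigned ExtraWidth = StWidth - RoundWidth;     // 24 -> 16 + 8
  MVT RoundVT = MVT::getIntegerVT(RoundWidth);
  MVT ExtraVT = MVT::getIntegerVT(ExtraWidth);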
- MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); Result = DAG.getNode(ISD::SELECT_CC, VT, Tmp1, Tmp2, DAG.getConstant(1, VT), DAG.getConstant(0, VT), Tmp3); @@ -2983,7 +2978,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { if (Tmp1.Val) Result = Tmp1; break; case TargetLowering::Expand: { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); // See if multiply or divide can be lowered using two-result operations. SDVTList VTs = DAG.getVTList(VT, VT); @@ -3056,7 +3051,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; } - assert(MVT::isVector(Node->getValueType(0)) && + assert(Node->getValueType(0).isVector() && "Cannot expand this binary operator!"); // Expand the operation into a bunch of nasty scalar code. Result = LegalizeOp(UnrollVectorOp(Op)); @@ -3068,9 +3063,9 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case ISD::AND: case ISD::OR: case ISD::XOR: { - MVT::ValueType OVT = Node->getValueType(0); - MVT::ValueType NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT); - assert(MVT::isVector(OVT) && "Cannot promote this BinOp!"); + MVT OVT = Node->getValueType(0); + MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT); + assert(OVT.isVector() && "Cannot promote this BinOp!"); // Bit convert each of the values to the new type. Tmp1 = DAG.getNode(ISD::BIT_CONVERT, NVT, Tmp1); Tmp2 = DAG.getNode(ISD::BIT_CONVERT, NVT, Tmp2); @@ -3128,7 +3123,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { TLI.getOperationAction(ISD::FNEG, Tmp1.getValueType()) == TargetLowering::Legal) { // Get the sign bit of the RHS. - MVT::ValueType IVT = + MVT IVT = Tmp2.getValueType() == MVT::f32 ? MVT::i32 : MVT::i64; SDOperand SignBit = DAG.getNode(ISD::BIT_CONVERT, IVT, Tmp2); SignBit = DAG.getSetCC(TLI.getSetCCResultType(SignBit), @@ -3146,7 +3141,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { } // Otherwise, do bitwise ops! - MVT::ValueType NVT = + MVT NVT = Node->getValueType(0) == MVT::f32 ? MVT::i32 : MVT::i64; Result = ExpandFCOPYSIGNToBitwiseOps(Node, NVT, DAG, TLI); Result = DAG.getNode(ISD::BIT_CONVERT, Node->getValueType(0), Result); @@ -3180,7 +3175,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { return Result; case ISD::BUILD_PAIR: { - MVT::ValueType PairTy = Node->getValueType(0); + MVT PairTy = Node->getValueType(0); // TODO: handle the case where the Lo and Hi operands are not of legal type Tmp1 = LegalizeOp(Node->getOperand(0)); // Lo Tmp2 = LegalizeOp(Node->getOperand(1)); // Hi @@ -3196,7 +3191,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, PairTy, Tmp1); Tmp2 = DAG.getNode(ISD::ANY_EXTEND, PairTy, Tmp2); Tmp2 = DAG.getNode(ISD::SHL, PairTy, Tmp2, - DAG.getConstant(MVT::getSizeInBits(PairTy)/2, + DAG.getConstant(PairTy.getSizeInBits()/2, TLI.getShiftAmountTy())); Result = DAG.getNode(ISD::OR, PairTy, Tmp1, Tmp2); break; @@ -3225,7 +3220,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case TargetLowering::Expand: { unsigned DivOpc= (Node->getOpcode() == ISD::UREM) ? ISD::UDIV : ISD::SDIV; bool isSigned = DivOpc == ISD::SDIV; - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); // See if remainder can be lowered using two-result operations. 
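// The classification predicates follow the same pattern everywhere in the
// patch: MVT::isInteger(VT), MVT::isFloatingPoint(VT) and MVT::isVector(VT)
// become VT.isInteger(), VT.isFloatingPoint() and VT.isVector(). The shape of
// the remainder expansion above, condensed for a result type VT:
  if (VT.isInteger()) {
    // X % Y -> X - (X/Y)*Y when the divide is legal; otherwise unroll a
    // vector or fall back to an integer libcall
  } else {
    assert(VT.isFloatingPoint() &&
           "remainder op must have integer or floating-point type");
    // unroll FP vectors, otherwise emit an fmod-style libcall
  }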
SDVTList VTs = DAG.getVTList(VT, VT); @@ -3240,14 +3235,14 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; } - if (MVT::isInteger(VT)) { + if (VT.isInteger()) { if (TLI.getOperationAction(DivOpc, VT) == TargetLowering::Legal) { // X % Y -> X-X/Y*Y Result = DAG.getNode(DivOpc, VT, Tmp1, Tmp2); Result = DAG.getNode(ISD::MUL, VT, Result, Tmp2); Result = DAG.getNode(ISD::SUB, VT, Tmp1, Result); - } else if (MVT::isVector(VT)) { + } else if (VT.isVector()) { Result = LegalizeOp(UnrollVectorOp(Op)); } else { assert(VT == MVT::i32 && @@ -3258,9 +3253,9 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Result = ExpandLibCall(LC, Node, isSigned, Dummy); } } else { - assert(MVT::isFloatingPoint(VT) && + assert(VT.isFloatingPoint() && "remainder op must have integer or floating-point type"); - if (MVT::isVector(VT)) { + if (VT.isVector()) { Result = LegalizeOp(UnrollVectorOp(Op)); } else { // Floating point mod -> fmod libcall. @@ -3278,7 +3273,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain. Tmp2 = LegalizeOp(Node->getOperand(1)); // Legalize the pointer. - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); switch (TLI.getOperationAction(Node->getOpcode(), MVT::Other)) { default: assert(0 && "This action is not supported yet!"); case TargetLowering::Custom: @@ -3302,7 +3297,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { SDOperand VAList = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp2, V, 0); // Increment the pointer, VAList, to the next vaarg Tmp3 = DAG.getNode(ISD::ADD, TLI.getPointerTy(), VAList, - DAG.getConstant(MVT::getSizeInBits(VT)/8, + DAG.getConstant(VT.getSizeInBits()/8, TLI.getPointerTy())); // Store the incremented VAList to the legalized pointer Tmp3 = DAG.getStore(VAList.getValue(1), Tmp3, Tmp2, V, 0); @@ -3420,9 +3415,9 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Result = DAG.UpdateNodeOperands(Result, Tmp1); break; case TargetLowering::Promote: { - MVT::ValueType OVT = Tmp1.getValueType(); - MVT::ValueType NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT); - unsigned DiffBits = MVT::getSizeInBits(NVT) - MVT::getSizeInBits(OVT); + MVT OVT = Tmp1.getValueType(); + MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT); + unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits(); Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, NVT, Tmp1); Tmp1 = DAG.getNode(ISD::BSWAP, NVT, Tmp1); @@ -3453,8 +3448,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { } break; case TargetLowering::Promote: { - MVT::ValueType OVT = Tmp1.getValueType(); - MVT::ValueType NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT); + MVT OVT = Tmp1.getValueType(); + MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT); // Zero extend the argument. 
Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, NVT, Tmp1); @@ -3467,16 +3462,16 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case ISD::CTTZ: //if Tmp1 == sizeinbits(NVT) then Tmp1 = sizeinbits(Old VT) Tmp2 = DAG.getSetCC(TLI.getSetCCResultType(Tmp1), Tmp1, - DAG.getConstant(MVT::getSizeInBits(NVT), NVT), + DAG.getConstant(NVT.getSizeInBits(), NVT), ISD::SETEQ); Result = DAG.getNode(ISD::SELECT, NVT, Tmp2, - DAG.getConstant(MVT::getSizeInBits(OVT),NVT), Tmp1); + DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1); break; case ISD::CTLZ: // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT)) Result = DAG.getNode(ISD::SUB, NVT, Tmp1, - DAG.getConstant(MVT::getSizeInBits(NVT) - - MVT::getSizeInBits(OVT), NVT)); + DAG.getConstant(NVT.getSizeInBits() - + OVT.getSizeInBits(), NVT)); break; } break; @@ -3516,7 +3511,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; case ISD::FABS: { // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X). - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); Tmp2 = DAG.getConstantFP(0.0, VT); Tmp2 = DAG.getSetCC(TLI.getSetCCResultType(Tmp1), Tmp1, Tmp2, ISD::SETUGT); @@ -3527,10 +3522,10 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case ISD::FSQRT: case ISD::FSIN: case ISD::FCOS: { - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); // Expand unsupported unary vector operators by unrolling them. - if (MVT::isVector(VT)) { + if (VT.isVector()) { Result = LegalizeOp(UnrollVectorOp(Op)); break; } @@ -3560,10 +3555,10 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { } break; case ISD::FPOWI: { - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); // Expand unsupported unary vector operators by unrolling them. - if (MVT::isVector(VT)) { + if (VT.isVector()) { Result = LegalizeOp(UnrollVectorOp(Op)); break; } @@ -3579,17 +3574,17 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { if (!isTypeLegal(Node->getOperand(0).getValueType())) { Result = EmitStackConvert(Node->getOperand(0), Node->getValueType(0), Node->getValueType(0)); - } else if (MVT::isVector(Op.getOperand(0).getValueType())) { + } else if (Op.getOperand(0).getValueType().isVector()) { // The input has to be a vector type, we have to either scalarize it, pack // it, or convert it based on whether the input vector type is legal. SDNode *InVal = Node->getOperand(0).Val; int InIx = Node->getOperand(0).ResNo; - unsigned NumElems = MVT::getVectorNumElements(InVal->getValueType(InIx)); - MVT::ValueType EVT = MVT::getVectorElementType(InVal->getValueType(InIx)); + unsigned NumElems = InVal->getValueType(InIx).getVectorNumElements(); + MVT EVT = InVal->getValueType(InIx).getVectorElementType(); // Figure out if there is a simple type corresponding to this Vector // type. If so, convert to the vector type. - MVT::ValueType TVT = MVT::getVectorType(EVT, NumElems); + MVT TVT = MVT::getVectorVT(EVT, NumElems); if (TLI.isTypeLegal(TVT)) { // Turn this into a bit convert of the vector input. 
Result = DAG.getNode(ISD::BIT_CONVERT, Node->getValueType(0), @@ -3716,11 +3711,11 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case TargetLowering::Expand: if (Node->getOpcode() == ISD::FP_TO_UINT) { SDOperand True, False; - MVT::ValueType VT = Node->getOperand(0).getValueType(); - MVT::ValueType NVT = Node->getValueType(0); + MVT VT = Node->getOperand(0).getValueType(); + MVT NVT = Node->getValueType(0); const uint64_t zero[] = {0, 0}; - APFloat apf = APFloat(APInt(MVT::getSizeInBits(VT), 2, zero)); - APInt x = APInt::getSignBit(MVT::getSizeInBits(NVT)); + APFloat apf = APFloat(APInt(VT.getSizeInBits(), 2, zero)); + APInt x = APInt::getSignBit(NVT.getSizeInBits()); (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven); Tmp2 = DAG.getConstantFP(apf, VT); Tmp3 = DAG.getSetCC(TLI.getSetCCResultType(Node->getOperand(0)), @@ -3740,8 +3735,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { } break; case Expand: { - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType OVT = Node->getOperand(0).getValueType(); + MVT VT = Op.getValueType(); + MVT OVT = Node->getOperand(0).getValueType(); // Convert ppcf128 to i32 if (OVT == MVT::ppcf128 && VT == MVT::i32) { if (Node->getOpcode() == ISD::FP_TO_SINT) { @@ -3858,8 +3853,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; case ISD::FP_EXTEND: { - MVT::ValueType DstVT = Op.getValueType(); - MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); + MVT DstVT = Op.getValueType(); + MVT SrcVT = Op.getOperand(0).getValueType(); if (TLI.getConvertAction(SrcVT, DstVT) == TargetLowering::Expand) { // The only other way we can lower this is to turn it into a STORE, // LOAD pair, targetting a temporary location (a stack slot). @@ -3880,8 +3875,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; } case ISD::FP_ROUND: { - MVT::ValueType DstVT = Op.getValueType(); - MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); + MVT DstVT = Op.getValueType(); + MVT SrcVT = Op.getOperand(0).getValueType(); if (TLI.getConvertAction(SrcVT, DstVT) == TargetLowering::Expand) { if (SrcVT == MVT::ppcf128) { SDOperand Lo; @@ -3949,7 +3944,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case ISD::FP_ROUND_INREG: case ISD::SIGN_EXTEND_INREG: { Tmp1 = LegalizeOp(Node->getOperand(0)); - MVT::ValueType ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); + MVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); // If this operation is not supported, convert it to a shl/shr or load/store // pair. @@ -3963,8 +3958,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { if (Node->getOpcode() == ISD::SIGN_EXTEND_INREG) { // NOTE: we could fall back on load/store here too for targets without // SAR. However, it is doubtful that any exist. - unsigned BitsDiff = MVT::getSizeInBits(Node->getValueType(0)) - - MVT::getSizeInBits(ExtraVT); + unsigned BitsDiff = Node->getValueType(0).getSizeInBits() - + ExtraVT.getSizeInBits(); SDOperand ShiftCst = DAG.getConstant(BitsDiff, TLI.getShiftAmountTy()); Result = DAG.getNode(ISD::SHL, Node->getValueType(0), Node->getOperand(0), ShiftCst); @@ -4004,7 +3999,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { return Op.ResNo ? 
Tmp1 : Result; } case ISD::FLT_ROUNDS_: { - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); switch (TLI.getOperationAction(Node->getOpcode(), VT)) { default: assert(0 && "This action not supported for this op yet!"); case TargetLowering::Custom: @@ -4019,7 +4014,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; } case ISD::TRAP: { - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); switch (TLI.getOperationAction(Node->getOpcode(), VT)) { default: assert(0 && "This action not supported for this op yet!"); case TargetLowering::Legal: @@ -4064,11 +4059,11 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { /// have the correct bits for the low portion of the register, but no guarantee /// is made about the top bits: it may be zero, sign-extended, or garbage. SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) { - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType NVT = TLI.getTypeToTransformTo(VT); + MVT VT = Op.getValueType(); + MVT NVT = TLI.getTypeToTransformTo(VT); assert(getTypeAction(VT) == Promote && "Caller should expand or legalize operands that are not promotable!"); - assert(NVT > VT && MVT::isInteger(NVT) == MVT::isInteger(VT) && + assert(NVT > VT && NVT.isInteger() == VT.isInteger() && "Cannot promote to smaller type!"); SDOperand Tmp1, Tmp2, Tmp3; @@ -4341,7 +4336,7 @@ SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) { // These operators require that their input be sign extended. Tmp1 = PromoteOp(Node->getOperand(0)); Tmp2 = PromoteOp(Node->getOperand(1)); - if (MVT::isInteger(NVT)) { + if (NVT.isInteger()) { Tmp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, NVT, Tmp1, DAG.getValueType(VT)); Tmp2 = DAG.getNode(ISD::SIGN_EXTEND_INREG, NVT, Tmp2, @@ -4350,7 +4345,7 @@ SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) { Result = DAG.getNode(Node->getOpcode(), NVT, Tmp1, Tmp2); // Perform FP_ROUND: this is probably overly pessimistic. - if (MVT::isFloatingPoint(NVT) && NoExcessFPPrecision) + if (NVT.isFloatingPoint() && NoExcessFPPrecision) Result = DAG.getNode(ISD::FP_ROUND_INREG, NVT, Result, DAG.getValueType(VT)); break; @@ -4381,7 +4376,7 @@ SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) { // These operators require that their input be zero extended. 
Tmp1 = PromoteOp(Node->getOperand(0)); Tmp2 = PromoteOp(Node->getOperand(1)); - assert(MVT::isInteger(NVT) && "Operators don't apply to FP!"); + assert(NVT.isInteger() && "Operators don't apply to FP!"); Tmp1 = DAG.getZeroExtendInReg(Tmp1, VT); Tmp2 = DAG.getZeroExtendInReg(Tmp2, VT); Result = DAG.getNode(Node->getOpcode(), NVT, Tmp1, Tmp2); @@ -4416,7 +4411,7 @@ SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) { SDOperand VAList = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp2, V, 0); // Increment the pointer, VAList, to the next vaarg Tmp3 = DAG.getNode(ISD::ADD, TLI.getPointerTy(), VAList, - DAG.getConstant(MVT::getSizeInBits(VT)/8, + DAG.getConstant(VT.getSizeInBits()/8, TLI.getPointerTy())); // Store the incremented VAList to the legalized pointer Tmp3 = DAG.getStore(VAList.getValue(1), Tmp3, Tmp2, V, 0); @@ -4445,7 +4440,7 @@ SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) { Tmp2 = PromoteOp(Node->getOperand(1)); // Legalize the op0 Tmp3 = PromoteOp(Node->getOperand(2)); // Legalize the op1 - unsigned VT2 = Tmp2.getValueType(); + MVT VT2 = Tmp2.getValueType(); assert(VT2 == Tmp3.getValueType() && "PromoteOp SELECT: Operands 2 and 3 ValueTypes don't match"); // Ensure that the resulting node is at least the same size as the operands' @@ -4465,8 +4460,8 @@ SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) { Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, NVT, Tmp1); Tmp1 = DAG.getNode(ISD::BSWAP, NVT, Tmp1); Result = DAG.getNode(ISD::SRL, NVT, Tmp1, - DAG.getConstant(MVT::getSizeInBits(NVT) - - MVT::getSizeInBits(VT), + DAG.getConstant(NVT.getSizeInBits() - + VT.getSizeInBits(), TLI.getShiftAmountTy())); break; case ISD::CTPOP: @@ -4483,16 +4478,16 @@ SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) { case ISD::CTTZ: // if Tmp1 == sizeinbits(NVT) then Tmp1 = sizeinbits(Old VT) Tmp2 = DAG.getSetCC(TLI.getSetCCResultType(Tmp1), Tmp1, - DAG.getConstant(MVT::getSizeInBits(NVT), NVT), + DAG.getConstant(NVT.getSizeInBits(), NVT), ISD::SETEQ); Result = DAG.getNode(ISD::SELECT, NVT, Tmp2, - DAG.getConstant(MVT::getSizeInBits(VT), NVT), Tmp1); + DAG.getConstant(VT.getSizeInBits(), NVT), Tmp1); break; case ISD::CTLZ: //Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT)) Result = DAG.getNode(ISD::SUB, NVT, Tmp1, - DAG.getConstant(MVT::getSizeInBits(NVT) - - MVT::getSizeInBits(VT), NVT)); + DAG.getConstant(NVT.getSizeInBits() - + VT.getSizeInBits(), NVT)); break; } break; @@ -4525,8 +4520,8 @@ SDOperand SelectionDAGLegalize::ExpandEXTRACT_VECTOR_ELT(SDOperand Op) { SDOperand Vec = Op.getOperand(0); SDOperand Idx = Op.getOperand(1); - MVT::ValueType TVT = Vec.getValueType(); - unsigned NumElems = MVT::getVectorNumElements(TVT); + MVT TVT = Vec.getValueType(); + unsigned NumElems = TVT.getVectorNumElements(); switch (TLI.getOperationAction(ISD::EXTRACT_VECTOR_ELT, TVT)) { default: assert(0 && "This action is not supported yet!"); @@ -4575,12 +4570,11 @@ SDOperand SelectionDAGLegalize::ExpandEXTRACT_VECTOR_ELT(SDOperand Op) { SDOperand Ch = DAG.getStore(DAG.getEntryNode(), Vec, StackPtr, NULL, 0); // Add the offset to the index. 
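// Most of the patch is the declaration change itself: getValueType() now
// yields the MVT class rather than an MVT::ValueType enum, so locals are
// declared as MVT. The SELECT hunk above also converts a local that had been
// held in a plain unsigned:
  MVT VT2 = Tmp2.getValueType();  // was: unsigned VT2 = Tmp2.getValueType();
  assert(VT2 == Tmp3.getValueType() &&
         "PromoteOp SELECT: Operands 2 and 3 ValueTypes don't match");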
- unsigned EltSize = MVT::getSizeInBits(Op.getValueType())/8; + unsigned EltSize = Op.getValueType().getSizeInBits()/8; Idx = DAG.getNode(ISD::MUL, Idx.getValueType(), Idx, DAG.getConstant(EltSize, Idx.getValueType())); - if (MVT::getSizeInBits(Idx.getValueType()) > - MVT::getSizeInBits(TLI.getPointerTy())) + if (Idx.getValueType().getSizeInBits() > TLI.getPointerTy().getSizeInBits()) Idx = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), Idx); else Idx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), Idx); @@ -4600,9 +4594,9 @@ SDOperand SelectionDAGLegalize::ExpandEXTRACT_SUBVECTOR(SDOperand Op) { SDOperand Vec = Op.getOperand(0); SDOperand Idx = LegalizeOp(Op.getOperand(1)); - unsigned NumElems = MVT::getVectorNumElements(Vec.getValueType()); + unsigned NumElems = Vec.getValueType().getVectorNumElements(); - if (NumElems == MVT::getVectorNumElements(Op.getValueType())) { + if (NumElems == Op.getValueType().getVectorNumElements()) { // This must be an access of the desired vector length. Return it. return Vec; } @@ -4643,9 +4637,9 @@ void SelectionDAGLegalize::LegalizeSetCCOperands(SDOperand &LHS, Tmp2 = PromoteOp(RHS); // RHS // If this is an FP compare, the operands have already been extended. - if (MVT::isInteger(LHS.getValueType())) { - MVT::ValueType VT = LHS.getValueType(); - MVT::ValueType NVT = TLI.getTypeToTransformTo(VT); + if (LHS.getValueType().isInteger()) { + MVT VT = LHS.getValueType(); + MVT NVT = TLI.getTypeToTransformTo(VT); // Otherwise, we have to insert explicit sign or zero extends. Note // that we could insert sign extends for ALL conditions, but zero extend @@ -4678,7 +4672,7 @@ void SelectionDAGLegalize::LegalizeSetCCOperands(SDOperand &LHS, } break; case Expand: { - MVT::ValueType VT = LHS.getValueType(); + MVT VT = LHS.getValueType(); if (VT == MVT::f32 || VT == MVT::f64) { // Expand into one or more soft-fp libcall(s). RTLIB::Libcall LC1, LC2 = RTLIB::UNKNOWN_LIBCALL; @@ -4881,17 +4875,17 @@ void SelectionDAGLegalize::LegalizeSetCCOperands(SDOperand &LHS, /// a load from the stack slot to DestVT, extending it if needed. /// The resultant code need not be legal. SDOperand SelectionDAGLegalize::EmitStackConvert(SDOperand SrcOp, - MVT::ValueType SlotVT, - MVT::ValueType DestVT) { + MVT SlotVT, + MVT DestVT) { // Create the stack frame object. SDOperand FIPtr = DAG.CreateStackTemporary(SlotVT); FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr); int SPFI = StackPtrFI->getIndex(); - unsigned SrcSize = MVT::getSizeInBits(SrcOp.getValueType()); - unsigned SlotSize = MVT::getSizeInBits(SlotVT); - unsigned DestSize = MVT::getSizeInBits(DestVT); + unsigned SrcSize = SrcOp.getValueType().getSizeInBits(); + unsigned SlotSize = SlotVT.getSizeInBits(); + unsigned DestSize = DestVT.getSizeInBits(); // Emit a store to the stack slot. Use a truncstore if the input value is // later than DestVT. @@ -4904,7 +4898,7 @@ SDOperand SelectionDAGLegalize::EmitStackConvert(SDOperand SrcOp, assert(SrcSize == SlotSize && "Invalid store"); Store = DAG.getStore(DAG.getEntryNode(), SrcOp, FIPtr, PseudoSourceValue::getFixedStack(), - SPFI, SlotVT); + SPFI); } // Result is a load from the stack slot. @@ -4975,7 +4969,7 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { // If all elements are constants, create a load from the constant pool. 
if (isConstant) { - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); std::vector<Constant*> CV; for (unsigned i = 0, e = NumElems; i != e; ++i) { if (ConstantFPSDNode *V = @@ -4987,7 +4981,7 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { } else { assert(Node->getOperand(i).getOpcode() == ISD::UNDEF); const Type *OpNTy = - MVT::getTypeForValueType(Node->getOperand(0).getValueType()); + Node->getOperand(0).getValueType().getTypeForMVT(); CV.push_back(UndefValue::get(OpNTy)); } } @@ -4999,9 +4993,8 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { if (SplatValue.Val) { // Splat of one value? // Build the shuffle constant vector: <0, 0, 0, 0> - MVT::ValueType MaskVT = - MVT::getIntVectorWithNumElements(NumElems); - SDOperand Zero = DAG.getConstant(0, MVT::getVectorElementType(MaskVT)); + MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); + SDOperand Zero = DAG.getConstant(0, MaskVT.getVectorElementType()); std::vector<SDOperand> ZeroVec(NumElems, Zero); SDOperand SplatMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &ZeroVec[0], ZeroVec.size()); @@ -5037,8 +5030,8 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { std::swap(Val1, Val2); // Build the shuffle constant vector: e.g. <0, 4, 0, 4> - MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); - MVT::ValueType MaskEltVT = MVT::getVectorElementType(MaskVT); + MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); + MVT MaskEltVT = MaskVT.getVectorElementType(); std::vector<SDOperand> MaskVec(NumElems); // Set elements of the shuffle mask for Val1. @@ -5072,14 +5065,13 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { // Otherwise, we can't handle this case efficiently. Allocate a sufficiently // aligned object on the stack, store each element into it, then load // the result as a vector. - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); // Create the stack frame object. SDOperand FIPtr = DAG.CreateStackTemporary(VT); // Emit a store of each element to the stack slot. SmallVector<SDOperand, 8> Stores; - unsigned TypeByteSize = - MVT::getSizeInBits(Node->getOperand(0).getValueType())/8; + unsigned TypeByteSize = Node->getOperand(0).getValueType().getSizeInBits()/8; // Store (in the right endianness) the elements to memory. for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { // Ignore undef elements. @@ -5113,7 +5105,7 @@ void SelectionDAGLegalize::ExpandShiftParts(unsigned NodeOp, ExpandOp(Op, LHSL, LHSH); SDOperand Ops[] = { LHSL, LHSH, Amt }; - MVT::ValueType VT = LHSL.getValueType(); + MVT VT = LHSL.getValueType(); Lo = DAG.getNode(NodeOp, DAG.getNodeValueTypes(VT, VT), 2, Ops, 3); Hi = Lo.getValue(1); } @@ -5128,12 +5120,12 @@ bool SelectionDAGLegalize::ExpandShift(unsigned Opc, SDOperand Op,SDOperand Amt, assert((Opc == ISD::SHL || Opc == ISD::SRA || Opc == ISD::SRL) && "This is not a shift!"); - MVT::ValueType NVT = TLI.getTypeToTransformTo(Op.getValueType()); + MVT NVT = TLI.getTypeToTransformTo(Op.getValueType()); SDOperand ShAmt = LegalizeOp(Amt); - MVT::ValueType ShTy = ShAmt.getValueType(); - unsigned ShBits = MVT::getSizeInBits(ShTy); - unsigned VTBits = MVT::getSizeInBits(Op.getValueType()); - unsigned NVTBits = MVT::getSizeInBits(NVT); + MVT ShTy = ShAmt.getValueType(); + unsigned ShBits = ShTy.getSizeInBits(); + unsigned VTBits = Op.getValueType().getSizeInBits(); + unsigned NVTBits = NVT.getSizeInBits(); // Handle the case when Amt is an immediate. 
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Amt.Val)) { @@ -5284,8 +5276,8 @@ SDOperand SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, TargetLowering::ArgListTy Args; TargetLowering::ArgListEntry Entry; for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { - MVT::ValueType ArgVT = Node->getOperand(i).getValueType(); - const Type *ArgTy = MVT::getTypeForValueType(ArgVT); + MVT ArgVT = Node->getOperand(i).getValueType(); + const Type *ArgTy = ArgVT.getTypeForMVT(); Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; Entry.isSExt = isSigned; Entry.isZExt = !isSigned; @@ -5295,7 +5287,7 @@ SDOperand SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, TLI.getPointerTy()); // Splice the libcall in wherever FindInputOutputChains tells us to. - const Type *RetTy = MVT::getTypeForValueType(Node->getValueType(0)); + const Type *RetTy = Node->getValueType(0).getTypeForMVT(); std::pair<SDOperand,SDOperand> CallInfo = TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, CallingConv::C, false, Callee, Args, DAG); @@ -5321,8 +5313,8 @@ SDOperand SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, /// ExpandIntToFP - Expand a [US]INT_TO_FP operation. /// SDOperand SelectionDAGLegalize:: -ExpandIntToFP(bool isSigned, MVT::ValueType DestTy, SDOperand Source) { - MVT::ValueType SourceVT = Source.getValueType(); +ExpandIntToFP(bool isSigned, MVT DestTy, SDOperand Source) { + MVT SourceVT = Source.getValueType(); bool ExpandSource = getTypeAction(SourceVT) == Expand; // Special case for i32 source to take advantage of UINTTOFP_I32_F32, etc. @@ -5360,7 +5352,7 @@ ExpandIntToFP(bool isSigned, MVT::ValueType DestTy, SDOperand Source) { if (DestTy == MVT::f32) FudgeInReg = DAG.getLoad(MVT::f32, DAG.getEntryNode(), CPIdx, PseudoSourceValue::getConstantPool(), 0); - else if (MVT::getSizeInBits(DestTy) > MVT::getSizeInBits(MVT::f32)) + else if (DestTy.getSizeInBits() > MVT(MVT::f32).getSizeInBits()) // FIXME: Avoid the extend by construction the right constantpool? FudgeInReg = DAG.getExtLoad(ISD::EXTLOAD, DestTy, DAG.getEntryNode(), CPIdx, @@ -5369,12 +5361,12 @@ ExpandIntToFP(bool isSigned, MVT::ValueType DestTy, SDOperand Source) { else assert(0 && "Unexpected conversion"); - MVT::ValueType SCVT = SignedConv.getValueType(); + MVT SCVT = SignedConv.getValueType(); if (SCVT != DestTy) { // Destination type needs to be expanded as well. The FADD now we are // constructing will be expanded into a libcall. - if (MVT::getSizeInBits(SCVT) != MVT::getSizeInBits(DestTy)) { - assert(MVT::getSizeInBits(SCVT) * 2 == MVT::getSizeInBits(DestTy)); + if (SCVT.getSizeInBits() != DestTy.getSizeInBits()) { + assert(SCVT.getSizeInBits() * 2 == DestTy.getSizeInBits()); SignedConv = DAG.getNode(ISD::BUILD_PAIR, DestTy, SignedConv, SignedConv.getValue(1)); } @@ -5455,7 +5447,7 @@ ExpandIntToFP(bool isSigned, MVT::ValueType DestTy, SDOperand Source) { /// legal for the target. 
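// One wrinkle of MVT becoming a class: comparing against a fixed type means
// wrapping the simple enum value first, since the accessors live on the class
// rather than on the enumerator. A sketch of the comparison used by the
// int-to-fp expansion above for a hypothetical destination type DestTy:
  if (DestTy.getSizeInBits() > MVT(MVT::f32).getSizeInBits())
    ;  // extend the f32 fudge constant up to DestTy
  // old form: MVT::getSizeInBits(DestTy) > MVT::getSizeInBits(MVT::f32)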
SDOperand SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, SDOperand Op0, - MVT::ValueType DestVT) { + MVT DestVT) { if (Op0.getValueType() == MVT::i32) { // simple 32-bit [signed|unsigned] integer to float/double expansion @@ -5501,10 +5493,12 @@ SDOperand SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, if (DestVT == MVT::f64) { // do nothing Result = Sub; - } else if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(MVT::f64)) { + } else if (DestVT.getSizeInBits() < + MVT(MVT::f64).getSizeInBits()) { Result = DAG.getNode(ISD::FP_ROUND, DestVT, Sub, DAG.getIntPtrConstant(0)); - } else if (MVT::getSizeInBits(DestVT) > MVT::getSizeInBits(MVT::f64)) { + } else if (DestVT.getSizeInBits() > + MVT(MVT::f64).getSizeInBits()) { Result = DAG.getNode(ISD::FP_EXTEND, DestVT, Sub); } return Result; @@ -5523,7 +5517,7 @@ SDOperand SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, // as a negative number. To counteract this, the dynamic code adds an // offset depending on the data type. uint64_t FF; - switch (Op0.getValueType()) { + switch (Op0.getValueType().getSimpleVT()) { default: assert(0 && "Unsupported integer type!"); case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float) case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float) @@ -5556,17 +5550,17 @@ SDOperand SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, /// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP /// operation that takes a larger input. SDOperand SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDOperand LegalOp, - MVT::ValueType DestVT, + MVT DestVT, bool isSigned) { // First step, figure out the appropriate *INT_TO_FP operation to use. - MVT::ValueType NewInTy = LegalOp.getValueType(); + MVT NewInTy = LegalOp.getValueType(); unsigned OpToUse = 0; // Scan for the appropriate larger type to use. while (1) { - NewInTy = (MVT::ValueType)(NewInTy+1); - assert(MVT::isInteger(NewInTy) && "Ran out of possibilities!"); + NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT()+1); + assert(NewInTy.isInteger() && "Ran out of possibilities!"); // If the target supports SINT_TO_FP of this type, use it. switch (TLI.getOperationAction(ISD::SINT_TO_FP, NewInTy)) { @@ -5611,17 +5605,17 @@ SDOperand SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDOperand LegalOp, /// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT /// operation that returns a larger result. SDOperand SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDOperand LegalOp, - MVT::ValueType DestVT, + MVT DestVT, bool isSigned) { // First step, figure out the appropriate FP_TO*INT operation to use. - MVT::ValueType NewOutTy = DestVT; + MVT NewOutTy = DestVT; unsigned OpToUse = 0; // Scan for the appropriate larger type to use. while (1) { - NewOutTy = (MVT::ValueType)(NewOutTy+1); - assert(MVT::isInteger(NewOutTy) && "Ran out of possibilities!"); + NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT()+1); + assert(NewOutTy.isInteger() && "Ran out of possibilities!"); // If the target supports FP_TO_SINT returning this type, use it. switch (TLI.getOperationAction(ISD::FP_TO_SINT, NewOutTy)) { @@ -5672,10 +5666,10 @@ SDOperand SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDOperand LegalOp, /// ExpandBSWAP - Open code the operations for BSWAP of the specified operation. 
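// Scanning for a larger legal type used to rely on MVT::ValueType being a raw
// enum that could simply be incremented. With MVT as a class, the promotion
// loops above step through the underlying SimpleValueType instead. A sketch,
// assuming NewInTy starts at the operand's type:
  while (1) {
    NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT() + 1);
    assert(NewInTy.isInteger() && "Ran out of possibilities!");
    if (TLI.isOperationLegal(ISD::SINT_TO_FP, NewInTy))
      break;  // found a wider type whose conversion the target supports
  }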
/// SDOperand SelectionDAGLegalize::ExpandBSWAP(SDOperand Op) { - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType SHVT = TLI.getShiftAmountTy(); + MVT VT = Op.getValueType(); + MVT SHVT = TLI.getShiftAmountTy(); SDOperand Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; - switch (VT) { + switch (VT.getSimpleVT()) { default: assert(0 && "Unhandled Expand type in BSWAP!"); abort(); case MVT::i16: Tmp2 = DAG.getNode(ISD::SHL, VT, Op, DAG.getConstant(8, SHVT)); @@ -5727,9 +5721,9 @@ SDOperand SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDOperand Op) { 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL }; - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType ShVT = TLI.getShiftAmountTy(); - unsigned len = MVT::getSizeInBits(VT); + MVT VT = Op.getValueType(); + MVT ShVT = TLI.getShiftAmountTy(); + unsigned len = VT.getSizeInBits(); for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { //x = (x & mask[i][len/8]) + (x >> (1 << i) & mask[i][len/8]) SDOperand Tmp2 = DAG.getConstant(mask[i], VT); @@ -5750,9 +5744,9 @@ SDOperand SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDOperand Op) { // return popcount(~x); // // but see also: http://www.hackersdelight.org/HDcode/nlz.cc - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType ShVT = TLI.getShiftAmountTy(); - unsigned len = MVT::getSizeInBits(VT); + MVT VT = Op.getValueType(); + MVT ShVT = TLI.getShiftAmountTy(); + unsigned len = VT.getSizeInBits(); for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { SDOperand Tmp3 = DAG.getConstant(1ULL << i, ShVT); Op = DAG.getNode(ISD::OR, VT, Op, DAG.getNode(ISD::SRL, VT, Op, Tmp3)); @@ -5765,7 +5759,7 @@ SDOperand SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDOperand Op) { // unless the target has ctlz but not ctpop, in which case we use: // { return 32 - nlz(~x & (x-1)); } // see also http://www.hackersdelight.org/HDcode/ntz.cc - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); SDOperand Tmp2 = DAG.getConstant(~0ULL, VT); SDOperand Tmp3 = DAG.getNode(ISD::AND, VT, DAG.getNode(ISD::XOR, VT, Op, Tmp2), @@ -5774,7 +5768,7 @@ SDOperand SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDOperand Op) { if (!TLI.isOperationLegal(ISD::CTPOP, VT) && TLI.isOperationLegal(ISD::CTLZ, VT)) return DAG.getNode(ISD::SUB, VT, - DAG.getConstant(MVT::getSizeInBits(VT), VT), + DAG.getConstant(VT.getSizeInBits(), VT), DAG.getNode(ISD::CTLZ, VT, Tmp3)); return DAG.getNode(ISD::CTPOP, VT, Tmp3); } @@ -5787,13 +5781,12 @@ SDOperand SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDOperand Op) { /// ExpandedNodes map is filled in for any results that are expanded, and the /// Lo/Hi values are returned. void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType NVT = TLI.getTypeToTransformTo(VT); + MVT VT = Op.getValueType(); + MVT NVT = TLI.getTypeToTransformTo(VT); SDNode *Node = Op.Val; assert(getTypeAction(VT) == Expand && "Not an expanded type!"); - assert(((MVT::isInteger(NVT) && NVT < VT) || MVT::isFloatingPoint(VT) || - MVT::isVector(VT)) && - "Cannot expand to FP value or to larger int value!"); + assert(((NVT.isInteger() && NVT < VT) || VT.isFloatingPoint() || + VT.isVector()) && "Cannot expand to FP value or to larger int value!"); // See if we already expanded it. 
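// Because the type is now a class, switches over a value type dispatch on
// getSimpleVT(), as the BSWAP expansion and the INT_TO_FP table above do.
// A skeleton for a hypothetical VT:
  switch (VT.getSimpleVT()) {  // was: switch (VT)
  default: assert(0 && "Unhandled Expand type in BSWAP!"); abort();
  case MVT::i16:
    // swap the two bytes with a pair of shifts and an OR
    break;
  }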
DenseMap<SDOperand, std::pair<SDOperand, SDOperand> >::iterator I @@ -5844,7 +5837,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ Hi = DAG.getNode(ISD::UNDEF, NVT); break; case ISD::Constant: { - unsigned NVTBits = MVT::getSizeInBits(NVT); + unsigned NVTBits = NVT.getSizeInBits(); const APInt &Cst = cast<ConstantSDNode>(Node)->getAPIntValue(); Lo = DAG.getConstant(APInt(Cst).trunc(NVTBits), NVT); Hi = DAG.getConstant(Cst.lshr(NVTBits).trunc(NVTBits), NVT); @@ -5893,7 +5886,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ // The high part gets the sign extension from the lo-part. This handles // things like sextinreg V:i64 from i8. Hi = DAG.getNode(ISD::SRA, NVT, Lo, - DAG.getConstant(MVT::getSizeInBits(NVT)-1, + DAG.getConstant(NVT.getSizeInBits()-1, TLI.getShiftAmountTy())); break; @@ -5916,7 +5909,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::CTLZ: { // ctlz (HL) -> ctlz(H) != 32 ? ctlz(H) : (ctlz(L)+32) ExpandOp(Node->getOperand(0), Lo, Hi); - SDOperand BitsC = DAG.getConstant(MVT::getSizeInBits(NVT), NVT); + SDOperand BitsC = DAG.getConstant(NVT.getSizeInBits(), NVT); SDOperand HLZ = DAG.getNode(ISD::CTLZ, NVT, Hi); SDOperand TopNotZero = DAG.getSetCC(TLI.getSetCCResultType(HLZ), HLZ, BitsC, ISD::SETNE); @@ -5931,7 +5924,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::CTTZ: { // cttz (HL) -> cttz(L) != 32 ? cttz(L) : (cttz(H)+32) ExpandOp(Node->getOperand(0), Lo, Hi); - SDOperand BitsC = DAG.getConstant(MVT::getSizeInBits(NVT), NVT); + SDOperand BitsC = DAG.getConstant(NVT.getSizeInBits(), NVT); SDOperand LTZ = DAG.getNode(ISD::CTTZ, NVT, Lo); SDOperand BotNotZero = DAG.getSetCC(TLI.getSetCCResultType(LTZ), LTZ, BitsC, ISD::SETNE); @@ -5980,7 +5973,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ } // Increment the pointer to the other half. - unsigned IncrementSize = MVT::getSizeInBits(Lo.getValueType())/8; + unsigned IncrementSize = Lo.getValueType().getSizeInBits()/8; Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr, DAG.getIntPtrConstant(IncrementSize)); SVOffset += IncrementSize; @@ -5998,7 +5991,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ if (TLI.isBigEndian()) std::swap(Lo, Hi); } else { - MVT::ValueType EVT = LD->getMemoryVT(); + MVT EVT = LD->getMemoryVT(); if ((VT == MVT::f64 && EVT == MVT::f32) || (VT == MVT::ppcf128 && (EVT==MVT::f64 || EVT==MVT::f32))) { @@ -6025,7 +6018,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ if (ExtType == ISD::SEXTLOAD) { // The high part is obtained by SRA'ing all but one of the bits of the // lo part. - unsigned LoSize = MVT::getSizeInBits(Lo.getValueType()); + unsigned LoSize = Lo.getValueType().getSizeInBits(); Hi = DAG.getNode(ISD::SRA, NVT, Lo, DAG.getConstant(LoSize-1, TLI.getShiftAmountTy())); } else if (ExtType == ISD::ZEXTLOAD) { @@ -6085,7 +6078,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ // The high part is obtained by SRA'ing all but one of the bits of the lo // part. 
- unsigned LoSize = MVT::getSizeInBits(Lo.getValueType()); + unsigned LoSize = Lo.getValueType().getSizeInBits(); Hi = DAG.getNode(ISD::SRA, NVT, Lo, DAG.getConstant(LoSize-1, TLI.getShiftAmountTy())); break; @@ -6134,7 +6127,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ // If source operand will be expanded to the same type as VT, i.e. // i64 <- f64, i32 <- f32, expand the source operand instead. - MVT::ValueType VT0 = Node->getOperand(0).getValueType(); + MVT VT0 = Node->getOperand(0).getValueType(); if (getTypeAction(VT0) == Expand && TLI.getTypeToTransformTo(VT0) == VT) { ExpandOp(Node->getOperand(0), Lo, Hi); break; @@ -6668,7 +6661,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::SINT_TO_FP: case ISD::UINT_TO_FP: { bool isSigned = Node->getOpcode() == ISD::SINT_TO_FP; - MVT::ValueType SrcVT = Node->getOperand(0).getValueType(); + MVT SrcVT = Node->getOperand(0).getValueType(); // Promote the operand if needed. Do this before checking for // ppcf128 so conversions of i16 and i8 work. @@ -6753,18 +6746,18 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ /// two smaller values, still of vector type. void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi) { - assert(MVT::isVector(Op.getValueType()) && "Cannot split non-vector type!"); + assert(Op.getValueType().isVector() && "Cannot split non-vector type!"); SDNode *Node = Op.Val; - unsigned NumElements = MVT::getVectorNumElements(Op.getValueType()); + unsigned NumElements = Op.getValueType().getVectorNumElements(); assert(NumElements > 1 && "Cannot split a single element vector!"); - MVT::ValueType NewEltVT = MVT::getVectorElementType(Op.getValueType()); + MVT NewEltVT = Op.getValueType().getVectorElementType(); unsigned NewNumElts_Lo = 1 << Log2_32(NumElements-1); unsigned NewNumElts_Hi = NumElements - NewNumElts_Lo; - MVT::ValueType NewVT_Lo = MVT::getVectorType(NewEltVT, NewNumElts_Lo); - MVT::ValueType NewVT_Hi = MVT::getVectorType(NewEltVT, NewNumElts_Hi); + MVT NewVT_Lo = MVT::getVectorVT(NewEltVT, NewNumElts_Lo); + MVT NewVT_Hi = MVT::getVectorVT(NewEltVT, NewNumElts_Hi); // See if we already split it. std::map<SDOperand, std::pair<SDOperand, SDOperand> >::iterator I @@ -6812,7 +6805,7 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, // Build the low part. SDOperand Mask = Node->getOperand(2); SmallVector<SDOperand, 8> Ops; - MVT::ValueType PtrVT = TLI.getPointerTy(); + MVT PtrVT = TLI.getPointerTy(); // Insert all of the elements from the input that are needed. We use // buildvector of extractelement here because the input vectors will have @@ -6887,7 +6880,7 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, SplitVectorOp(Node->getOperand(1), LL, LH); SplitVectorOp(Node->getOperand(2), RL, RH); - if (MVT::isVector(Cond.getValueType())) { + if (Cond.getValueType().isVector()) { // Handle a vector merge. 
SDOperand CL, CH; SplitVectorOp(Cond, CL, CH); @@ -6969,7 +6962,7 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, bool isVolatile = LD->isVolatile(); Lo = DAG.getLoad(NewVT_Lo, Ch, Ptr, SV, SVOffset, isVolatile, Alignment); - unsigned IncrementSize = NewNumElts_Lo * MVT::getSizeInBits(NewEltVT)/8; + unsigned IncrementSize = NewNumElts_Lo * NewEltVT.getSizeInBits()/8; Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr, DAG.getIntPtrConstant(IncrementSize)); SVOffset += IncrementSize; @@ -6989,8 +6982,8 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, // We know the result is a vector. The input may be either a vector or a // scalar value. SDOperand InOp = Node->getOperand(0); - if (!MVT::isVector(InOp.getValueType()) || - MVT::getVectorNumElements(InOp.getValueType()) == 1) { + if (!InOp.getValueType().isVector() || + InOp.getValueType().getVectorNumElements() == 1) { // The input is a scalar or single-element vector. // Lower to a store/load so that it can be split. // FIXME: this could be improved probably. @@ -7024,11 +7017,10 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, /// (e.g. v1f32), convert it into the equivalent operation that returns a /// scalar (e.g. f32) value. SDOperand SelectionDAGLegalize::ScalarizeVectorOp(SDOperand Op) { - assert(MVT::isVector(Op.getValueType()) && - "Bad ScalarizeVectorOp invocation!"); + assert(Op.getValueType().isVector() && "Bad ScalarizeVectorOp invocation!"); SDNode *Node = Op.Val; - MVT::ValueType NewVT = MVT::getVectorElementType(Op.getValueType()); - assert(MVT::getVectorNumElements(Op.getValueType()) == 1); + MVT NewVT = Op.getValueType().getVectorElementType(); + assert(Op.getValueType().getVectorNumElements() == 1); // See if we already scalarized it. std::map<SDOperand, SDOperand>::iterator I = ScalarizedNodes.find(Op); @@ -7118,7 +7110,7 @@ SDOperand SelectionDAGLegalize::ScalarizeVectorOp(SDOperand Op) { break; case ISD::BIT_CONVERT: { SDOperand Op0 = Op.getOperand(0); - if (MVT::getVectorNumElements(Op0.getValueType()) == 1) + if (Op0.getValueType().getVectorNumElements() == 1) Op0 = ScalarizeVectorOp(Op0); Result = DAG.getNode(ISD::BIT_CONVERT, NewVT, Op0); break; diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp index c0cfe21..61d0e45 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp @@ -68,7 +68,7 @@ void DAGTypeLegalizer::run() { unsigned i = 0; unsigned NumResults = N->getNumValues(); do { - MVT::ValueType ResultVT = N->getValueType(i); + MVT ResultVT = N->getValueType(i); switch (getTypeAction(ResultVT)) { default: assert(false && "Unknown action!"); @@ -98,7 +98,7 @@ void DAGTypeLegalizer::run() { unsigned NumOperands = N->getNumOperands(); bool NeedsRevisit = false; for (i = 0; i != NumOperands; ++i) { - MVT::ValueType OpVT = N->getOperand(i).getValueType(); + MVT OpVT = N->getOperand(i).getValueType(); switch (getTypeAction(OpVT)) { default: assert(false && "Unknown action!"); @@ -472,13 +472,12 @@ void DAGTypeLegalizer::SetSplitOp(SDOperand Op, SDOperand Lo, SDOperand Hi) { /// BitConvertToInteger - Convert to an integer of the same size. 
SDOperand DAGTypeLegalizer::BitConvertToInteger(SDOperand Op) { - return DAG.getNode(ISD::BIT_CONVERT, - MVT::getIntegerType(MVT::getSizeInBits(Op.getValueType())), - Op); + unsigned BitWidth = Op.getValueType().getSizeInBits(); + return DAG.getNode(ISD::BIT_CONVERT, MVT::getIntegerVT(BitWidth), Op); } SDOperand DAGTypeLegalizer::CreateStackStoreLoad(SDOperand Op, - MVT::ValueType DestVT) { + MVT DestVT) { // Create the stack frame object. SDOperand FIPtr = DAG.CreateStackTemporary(DestVT); @@ -490,14 +489,13 @@ SDOperand DAGTypeLegalizer::CreateStackStoreLoad(SDOperand Op, /// JoinIntegers - Build an integer with low bits Lo and high bits Hi. SDOperand DAGTypeLegalizer::JoinIntegers(SDOperand Lo, SDOperand Hi) { - MVT::ValueType LVT = Lo.getValueType(); - MVT::ValueType HVT = Hi.getValueType(); - MVT::ValueType NVT = MVT::getIntegerType(MVT::getSizeInBits(LVT) + - MVT::getSizeInBits(HVT)); + MVT LVT = Lo.getValueType(); + MVT HVT = Hi.getValueType(); + MVT NVT = MVT::getIntegerVT(LVT.getSizeInBits() + HVT.getSizeInBits()); Lo = DAG.getNode(ISD::ZERO_EXTEND, NVT, Lo); Hi = DAG.getNode(ISD::ANY_EXTEND, NVT, Hi); - Hi = DAG.getNode(ISD::SHL, NVT, Hi, DAG.getConstant(MVT::getSizeInBits(LVT), + Hi = DAG.getNode(ISD::SHL, NVT, Hi, DAG.getConstant(LVT.getSizeInBits(), TLI.getShiftAmountTy())); return DAG.getNode(ISD::OR, NVT, Lo, Hi); } @@ -505,13 +503,13 @@ SDOperand DAGTypeLegalizer::JoinIntegers(SDOperand Lo, SDOperand Hi) { /// SplitInteger - Return the lower LoVT bits of Op in Lo and the upper HiVT /// bits in Hi. void DAGTypeLegalizer::SplitInteger(SDOperand Op, - MVT::ValueType LoVT, MVT::ValueType HiVT, + MVT LoVT, MVT HiVT, SDOperand &Lo, SDOperand &Hi) { - assert(MVT::getSizeInBits(LoVT) + MVT::getSizeInBits(HiVT) == - MVT::getSizeInBits(Op.getValueType()) && "Invalid integer splitting!"); + assert(LoVT.getSizeInBits() + HiVT.getSizeInBits() == + Op.getValueType().getSizeInBits() && "Invalid integer splitting!"); Lo = DAG.getNode(ISD::TRUNCATE, LoVT, Op); Hi = DAG.getNode(ISD::SRL, Op.getValueType(), Op, - DAG.getConstant(MVT::getSizeInBits(LoVT), + DAG.getConstant(LoVT.getSizeInBits(), TLI.getShiftAmountTy())); Hi = DAG.getNode(ISD::TRUNCATE, HiVT, Hi); } @@ -520,14 +518,13 @@ void DAGTypeLegalizer::SplitInteger(SDOperand Op, /// half the size of Op's. void DAGTypeLegalizer::SplitInteger(SDOperand Op, SDOperand &Lo, SDOperand &Hi) { - MVT::ValueType HalfVT = - MVT::getIntegerType(MVT::getSizeInBits(Op.getValueType())/2); + MVT HalfVT = MVT::getIntegerVT(Op.getValueType().getSizeInBits()/2); SplitInteger(Op, HalfVT, HalfVT, Lo, Hi); } /// MakeLibCall - Generate a libcall taking the given operands as arguments and /// returning a result of type RetVT. 
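// The LegalizeTypes helpers above read more directly in the new style because
// the width arithmetic composes with the factory in a single expression. A
// sketch of the Join/Split pairing, with Lo and Hi as the two integer halves:
  MVT WideVT = MVT::getIntegerVT(Lo.getValueType().getSizeInBits() +
                                 Hi.getValueType().getSizeInBits());
  MVT HalfVT = MVT::getIntegerVT(WideVT.getSizeInBits() / 2);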
-SDOperand DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, MVT::ValueType RetVT, +SDOperand DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, MVT RetVT, const SDOperand *Ops, unsigned NumOps, bool isSigned) { TargetLowering::ArgListTy Args; @@ -536,7 +533,7 @@ SDOperand DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, MVT::ValueType RetVT, TargetLowering::ArgListEntry Entry; for (unsigned i = 0; i != NumOps; ++i) { Entry.Node = Ops[i]; - Entry.Ty = MVT::getTypeForValueType(Entry.Node.getValueType()); + Entry.Ty = Entry.Node.getValueType().getTypeForMVT(); Entry.isSExt = isSigned; Entry.isZExt = !isSigned; Args.push_back(Entry); @@ -544,7 +541,7 @@ SDOperand DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, MVT::ValueType RetVT, SDOperand Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), TLI.getPointerTy()); - const Type *RetTy = MVT::getTypeForValueType(RetVT); + const Type *RetTy = RetVT.getTypeForMVT(); std::pair<SDOperand,SDOperand> CallInfo = TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false, CallingConv::C, false, Callee, Args, DAG); diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/lib/CodeGen/SelectionDAG/LegalizeTypes.h index a983c95..db9adc6 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypes.h +++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.h @@ -77,7 +77,7 @@ private: /// we need to expand it into multiple registers of a smaller integer type, or /// we need to scalarize a one-element vector type into the element type, or /// we need to split a vector type into smaller vector types. - LegalizeAction getTypeAction(MVT::ValueType VT) const { + LegalizeAction getTypeAction(MVT VT) const { switch (ValueTypeActions.getTypeAction(VT)) { default: assert(false && "Unknown legalize action!"); @@ -89,13 +89,12 @@ private: // Expand can mean // 1) split scalar in half, 2) convert a float to an integer, // 3) scalarize a single-element vector, 4) split a vector in two. - if (!MVT::isVector(VT)) { - if (MVT::getSizeInBits(VT) == - MVT::getSizeInBits(TLI.getTypeToTransformTo(VT))) + if (!VT.isVector()) { + if (VT.getSizeInBits() == TLI.getTypeToTransformTo(VT).getSizeInBits()) return FloatToInt; else return Expand; - } else if (MVT::getVectorNumElements(VT) == 1) { + } else if (VT.getVectorNumElements() == 1) { return Scalarize; } else { return Split; @@ -104,7 +103,7 @@ private: } /// isTypeLegal - Return true if this type is legal on this target. - bool isTypeLegal(MVT::ValueType VT) const { + bool isTypeLegal(MVT VT) const { return ValueTypeActions.getTypeAction(VT) == TargetLowering::Legal; } @@ -164,12 +163,12 @@ private: // Common routines. SDOperand BitConvertToInteger(SDOperand Op); - SDOperand CreateStackStoreLoad(SDOperand Op, MVT::ValueType DestVT); + SDOperand CreateStackStoreLoad(SDOperand Op, MVT DestVT); SDOperand JoinIntegers(SDOperand Lo, SDOperand Hi); void SplitInteger(SDOperand Op, SDOperand &Lo, SDOperand &Hi); - void SplitInteger(SDOperand Op, MVT::ValueType LoVT, MVT::ValueType HiVT, + void SplitInteger(SDOperand Op, MVT LoVT, MVT HiVT, SDOperand &Lo, SDOperand &Hi); - SDOperand MakeLibCall(RTLIB::Libcall LC, MVT::ValueType RetVT, + SDOperand MakeLibCall(RTLIB::Libcall LC, MVT RetVT, const SDOperand *Ops, unsigned NumOps, bool isSigned); //===--------------------------------------------------------------------===// @@ -187,7 +186,7 @@ private: /// GetPromotedZExtOp - Get a promoted operand and zero extend it to the final /// size. 
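// Illustrative sketch, not part of the patch: the getTypeAction decision from
// LegalizeTypes.h above, restated over plain parameters. The booleans and bit
// widths stand in for the MVT/TargetLowering queries and are hypothetical.

enum SimpleAction { FloatToInt, Expand, Scalarize, Split };

SimpleAction classifyExpandAction(bool IsVector, unsigned NumElts,
                                  unsigned SizeInBits,
                                  unsigned TransformedBits) {
  if (!IsVector)
    // Same width after transformation: a float turned into an equally sized
    // integer; otherwise the scalar is split in half.
    return SizeInBits == TransformedBits ? FloatToInt : Expand;
  if (NumElts == 1)
    return Scalarize;  // a one-element vector becomes its element type
  return Split;        // wider vectors are split into two smaller vectors
}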
SDOperand GetPromotedZExtOp(SDOperand Op) { - MVT::ValueType OldVT = Op.getValueType(); + MVT OldVT = Op.getValueType(); Op = GetPromotedOp(Op); return DAG.getZeroExtendInReg(Op, OldVT); } @@ -292,10 +291,10 @@ private: SDOperand ExpandOperand_BUILD_VECTOR(SDNode *N); SDOperand ExpandOperand_EXTRACT_ELEMENT(SDNode *N); SDOperand ExpandOperand_SETCC(SDNode *N); - SDOperand ExpandOperand_SINT_TO_FP(SDOperand Source, MVT::ValueType DestTy); + SDOperand ExpandOperand_SINT_TO_FP(SDOperand Source, MVT DestTy); SDOperand ExpandOperand_STORE(StoreSDNode *N, unsigned OpNo); SDOperand ExpandOperand_TRUNCATE(SDNode *N); - SDOperand ExpandOperand_UINT_TO_FP(SDOperand Source, MVT::ValueType DestTy); + SDOperand ExpandOperand_UINT_TO_FP(SDOperand Source, MVT DestTy); void ExpandSetCCOperands(SDOperand &NewLHS, SDOperand &NewRHS, ISD::CondCode &CCCode); diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypesExpand.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypesExpand.cpp index a07ef78..2946424 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypesExpand.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypesExpand.cpp @@ -105,14 +105,14 @@ void DAGTypeLegalizer::ExpandResult(SDNode *N, unsigned ResNo) { void DAGTypeLegalizer::ExpandResult_UNDEF(SDNode *N, SDOperand &Lo, SDOperand &Hi) { - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); Lo = Hi = DAG.getNode(ISD::UNDEF, NVT); } void DAGTypeLegalizer::ExpandResult_Constant(SDNode *N, SDOperand &Lo, SDOperand &Hi) { - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); - unsigned NBitWidth = MVT::getSizeInBits(NVT); + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + unsigned NBitWidth = NVT.getSizeInBits(); const APInt &Cst = cast<ConstantSDNode>(N)->getAPIntValue(); Lo = DAG.getConstant(APInt(Cst).trunc(NBitWidth), NVT); Hi = DAG.getConstant(Cst.lshr(NBitWidth).trunc(NBitWidth), NVT); @@ -148,9 +148,9 @@ void DAGTypeLegalizer::ExpandResult_MERGE_VALUES(SDNode *N, void DAGTypeLegalizer::ExpandResult_ANY_EXTEND(SDNode *N, SDOperand &Lo, SDOperand &Hi) { - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); SDOperand Op = N->getOperand(0); - if (MVT::getSizeInBits(Op.getValueType()) <= MVT::getSizeInBits(NVT)) { + if (Op.getValueType().getSizeInBits() <= NVT.getSizeInBits()) { // The low part is any extension of the input (which degenerates to a copy). Lo = DAG.getNode(ISD::ANY_EXTEND, NVT, Op); Hi = DAG.getNode(ISD::UNDEF, NVT); // The high part is undefined. @@ -169,9 +169,9 @@ void DAGTypeLegalizer::ExpandResult_ANY_EXTEND(SDNode *N, void DAGTypeLegalizer::ExpandResult_ZERO_EXTEND(SDNode *N, SDOperand &Lo, SDOperand &Hi) { - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); SDOperand Op = N->getOperand(0); - if (MVT::getSizeInBits(Op.getValueType()) <= MVT::getSizeInBits(NVT)) { + if (Op.getValueType().getSizeInBits() <= NVT.getSizeInBits()) { // The low part is zero extension of the input (which degenerates to a copy). Lo = DAG.getNode(ISD::ZERO_EXTEND, NVT, N->getOperand(0)); Hi = DAG.getConstant(0, NVT); // The high part is just a zero. @@ -186,20 +186,20 @@ void DAGTypeLegalizer::ExpandResult_ZERO_EXTEND(SDNode *N, // Split the promoted operand. This will simplify when it is expanded. 
SplitInteger(Res, Lo, Hi); unsigned ExcessBits = - MVT::getSizeInBits(Op.getValueType()) - MVT::getSizeInBits(NVT); - Hi = DAG.getZeroExtendInReg(Hi, MVT::getIntegerType(ExcessBits)); + Op.getValueType().getSizeInBits() - NVT.getSizeInBits(); + Hi = DAG.getZeroExtendInReg(Hi, MVT::getIntegerVT(ExcessBits)); } } void DAGTypeLegalizer::ExpandResult_SIGN_EXTEND(SDNode *N, SDOperand &Lo, SDOperand &Hi) { - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); SDOperand Op = N->getOperand(0); - if (MVT::getSizeInBits(Op.getValueType()) <= MVT::getSizeInBits(NVT)) { + if (Op.getValueType().getSizeInBits() <= NVT.getSizeInBits()) { // The low part is sign extension of the input (which degenerates to a copy). Lo = DAG.getNode(ISD::SIGN_EXTEND, NVT, N->getOperand(0)); // The high part is obtained by SRA'ing all but one of the bits of low part. - unsigned LoSize = MVT::getSizeInBits(NVT); + unsigned LoSize = NVT.getSizeInBits(); Hi = DAG.getNode(ISD::SRA, NVT, Lo, DAG.getConstant(LoSize-1, TLI.getShiftAmountTy())); } else { @@ -213,23 +213,23 @@ void DAGTypeLegalizer::ExpandResult_SIGN_EXTEND(SDNode *N, // Split the promoted operand. This will simplify when it is expanded. SplitInteger(Res, Lo, Hi); unsigned ExcessBits = - MVT::getSizeInBits(Op.getValueType()) - MVT::getSizeInBits(NVT); + Op.getValueType().getSizeInBits() - NVT.getSizeInBits(); Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, Hi.getValueType(), Hi, - DAG.getValueType(MVT::getIntegerType(ExcessBits))); + DAG.getValueType(MVT::getIntegerVT(ExcessBits))); } } void DAGTypeLegalizer::ExpandResult_AssertZext(SDNode *N, SDOperand &Lo, SDOperand &Hi) { GetExpandedOp(N->getOperand(0), Lo, Hi); - MVT::ValueType NVT = Lo.getValueType(); - MVT::ValueType EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); - unsigned NVTBits = MVT::getSizeInBits(NVT); - unsigned EVTBits = MVT::getSizeInBits(EVT); + MVT NVT = Lo.getValueType(); + MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); + unsigned NVTBits = NVT.getSizeInBits(); + unsigned EVTBits = EVT.getSizeInBits(); if (NVTBits < EVTBits) { Hi = DAG.getNode(ISD::AssertZext, NVT, Hi, - DAG.getValueType(MVT::getIntegerType(EVTBits - NVTBits))); + DAG.getValueType(MVT::getIntegerVT(EVTBits - NVTBits))); } else { Lo = DAG.getNode(ISD::AssertZext, NVT, Lo, DAG.getValueType(EVT)); // The high part must be zero, make it explicit. @@ -239,19 +239,19 @@ void DAGTypeLegalizer::ExpandResult_AssertZext(SDNode *N, void DAGTypeLegalizer::ExpandResult_TRUNCATE(SDNode *N, SDOperand &Lo, SDOperand &Hi) { - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); Lo = DAG.getNode(ISD::TRUNCATE, NVT, N->getOperand(0)); Hi = DAG.getNode(ISD::SRL, N->getOperand(0).getValueType(), N->getOperand(0), - DAG.getConstant(MVT::getSizeInBits(NVT), + DAG.getConstant(NVT.getSizeInBits(), TLI.getShiftAmountTy())); Hi = DAG.getNode(ISD::TRUNCATE, NVT, Hi); } void DAGTypeLegalizer::ExpandResult_BIT_CONVERT(SDNode *N, SDOperand &Lo, SDOperand &Hi) { - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); SDOperand InOp = N->getOperand(0); - MVT::ValueType InVT = InOp.getValueType(); + MVT InVT = InOp.getValueType(); // Handle some special cases efficiently. 
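// Illustrative sketch, not part of the patch: what the ZERO_EXTEND and
// SIGN_EXTEND expansions above compute, modelled on an i32 operand whose i64
// result is produced as two i32 halves. Function names are hypothetical.

#include <cstdint>

void expandZeroExtend(uint32_t Op, uint32_t &Lo, uint32_t &Hi) {
  Lo = Op;  // the low part degenerates to a copy
  Hi = 0;   // the high part is just a zero
}

void expandSignExtend(uint32_t Op, uint32_t &Lo, uint32_t &Hi) {
  Lo = Op;
  // SRA'ing all but one of the bits of the low part makes every bit of the
  // high part a copy of the sign bit.
  Hi = (Op & 0x80000000u) ? 0xFFFFFFFFu : 0u;
}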
switch (getTypeAction(InVT)) { @@ -299,9 +299,9 @@ void DAGTypeLegalizer::ExpandResult_BIT_CONVERT(SDNode *N, void DAGTypeLegalizer:: ExpandResult_SIGN_EXTEND_INREG(SDNode *N, SDOperand &Lo, SDOperand &Hi) { GetExpandedOp(N->getOperand(0), Lo, Hi); - MVT::ValueType EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); + MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); - if (MVT::getSizeInBits(EVT) <= MVT::getSizeInBits(Lo.getValueType())) { + if (EVT.getSizeInBits() <= Lo.getValueType().getSizeInBits()) { // sext_inreg the low part if needed. Lo = DAG.getNode(ISD::SIGN_EXTEND_INREG, Lo.getValueType(), Lo, N->getOperand(1)); @@ -309,21 +309,21 @@ ExpandResult_SIGN_EXTEND_INREG(SDNode *N, SDOperand &Lo, SDOperand &Hi) { // The high part gets the sign extension from the lo-part. This handles // things like sextinreg V:i64 from i8. Hi = DAG.getNode(ISD::SRA, Hi.getValueType(), Lo, - DAG.getConstant(MVT::getSizeInBits(Hi.getValueType())-1, + DAG.getConstant(Hi.getValueType().getSizeInBits()-1, TLI.getShiftAmountTy())); } else { // For example, extension of an i48 to an i64. Leave the low part alone, // sext_inreg the high part. unsigned ExcessBits = - MVT::getSizeInBits(EVT) - MVT::getSizeInBits(Lo.getValueType()); + EVT.getSizeInBits() - Lo.getValueType().getSizeInBits(); Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, Hi.getValueType(), Hi, - DAG.getValueType(MVT::getIntegerType(ExcessBits))); + DAG.getValueType(MVT::getIntegerVT(ExcessBits))); } } void DAGTypeLegalizer::ExpandResult_FP_TO_SINT(SDNode *N, SDOperand &Lo, SDOperand &Hi) { - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); SDOperand Op = N->getOperand(0); RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; if (VT == MVT::i64) { @@ -352,7 +352,7 @@ void DAGTypeLegalizer::ExpandResult_FP_TO_SINT(SDNode *N, SDOperand &Lo, void DAGTypeLegalizer::ExpandResult_FP_TO_UINT(SDNode *N, SDOperand &Lo, SDOperand &Hi) { - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); SDOperand Op = N->getOperand(0); RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; if (VT == MVT::i64) { @@ -382,8 +382,8 @@ void DAGTypeLegalizer::ExpandResult_FP_TO_UINT(SDNode *N, SDOperand &Lo, void DAGTypeLegalizer::ExpandResult_LOAD(LoadSDNode *N, SDOperand &Lo, SDOperand &Hi) { // FIXME: Add support for indexed loads. - MVT::ValueType VT = N->getValueType(0); - MVT::ValueType NVT = TLI.getTypeToTransformTo(VT); + MVT VT = N->getValueType(0); + MVT NVT = TLI.getTypeToTransformTo(VT); SDOperand Ch = N->getChain(); // Legalize the chain. SDOperand Ptr = N->getBasePtr(); // Legalize the pointer. ISD::LoadExtType ExtType = N->getExtensionType(); @@ -391,13 +391,13 @@ void DAGTypeLegalizer::ExpandResult_LOAD(LoadSDNode *N, unsigned Alignment = N->getAlignment(); bool isVolatile = N->isVolatile(); - assert(!(MVT::getSizeInBits(NVT) & 7) && "Expanded type not byte sized!"); + assert(!(NVT.getSizeInBits() & 7) && "Expanded type not byte sized!"); if (ExtType == ISD::NON_EXTLOAD) { Lo = DAG.getLoad(NVT, Ch, Ptr, N->getSrcValue(), SVOffset, isVolatile, Alignment); // Increment the pointer to the other half. - unsigned IncrementSize = MVT::getSizeInBits(NVT)/8; + unsigned IncrementSize = NVT.getSizeInBits()/8; Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr, DAG.getIntPtrConstant(IncrementSize)); Hi = DAG.getLoad(NVT, Ch, Ptr, N->getSrcValue(), SVOffset+IncrementSize, @@ -411,8 +411,8 @@ void DAGTypeLegalizer::ExpandResult_LOAD(LoadSDNode *N, // Handle endianness of the load. 
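// Illustrative sketch, not part of the patch: the non-extending case above as
// byte-offset arithmetic -- an i64 load expanded into two i32 loads at
// offsets 0 and NVT.getSizeInBits()/8, with the halves swapped for a
// big-endian target (the swap appears immediately below). Assumes the raw
// 4-byte loads are read in the target's own byte order; names are
// hypothetical.

#include <cstdint>
#include <cstring>
#include <utility>

void expandI64Load(const unsigned char *Ptr, bool BigEndian,
                   uint32_t &Lo, uint32_t &Hi) {
  const unsigned IncrementSize = 32 / 8;              // NVT.getSizeInBits()/8
  std::memcpy(&Lo, Ptr, sizeof(Lo));                  // load at SVOffset
  std::memcpy(&Hi, Ptr + IncrementSize, sizeof(Hi));  // load at SVOffset + 4
  if (BigEndian)
    std::swap(Lo, Hi);  // high bits live at the low addresses
}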
if (TLI.isBigEndian()) std::swap(Lo, Hi); - } else if (MVT::getSizeInBits(N->getMemoryVT()) <= MVT::getSizeInBits(NVT)) { - MVT::ValueType EVT = N->getMemoryVT(); + } else if (N->getMemoryVT().getSizeInBits() <= NVT.getSizeInBits()) { + MVT EVT = N->getMemoryVT(); Lo = DAG.getExtLoad(ExtType, NVT, Ch, Ptr, N->getSrcValue(), SVOffset, EVT, isVolatile, Alignment); @@ -423,7 +423,7 @@ void DAGTypeLegalizer::ExpandResult_LOAD(LoadSDNode *N, if (ExtType == ISD::SEXTLOAD) { // The high part is obtained by SRA'ing all but one of the bits of the // lo part. - unsigned LoSize = MVT::getSizeInBits(Lo.getValueType()); + unsigned LoSize = Lo.getValueType().getSizeInBits(); Hi = DAG.getNode(ISD::SRA, NVT, Lo, DAG.getConstant(LoSize-1, TLI.getShiftAmountTy())); } else if (ExtType == ISD::ZEXTLOAD) { @@ -440,11 +440,11 @@ void DAGTypeLegalizer::ExpandResult_LOAD(LoadSDNode *N, isVolatile, Alignment); unsigned ExcessBits = - MVT::getSizeInBits(N->getMemoryVT()) - MVT::getSizeInBits(NVT); - MVT::ValueType NEVT = MVT::getIntegerType(ExcessBits); + N->getMemoryVT().getSizeInBits() - NVT.getSizeInBits(); + MVT NEVT = MVT::getIntegerVT(ExcessBits); // Increment the pointer to the other half. - unsigned IncrementSize = MVT::getSizeInBits(NVT)/8; + unsigned IncrementSize = NVT.getSizeInBits()/8; Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr, DAG.getIntPtrConstant(IncrementSize)); Hi = DAG.getExtLoad(ExtType, NVT, Ch, Ptr, N->getSrcValue(), @@ -458,14 +458,14 @@ void DAGTypeLegalizer::ExpandResult_LOAD(LoadSDNode *N, } else { // Big-endian - high bits are at low addresses. Favor aligned loads at // the cost of some bit-fiddling. - MVT::ValueType EVT = N->getMemoryVT(); - unsigned EBytes = MVT::getStoreSizeInBits(EVT)/8; - unsigned IncrementSize = MVT::getSizeInBits(NVT)/8; + MVT EVT = N->getMemoryVT(); + unsigned EBytes = EVT.getStoreSizeInBits()/8; + unsigned IncrementSize = NVT.getSizeInBits()/8; unsigned ExcessBits = (EBytes - IncrementSize)*8; // Load both the high bits and maybe some of the low bits. Hi = DAG.getExtLoad(ExtType, NVT, Ch, Ptr, N->getSrcValue(), SVOffset, - MVT::getIntegerType(MVT::getSizeInBits(EVT)-ExcessBits), + MVT::getIntegerVT(EVT.getSizeInBits() - ExcessBits), isVolatile, Alignment); // Increment the pointer to the other half. @@ -473,7 +473,8 @@ void DAGTypeLegalizer::ExpandResult_LOAD(LoadSDNode *N, DAG.getIntPtrConstant(IncrementSize)); // Load the rest of the low bits. Lo = DAG.getExtLoad(ISD::ZEXTLOAD, NVT, Ch, Ptr, N->getSrcValue(), - SVOffset+IncrementSize, MVT::getIntegerType(ExcessBits), + SVOffset+IncrementSize, + MVT::getIntegerVT(ExcessBits), isVolatile, MinAlign(Alignment, IncrementSize)); // Build a factor node to remember that this load is independent of the @@ -481,7 +482,7 @@ void DAGTypeLegalizer::ExpandResult_LOAD(LoadSDNode *N, Ch = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1), Hi.getValue(1)); - if (ExcessBits < MVT::getSizeInBits(NVT)) { + if (ExcessBits < NVT.getSizeInBits()) { // Transfer low bits from the bottom of Hi to the top of Lo. Lo = DAG.getNode(ISD::OR, NVT, Lo, DAG.getNode(ISD::SHL, NVT, Hi, @@ -489,7 +490,7 @@ void DAGTypeLegalizer::ExpandResult_LOAD(LoadSDNode *N, TLI.getShiftAmountTy()))); // Move high bits to the right position in Hi. Hi = DAG.getNode(ExtType == ISD::SEXTLOAD ? 
ISD::SRA : ISD::SRL, NVT, Hi, - DAG.getConstant(MVT::getSizeInBits(NVT) - ExcessBits, + DAG.getConstant(NVT.getSizeInBits() - ExcessBits, TLI.getShiftAmountTy())); } } @@ -602,8 +603,8 @@ void DAGTypeLegalizer::ExpandResult_ADDSUBE(SDNode *N, void DAGTypeLegalizer::ExpandResult_MUL(SDNode *N, SDOperand &Lo, SDOperand &Hi) { - MVT::ValueType VT = N->getValueType(0); - MVT::ValueType NVT = TLI.getTypeToTransformTo(VT); + MVT VT = N->getValueType(0); + MVT NVT = TLI.getTypeToTransformTo(VT); bool HasMULHS = TLI.isOperationLegal(ISD::MULHS, NVT); bool HasMULHU = TLI.isOperationLegal(ISD::MULHU, NVT); @@ -613,8 +614,8 @@ void DAGTypeLegalizer::ExpandResult_MUL(SDNode *N, SDOperand LL, LH, RL, RH; GetExpandedOp(N->getOperand(0), LL, LH); GetExpandedOp(N->getOperand(1), RL, RH); - unsigned OuterBitSize = MVT::getSizeInBits(VT); - unsigned BitSize = MVT::getSizeInBits(NVT); + unsigned OuterBitSize = VT.getSizeInBits(); + unsigned BitSize = NVT.getSizeInBits(); unsigned LHSSB = DAG.ComputeNumSignBits(N->getOperand(0)); unsigned RHSSB = DAG.ComputeNumSignBits(N->getOperand(1)); @@ -705,7 +706,7 @@ void DAGTypeLegalizer::ExpandResult_UREM(SDNode *N, void DAGTypeLegalizer::ExpandResult_Shift(SDNode *N, SDOperand &Lo, SDOperand &Hi) { - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // If we can emit an efficient shift operation, do so now. Check to see if // the RHS is a constant. @@ -730,7 +731,7 @@ void DAGTypeLegalizer::ExpandResult_Shift(SDNode *N, // Next check to see if the target supports this SHL_PARTS operation or if it // will custom expand it. - MVT::ValueType NVT = TLI.getTypeToTransformTo(VT); + MVT NVT = TLI.getTypeToTransformTo(VT); TargetLowering::LegalizeAction Action = TLI.getOperationAction(PartsOpc, NVT); if ((Action == TargetLowering::Legal && TLI.isTypeLegal(NVT)) || Action == TargetLowering::Custom) { @@ -739,7 +740,7 @@ void DAGTypeLegalizer::ExpandResult_Shift(SDNode *N, GetExpandedOp(N->getOperand(0), LHSL, LHSH); SDOperand Ops[] = { LHSL, LHSH, N->getOperand(1) }; - MVT::ValueType VT = LHSL.getValueType(); + MVT VT = LHSL.getValueType(); Lo = DAG.getNode(PartsOpc, DAG.getNodeValueTypes(VT, VT), 2, Ops, 3); Hi = Lo.getValue(1); return; @@ -770,7 +771,7 @@ void DAGTypeLegalizer::ExpandResult_CTLZ(SDNode *N, SDOperand &Lo, SDOperand &Hi) { // ctlz (HiLo) -> Hi != 0 ? ctlz(Hi) : (ctlz(Lo)+32) GetExpandedOp(N->getOperand(0), Lo, Hi); - MVT::ValueType NVT = Lo.getValueType(); + MVT NVT = Lo.getValueType(); SDOperand HiNotZero = DAG.getSetCC(TLI.getSetCCResultType(Hi), Hi, DAG.getConstant(0, NVT), ISD::SETNE); @@ -780,7 +781,7 @@ void DAGTypeLegalizer::ExpandResult_CTLZ(SDNode *N, Lo = DAG.getNode(ISD::SELECT, NVT, HiNotZero, HiLZ, DAG.getNode(ISD::ADD, NVT, LoLZ, - DAG.getConstant(MVT::getSizeInBits(NVT), NVT))); + DAG.getConstant(NVT.getSizeInBits(), NVT))); Hi = DAG.getConstant(0, NVT); } @@ -788,7 +789,7 @@ void DAGTypeLegalizer::ExpandResult_CTPOP(SDNode *N, SDOperand &Lo, SDOperand &Hi) { // ctpop(HiLo) -> ctpop(Hi)+ctpop(Lo) GetExpandedOp(N->getOperand(0), Lo, Hi); - MVT::ValueType NVT = Lo.getValueType(); + MVT NVT = Lo.getValueType(); Lo = DAG.getNode(ISD::ADD, NVT, DAG.getNode(ISD::CTPOP, NVT, Lo), DAG.getNode(ISD::CTPOP, NVT, Hi)); Hi = DAG.getConstant(0, NVT); @@ -798,7 +799,7 @@ void DAGTypeLegalizer::ExpandResult_CTTZ(SDNode *N, SDOperand &Lo, SDOperand &Hi) { // cttz (HiLo) -> Lo != 0 ? 
cttz(Lo) : (cttz(Hi)+32) GetExpandedOp(N->getOperand(0), Lo, Hi); - MVT::ValueType NVT = Lo.getValueType(); + MVT NVT = Lo.getValueType(); SDOperand LoNotZero = DAG.getSetCC(TLI.getSetCCResultType(Lo), Lo, DAG.getConstant(0, NVT), ISD::SETNE); @@ -808,7 +809,7 @@ void DAGTypeLegalizer::ExpandResult_CTTZ(SDNode *N, Lo = DAG.getNode(ISD::SELECT, NVT, LoNotZero, LoLZ, DAG.getNode(ISD::ADD, NVT, HiLZ, - DAG.getConstant(MVT::getSizeInBits(NVT), NVT))); + DAG.getConstant(NVT.getSizeInBits(), NVT))); Hi = DAG.getConstant(0, NVT); } @@ -816,25 +817,24 @@ void DAGTypeLegalizer::ExpandResult_EXTRACT_VECTOR_ELT(SDNode *N, SDOperand &Lo, SDOperand &Hi) { SDOperand OldVec = N->getOperand(0); - unsigned OldElts = MVT::getVectorNumElements(OldVec.getValueType()); + unsigned OldElts = OldVec.getValueType().getVectorNumElements(); // Convert to a vector of the expanded element type, for example // <2 x i64> -> <4 x i32>. - MVT::ValueType OldVT = N->getValueType(0); - MVT::ValueType NewVT = TLI.getTypeToTransformTo(OldVT); - assert(MVT::getSizeInBits(OldVT) == 2 * MVT::getSizeInBits(NewVT) && + MVT OldVT = N->getValueType(0); + MVT NewVT = TLI.getTypeToTransformTo(OldVT); + assert(OldVT.getSizeInBits() == 2 * NewVT.getSizeInBits() && "Do not know how to handle this expansion!"); SDOperand NewVec = DAG.getNode(ISD::BIT_CONVERT, - MVT::getVectorType(NewVT, 2 * OldElts), + MVT::getVectorVT(NewVT, 2*OldElts), OldVec); // Extract the elements at 2 * Idx and 2 * Idx + 1 from the new vector. SDOperand Idx = N->getOperand(1); // Make sure the type of Idx is big enough to hold the new values. - if (MVT::getSizeInBits(Idx.getValueType()) < - MVT::getSizeInBits(TLI.getPointerTy())) + if (Idx.getValueType().getSizeInBits() < TLI.getPointerTy().getSizeInBits()) Idx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), Idx); Idx = DAG.getNode(ISD::ADD, Idx.getValueType(), Idx, Idx); @@ -856,10 +856,10 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt, SDOperand InL, InH; GetExpandedOp(N->getOperand(0), InL, InH); - MVT::ValueType NVT = InL.getValueType(); - unsigned VTBits = MVT::getSizeInBits(N->getValueType(0)); - unsigned NVTBits = MVT::getSizeInBits(NVT); - MVT::ValueType ShTy = N->getOperand(1).getValueType(); + MVT NVT = InL.getValueType(); + unsigned VTBits = N->getValueType(0).getSizeInBits(); + unsigned NVTBits = NVT.getSizeInBits(); + MVT ShTy = N->getOperand(1).getValueType(); if (N->getOpcode() == ISD::SHL) { if (Amt > VTBits) { @@ -932,10 +932,10 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt, bool DAGTypeLegalizer:: ExpandShiftWithKnownAmountBit(SDNode *N, SDOperand &Lo, SDOperand &Hi) { SDOperand Amt = N->getOperand(1); - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); - MVT::ValueType ShTy = Amt.getValueType(); - MVT::ValueType ShBits = MVT::getSizeInBits(ShTy); - unsigned NVTBits = MVT::getSizeInBits(NVT); + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + MVT ShTy = Amt.getValueType(); + unsigned ShBits = ShTy.getSizeInBits(); + unsigned NVTBits = NVT.getSizeInBits(); assert(isPowerOf2_32(NVTBits) && "Expanded integer type size not a power of two!"); @@ -1077,14 +1077,14 @@ SDOperand DAGTypeLegalizer::ExpandOperand_TRUNCATE(SDNode *N) { } SDOperand DAGTypeLegalizer::ExpandOperand_BIT_CONVERT(SDNode *N) { - if (MVT::isVector(N->getValueType(0))) { + if (N->getValueType(0).isVector()) { // An illegal integer type is being converted to a legal vector type. 
// Make a two element vector out of the expanded parts and convert that // instead, but only if the new vector type is legal (otherwise there // is no point, and it might create expansion loops). For example, on // x86 this turns v1i64 = BIT_CONVERT i64 into v1i64 = BIT_CONVERT v2i32. - MVT::ValueType OVT = N->getOperand(0).getValueType(); - MVT::ValueType NVT = MVT::getVectorType(TLI.getTypeToTransformTo(OVT), 2); + MVT OVT = N->getOperand(0).getValueType(); + MVT NVT = MVT::getVectorVT(TLI.getTypeToTransformTo(OVT), 2); if (isTypeLegal(NVT)) { SDOperand Parts[2]; @@ -1103,9 +1103,9 @@ SDOperand DAGTypeLegalizer::ExpandOperand_BIT_CONVERT(SDNode *N) { } SDOperand DAGTypeLegalizer::ExpandOperand_SINT_TO_FP(SDOperand Source, - MVT::ValueType DestTy) { + MVT DestTy) { // We know the destination is legal, but that the input needs to be expanded. - MVT::ValueType SourceVT = Source.getValueType(); + MVT SourceVT = Source.getValueType(); // Check to see if the target has a custom way to lower this. If so, use it. switch (TLI.getOperationAction(ISD::SINT_TO_FP, SourceVT)) { @@ -1149,7 +1149,7 @@ SDOperand DAGTypeLegalizer::ExpandOperand_SINT_TO_FP(SDOperand Source, } SDOperand DAGTypeLegalizer::ExpandOperand_UINT_TO_FP(SDOperand Source, - MVT::ValueType DestTy) { + MVT DestTy) { // We know the destination is legal, but that the input needs to be expanded. assert(getTypeAction(Source.getValueType()) == Expand && "This is not an expansion!"); @@ -1179,7 +1179,7 @@ SDOperand DAGTypeLegalizer::ExpandOperand_UINT_TO_FP(SDOperand Source, SDOperand FudgeInReg; if (DestTy == MVT::f32) FudgeInReg = DAG.getLoad(MVT::f32, DAG.getEntryNode(), CPIdx, NULL, 0); - else if (MVT::getSizeInBits(DestTy) > MVT::getSizeInBits(MVT::f32)) + else if (DestTy.getSizeInBits() > MVT(MVT::f32).getSizeInBits()) // FIXME: Avoid the extend by construction the right constantpool? FudgeInReg = DAG.getExtLoad(ISD::EXTLOAD, DestTy, DAG.getEntryNode(), CPIdx, NULL, 0, MVT::f32); @@ -1234,7 +1234,7 @@ void DAGTypeLegalizer::ExpandSetCCOperands(SDOperand &NewLHS, SDOperand &NewRHS, GetExpandedOp(NewLHS, LHSLo, LHSHi); GetExpandedOp(NewRHS, RHSLo, RHSHi); - MVT::ValueType VT = NewLHS.getValueType(); + MVT VT = NewLHS.getValueType(); if (VT == MVT::ppcf128) { // FIXME: This generated code sucks. We want to generate // FCMP crN, hi1, hi2 @@ -1343,8 +1343,8 @@ SDOperand DAGTypeLegalizer::ExpandOperand_STORE(StoreSDNode *N, unsigned OpNo) { // FIXME: Add support for indexed stores. 
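// Illustrative sketch, not part of the patch: the idea behind the UINT_TO_FP
// expansion above -- convert as if signed, then add a power-of-two offset
// back when the sign bit made the value read as negative. Shown for an i32
// source and a double result; the DAG code loads the offset (the "fudge
// factor") from a constant pool instead. The function name is hypothetical.

#include <cstdint>
#include <cstring>

double uintToFPModel(uint32_t X) {
  int32_t SX;
  std::memcpy(&SX, &X, sizeof(SX));  // reinterpret the bits as signed
  double D = (double)SX;             // signed conversion, off by 2^32 when
                                     // the sign bit of X was set
  if (SX < 0)
    D += 4294967296.0;               // 2^32, the offset for an i32 source
  return D;
}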
assert(OpNo == 1 && "Can only expand the stored value so far"); - MVT::ValueType VT = N->getOperand(1).getValueType(); - MVT::ValueType NVT = TLI.getTypeToTransformTo(VT); + MVT VT = N->getOperand(1).getValueType(); + MVT NVT = TLI.getTypeToTransformTo(VT); SDOperand Ch = N->getChain(); SDOperand Ptr = N->getBasePtr(); int SVOffset = N->getSrcValueOffset(); @@ -1352,12 +1352,12 @@ SDOperand DAGTypeLegalizer::ExpandOperand_STORE(StoreSDNode *N, unsigned OpNo) { bool isVolatile = N->isVolatile(); SDOperand Lo, Hi; - assert(!(MVT::getSizeInBits(NVT) & 7) && "Expanded type not byte sized!"); + assert(!(NVT.getSizeInBits() & 7) && "Expanded type not byte sized!"); if (!N->isTruncatingStore()) { unsigned IncrementSize = 0; GetExpandedOp(N->getValue(), Lo, Hi); - IncrementSize = MVT::getSizeInBits(Hi.getValueType())/8; + IncrementSize = Hi.getValueType().getSizeInBits()/8; if (TLI.isBigEndian()) std::swap(Lo, Hi); @@ -1371,7 +1371,7 @@ SDOperand DAGTypeLegalizer::ExpandOperand_STORE(StoreSDNode *N, unsigned OpNo) { Hi = DAG.getStore(Ch, Hi, Ptr, N->getSrcValue(), SVOffset+IncrementSize, isVolatile, MinAlign(Alignment, IncrementSize)); return DAG.getNode(ISD::TokenFactor, MVT::Other, Lo, Hi); - } else if (MVT::getSizeInBits(N->getMemoryVT()) <= MVT::getSizeInBits(NVT)) { + } else if (N->getMemoryVT().getSizeInBits() <= NVT.getSizeInBits()) { GetExpandedOp(N->getValue(), Lo, Hi); return DAG.getTruncStore(Ch, Lo, Ptr, N->getSrcValue(), SVOffset, N->getMemoryVT(), isVolatile, Alignment); @@ -1383,11 +1383,11 @@ SDOperand DAGTypeLegalizer::ExpandOperand_STORE(StoreSDNode *N, unsigned OpNo) { isVolatile, Alignment); unsigned ExcessBits = - MVT::getSizeInBits(N->getMemoryVT()) - MVT::getSizeInBits(NVT); - MVT::ValueType NEVT = MVT::getIntegerType(ExcessBits); + N->getMemoryVT().getSizeInBits() - NVT.getSizeInBits(); + MVT NEVT = MVT::getIntegerVT(ExcessBits); // Increment the pointer to the other half. - unsigned IncrementSize = MVT::getSizeInBits(NVT)/8; + unsigned IncrementSize = NVT.getSizeInBits()/8; Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr, DAG.getIntPtrConstant(IncrementSize)); Hi = DAG.getTruncStore(Ch, Hi, Ptr, N->getSrcValue(), @@ -1399,17 +1399,16 @@ SDOperand DAGTypeLegalizer::ExpandOperand_STORE(StoreSDNode *N, unsigned OpNo) { // the cost of some bit-fiddling. GetExpandedOp(N->getValue(), Lo, Hi); - MVT::ValueType EVT = N->getMemoryVT(); - unsigned EBytes = MVT::getStoreSizeInBits(EVT)/8; - unsigned IncrementSize = MVT::getSizeInBits(NVT)/8; + MVT EVT = N->getMemoryVT(); + unsigned EBytes = EVT.getStoreSizeInBits()/8; + unsigned IncrementSize = NVT.getSizeInBits()/8; unsigned ExcessBits = (EBytes - IncrementSize)*8; - MVT::ValueType HiVT = - MVT::getIntegerType(MVT::getSizeInBits(EVT)-ExcessBits); + MVT HiVT = MVT::getIntegerVT(EVT.getSizeInBits() - ExcessBits); - if (ExcessBits < MVT::getSizeInBits(NVT)) { + if (ExcessBits < NVT.getSizeInBits()) { // Transfer high bits from the top of Lo to the bottom of Hi. Hi = DAG.getNode(ISD::SHL, NVT, Hi, - DAG.getConstant(MVT::getSizeInBits(NVT) - ExcessBits, + DAG.getConstant(NVT.getSizeInBits() - ExcessBits, TLI.getShiftAmountTy())); Hi = DAG.getNode(ISD::OR, NVT, Hi, DAG.getNode(ISD::SRL, NVT, Lo, @@ -1427,7 +1426,7 @@ SDOperand DAGTypeLegalizer::ExpandOperand_STORE(StoreSDNode *N, unsigned OpNo) { // Store the lowest ExcessBits bits in the second half. 
Lo = DAG.getTruncStore(Ch, Lo, Ptr, N->getSrcValue(), SVOffset+IncrementSize, - MVT::getIntegerType(ExcessBits), + MVT::getIntegerVT(ExcessBits), isVolatile, MinAlign(Alignment, IncrementSize)); return DAG.getNode(ISD::TokenFactor, MVT::Other, Lo, Hi); } @@ -1435,12 +1434,12 @@ SDOperand DAGTypeLegalizer::ExpandOperand_STORE(StoreSDNode *N, unsigned OpNo) { SDOperand DAGTypeLegalizer::ExpandOperand_BUILD_VECTOR(SDNode *N) { // The vector type is legal but the element type needs expansion. - MVT::ValueType VecVT = N->getValueType(0); - unsigned NumElts = MVT::getVectorNumElements(VecVT); - MVT::ValueType OldVT = N->getOperand(0).getValueType(); - MVT::ValueType NewVT = TLI.getTypeToTransformTo(OldVT); + MVT VecVT = N->getValueType(0); + unsigned NumElts = VecVT.getVectorNumElements(); + MVT OldVT = N->getOperand(0).getValueType(); + MVT NewVT = TLI.getTypeToTransformTo(OldVT); - assert(MVT::getSizeInBits(OldVT) == 2 * MVT::getSizeInBits(NewVT) && + assert(OldVT.getSizeInBits() == 2 * NewVT.getSizeInBits() && "Do not know how to expand this operand!"); // Build a vector of twice the length out of the expanded elements. @@ -1458,7 +1457,7 @@ SDOperand DAGTypeLegalizer::ExpandOperand_BUILD_VECTOR(SDNode *N) { } SDOperand NewVec = DAG.getNode(ISD::BUILD_VECTOR, - MVT::getVectorType(NewVT, NewElts.size()), + MVT::getVectorVT(NewVT, NewElts.size()), &NewElts[0], NewElts.size()); // Convert the new vector to the old vector type. diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypesFloatToInt.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypesFloatToInt.cpp index f23b63e..58fcf10 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypesFloatToInt.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypesFloatToInt.cpp @@ -21,7 +21,7 @@ using namespace llvm; /// GetFPLibCall - Return the right libcall for the given floating point type. -static RTLIB::Libcall GetFPLibCall(MVT::ValueType VT, +static RTLIB::Libcall GetFPLibCall(MVT VT, RTLIB::Libcall Call_F32, RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80, @@ -105,7 +105,7 @@ SDOperand DAGTypeLegalizer::FloatToIntRes_ConstantFP(ConstantFPSDNode *N) { } SDOperand DAGTypeLegalizer::FloatToIntRes_FADD(SDNode *N) { - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); SDOperand Ops[2] = { GetIntegerOp(N->getOperand(0)), GetIntegerOp(N->getOperand(1)) }; return MakeLibCall(GetFPLibCall(N->getValueType(0), @@ -120,11 +120,11 @@ SDOperand DAGTypeLegalizer::FloatToIntRes_FCOPYSIGN(SDNode *N) { SDOperand LHS = GetIntegerOp(N->getOperand(0)); SDOperand RHS = BitConvertToInteger(N->getOperand(1)); - MVT::ValueType LVT = LHS.getValueType(); - MVT::ValueType RVT = RHS.getValueType(); + MVT LVT = LHS.getValueType(); + MVT RVT = RHS.getValueType(); - unsigned LSize = MVT::getSizeInBits(LVT); - unsigned RSize = MVT::getSizeInBits(RVT); + unsigned LSize = LVT.getSizeInBits(); + unsigned RSize = RVT.getSizeInBits(); // First get the sign bit of second operand. SDOperand SignBit = DAG.getNode(ISD::SHL, RVT, DAG.getConstant(1, RVT), @@ -133,7 +133,7 @@ SDOperand DAGTypeLegalizer::FloatToIntRes_FCOPYSIGN(SDNode *N) { SignBit = DAG.getNode(ISD::AND, RVT, RHS, SignBit); // Shift right or sign-extend it if the two operands have different types. 
- int SizeDiff = MVT::getSizeInBits(RVT) - MVT::getSizeInBits(LVT); + int SizeDiff = RVT.getSizeInBits() - LVT.getSizeInBits(); if (SizeDiff > 0) { SignBit = DAG.getNode(ISD::SRL, RVT, SignBit, DAG.getConstant(SizeDiff, TLI.getShiftAmountTy())); @@ -156,7 +156,7 @@ SDOperand DAGTypeLegalizer::FloatToIntRes_FCOPYSIGN(SDNode *N) { } SDOperand DAGTypeLegalizer::FloatToIntRes_FMUL(SDNode *N) { - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); SDOperand Ops[2] = { GetIntegerOp(N->getOperand(0)), GetIntegerOp(N->getOperand(1)) }; return MakeLibCall(GetFPLibCall(N->getValueType(0), @@ -168,7 +168,7 @@ SDOperand DAGTypeLegalizer::FloatToIntRes_FMUL(SDNode *N) { } SDOperand DAGTypeLegalizer::FloatToIntRes_FSUB(SDNode *N) { - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); SDOperand Ops[2] = { GetIntegerOp(N->getOperand(0)), GetIntegerOp(N->getOperand(1)) }; return MakeLibCall(GetFPLibCall(N->getValueType(0), @@ -181,8 +181,8 @@ SDOperand DAGTypeLegalizer::FloatToIntRes_FSUB(SDNode *N) { SDOperand DAGTypeLegalizer::FloatToIntRes_LOAD(SDNode *N) { LoadSDNode *L = cast<LoadSDNode>(N); - MVT::ValueType VT = N->getValueType(0); - MVT::ValueType NVT = TLI.getTypeToTransformTo(VT); + MVT VT = N->getValueType(0); + MVT NVT = TLI.getTypeToTransformTo(VT); if (L->getExtensionType() == ISD::NON_EXTLOAD) return DAG.getLoad(L->getAddressingMode(), L->getExtensionType(), @@ -202,7 +202,7 @@ SDOperand DAGTypeLegalizer::FloatToIntRes_LOAD(SDNode *N) { SDOperand DAGTypeLegalizer::FloatToIntRes_XINT_TO_FP(SDNode *N) { bool isSigned = N->getOpcode() == ISD::SINT_TO_FP; - MVT::ValueType DestVT = N->getValueType(0); + MVT DestVT = N->getValueType(0); SDOperand Op = N->getOperand(0); if (Op.getValueType() == MVT::i32) { @@ -212,8 +212,9 @@ SDOperand DAGTypeLegalizer::FloatToIntRes_XINT_TO_FP(SDNode *N) { SDOperand StackSlot = DAG.CreateStackTemporary(MVT::f64); // word offset constant for Hi/Lo address computation - SDOperand Offset = DAG.getConstant(MVT::getSizeInBits(MVT::i32) / 8, - TLI.getPointerTy()); + SDOperand Offset = + DAG.getConstant(MVT(MVT::i32).getSizeInBits() / 8, + TLI.getPointerTy()); // set up Hi and Lo (into buffer) address based on endian SDOperand Hi = StackSlot; SDOperand Lo = DAG.getNode(ISD::ADD, TLI.getPointerTy(), StackSlot, Offset); @@ -251,10 +252,12 @@ SDOperand DAGTypeLegalizer::FloatToIntRes_XINT_TO_FP(SDNode *N) { if (DestVT == MVT::f64) { // do nothing Result = Sub; - } else if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(MVT::f64)) { + } else if (DestVT.getSizeInBits() < + MVT(MVT::f64).getSizeInBits()) { Result = DAG.getNode(ISD::FP_ROUND, DestVT, Sub, DAG.getIntPtrConstant(0)); - } else if (MVT::getSizeInBits(DestVT) > MVT::getSizeInBits(MVT::f64)) { + } else if (DestVT.getSizeInBits() > + MVT(MVT::f64).getSizeInBits()) { Result = DAG.getNode(ISD::FP_EXTEND, DestVT, Sub); } return BitConvertToInteger(Result); @@ -273,7 +276,7 @@ SDOperand DAGTypeLegalizer::FloatToIntRes_XINT_TO_FP(SDNode *N) { // as a negative number. To counteract this, the dynamic code adds an // offset depending on the data type. 
uint64_t FF; - switch (Op.getValueType()) { + switch (Op.getValueType().getSimpleVT()) { default: assert(0 && "Unsupported integer type!"); case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float) case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float) diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypesPromote.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypesPromote.cpp index 6d7f063..b0a8475 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypesPromote.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypesPromote.cpp @@ -91,7 +91,7 @@ SDOperand DAGTypeLegalizer::PromoteResult_UNDEF(SDNode *N) { } SDOperand DAGTypeLegalizer::PromoteResult_Constant(SDNode *N) { - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // Zero extend things like i1, sign extend everything else. It shouldn't // matter in theory which one we pick, but this tends to give better code? unsigned Opc = VT != MVT::i1 ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; @@ -115,8 +115,8 @@ SDOperand DAGTypeLegalizer::PromoteResult_TRUNCATE(SDNode *N) { break; } - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); - assert(MVT::getSizeInBits(Res.getValueType()) >= MVT::getSizeInBits(NVT) && + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + assert(Res.getValueType().getSizeInBits() >= NVT.getSizeInBits() && "Truncation doesn't make sense!"); if (Res.getValueType() == NVT) return Res; @@ -126,11 +126,11 @@ SDOperand DAGTypeLegalizer::PromoteResult_TRUNCATE(SDNode *N) { } SDOperand DAGTypeLegalizer::PromoteResult_INT_EXTEND(SDNode *N) { - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); if (getTypeAction(N->getOperand(0).getValueType()) == Promote) { SDOperand Res = GetPromotedOp(N->getOperand(0)); - assert(MVT::getSizeInBits(Res.getValueType()) <= MVT::getSizeInBits(NVT) && + assert(Res.getValueType().getSizeInBits() <= NVT.getSizeInBits() && "Extension doesn't make sense!"); // If the result and operand types are the same after promotion, simplify @@ -168,7 +168,7 @@ SDOperand DAGTypeLegalizer::PromoteResult_FP_TO_XINT(SDNode *N) { Op = GetPromotedOp(Op); unsigned NewOpc = N->getOpcode(); - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); // If we're promoting a UINT to a larger size, check to see if the new node // will be legal. If it isn't, check to see if FP_TO_SINT is legal, since @@ -194,7 +194,7 @@ SDOperand DAGTypeLegalizer::PromoteResult_SETCC(SDNode *N) { SDOperand DAGTypeLegalizer::PromoteResult_LOAD(LoadSDNode *N) { // FIXME: Add support for indexed loads. - MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0)); + MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(N) ? 
ISD::EXTLOAD : N->getExtensionType(); SDOperand Res = DAG.getExtLoad(ExtType, NVT, N->getChain(), N->getBasePtr(), @@ -218,9 +218,9 @@ SDOperand DAGTypeLegalizer::PromoteResult_BUILD_PAIR(SDNode *N) { SDOperand DAGTypeLegalizer::PromoteResult_BIT_CONVERT(SDNode *N) { SDOperand InOp = N->getOperand(0); - MVT::ValueType InVT = InOp.getValueType(); - MVT::ValueType NInVT = TLI.getTypeToTransformTo(InVT); - MVT::ValueType OutVT = TLI.getTypeToTransformTo(N->getValueType(0)); + MVT InVT = InOp.getValueType(); + MVT NInVT = TLI.getTypeToTransformTo(InVT); + MVT OutVT = TLI.getTypeToTransformTo(N->getValueType(0)); switch (getTypeAction(InVT)) { default: @@ -229,7 +229,7 @@ SDOperand DAGTypeLegalizer::PromoteResult_BIT_CONVERT(SDNode *N) { case Legal: break; case Promote: - if (MVT::getSizeInBits(OutVT) == MVT::getSizeInBits(NInVT)) + if (OutVT.getSizeInBits() == NInVT.getSizeInBits()) // The input promotes to the same size. Convert the promoted value. return DAG.getNode(ISD::BIT_CONVERT, OutVT, GetPromotedOp(InOp)); break; @@ -254,7 +254,7 @@ SDOperand DAGTypeLegalizer::PromoteResult_BIT_CONVERT(SDNode *N) { std::swap(Lo, Hi); InOp = DAG.getNode(ISD::ANY_EXTEND, - MVT::getIntegerType(MVT::getSizeInBits(OutVT)), + MVT::getIntegerVT(OutVT.getSizeInBits()), JoinIntegers(Lo, Hi)); return DAG.getNode(ISD::BIT_CONVERT, OutVT, InOp); } @@ -278,7 +278,7 @@ SDOperand DAGTypeLegalizer::PromoteResult_SDIV(SDNode *N) { // Sign extend the input. SDOperand LHS = GetPromotedOp(N->getOperand(0)); SDOperand RHS = GetPromotedOp(N->getOperand(1)); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); LHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, LHS.getValueType(), LHS, DAG.getValueType(VT)); RHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, RHS.getValueType(), RHS, @@ -291,7 +291,7 @@ SDOperand DAGTypeLegalizer::PromoteResult_UDIV(SDNode *N) { // Zero extend the input. SDOperand LHS = GetPromotedOp(N->getOperand(0)); SDOperand RHS = GetPromotedOp(N->getOperand(1)); - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); LHS = DAG.getZeroExtendInReg(LHS, VT); RHS = DAG.getZeroExtendInReg(RHS, VT); @@ -305,8 +305,8 @@ SDOperand DAGTypeLegalizer::PromoteResult_SHL(SDNode *N) { SDOperand DAGTypeLegalizer::PromoteResult_SRA(SDNode *N) { // The input value must be properly sign extended. - MVT::ValueType VT = N->getValueType(0); - MVT::ValueType NVT = TLI.getTypeToTransformTo(VT); + MVT VT = N->getValueType(0); + MVT NVT = TLI.getTypeToTransformTo(VT); SDOperand Res = GetPromotedOp(N->getOperand(0)); Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, NVT, Res, DAG.getValueType(VT)); return DAG.getNode(ISD::SRA, NVT, Res, N->getOperand(1)); @@ -314,8 +314,8 @@ SDOperand DAGTypeLegalizer::PromoteResult_SRA(SDNode *N) { SDOperand DAGTypeLegalizer::PromoteResult_SRL(SDNode *N) { // The input value must be properly zero extended. - MVT::ValueType VT = N->getValueType(0); - MVT::ValueType NVT = TLI.getTypeToTransformTo(VT); + MVT VT = N->getValueType(0); + MVT NVT = TLI.getTypeToTransformTo(VT); SDOperand Res = GetPromotedZExtOp(N->getOperand(0)); return DAG.getNode(ISD::SRL, NVT, Res, N->getOperand(1)); } @@ -335,41 +335,41 @@ SDOperand DAGTypeLegalizer::PromoteResult_SELECT_CC(SDNode *N) { SDOperand DAGTypeLegalizer::PromoteResult_CTLZ(SDNode *N) { SDOperand Op = GetPromotedOp(N->getOperand(0)); - MVT::ValueType OVT = N->getValueType(0); - MVT::ValueType NVT = Op.getValueType(); + MVT OVT = N->getValueType(0); + MVT NVT = Op.getValueType(); // Zero extend to the promoted type and do the count there. 
Op = DAG.getNode(ISD::CTLZ, NVT, DAG.getZeroExtendInReg(Op, OVT)); // Subtract off the extra leading bits in the bigger type. return DAG.getNode(ISD::SUB, NVT, Op, - DAG.getConstant(MVT::getSizeInBits(NVT) - - MVT::getSizeInBits(OVT), NVT)); + DAG.getConstant(NVT.getSizeInBits() - + OVT.getSizeInBits(), NVT)); } SDOperand DAGTypeLegalizer::PromoteResult_CTPOP(SDNode *N) { SDOperand Op = GetPromotedOp(N->getOperand(0)); - MVT::ValueType OVT = N->getValueType(0); - MVT::ValueType NVT = Op.getValueType(); + MVT OVT = N->getValueType(0); + MVT NVT = Op.getValueType(); // Zero extend to the promoted type and do the count there. return DAG.getNode(ISD::CTPOP, NVT, DAG.getZeroExtendInReg(Op, OVT)); } SDOperand DAGTypeLegalizer::PromoteResult_CTTZ(SDNode *N) { SDOperand Op = GetPromotedOp(N->getOperand(0)); - MVT::ValueType OVT = N->getValueType(0); - MVT::ValueType NVT = Op.getValueType(); + MVT OVT = N->getValueType(0); + MVT NVT = Op.getValueType(); // The count is the same in the promoted type except if the original // value was zero. This can be handled by setting the bit just off // the top of the original type. Op = DAG.getNode(ISD::OR, NVT, Op, // FIXME: Do this using an APINT constant. - DAG.getConstant(1UL << MVT::getSizeInBits(OVT), NVT)); + DAG.getConstant(1UL << OVT.getSizeInBits(), NVT)); return DAG.getNode(ISD::CTTZ, NVT, Op); } SDOperand DAGTypeLegalizer::PromoteResult_EXTRACT_VECTOR_ELT(SDNode *N) { - MVT::ValueType OldVT = N->getValueType(0); + MVT OldVT = N->getValueType(0); SDOperand OldVec = N->getOperand(0); - unsigned OldElts = MVT::getVectorNumElements(OldVec.getValueType()); + unsigned OldElts = OldVec.getValueType().getVectorNumElements(); if (OldElts == 1) { assert(!isTypeLegal(OldVec.getValueType()) && @@ -384,11 +384,11 @@ SDOperand DAGTypeLegalizer::PromoteResult_EXTRACT_VECTOR_ELT(SDNode *N) { // Convert to a vector half as long with an element type of twice the width, // for example <4 x i16> -> <2 x i32>. assert(!(OldElts & 1) && "Odd length vectors not supported!"); - MVT::ValueType NewVT = MVT::getIntegerType(2 * MVT::getSizeInBits(OldVT)); - assert(!MVT::isExtendedVT(OldVT) && !MVT::isExtendedVT(NewVT)); + MVT NewVT = MVT::getIntegerVT(2 * OldVT.getSizeInBits()); + assert(OldVT.isSimple() && NewVT.isSimple()); SDOperand NewVec = DAG.getNode(ISD::BIT_CONVERT, - MVT::getVectorType(NewVT, OldElts / 2), + MVT::getVectorVT(NewVT, OldElts / 2), OldVec); // Extract the element at OldIdx / 2 from the new vector. @@ -401,7 +401,7 @@ SDOperand DAGTypeLegalizer::PromoteResult_EXTRACT_VECTOR_ELT(SDNode *N) { // Hi if it was odd. SDOperand Lo = Elt; SDOperand Hi = DAG.getNode(ISD::SRL, NewVT, Elt, - DAG.getConstant(MVT::getSizeInBits(OldVT), + DAG.getConstant(OldVT.getSizeInBits(), TLI.getShiftAmountTy())); if (TLI.isBigEndian()) std::swap(Lo, Hi); @@ -513,7 +513,7 @@ SDOperand DAGTypeLegalizer::PromoteOperand_FP_ROUND(SDNode *N) { SDOperand DAGTypeLegalizer::PromoteOperand_INT_TO_FP(SDNode *N) { SDOperand In = GetPromotedOp(N->getOperand(0)); - MVT::ValueType OpVT = N->getOperand(0).getValueType(); + MVT OpVT = N->getOperand(0).getValueType(); if (N->getOpcode() == ISD::UINT_TO_FP) In = DAG.getZeroExtendInReg(In, OpVT); else @@ -525,14 +525,14 @@ SDOperand DAGTypeLegalizer::PromoteOperand_INT_TO_FP(SDNode *N) { SDOperand DAGTypeLegalizer::PromoteOperand_BUILD_PAIR(SDNode *N) { // Since the result type is legal, the operands must promote to it. 
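// Illustrative sketch, not part of the patch: the promoted CTLZ/CTTZ above
// for an i16 value carried in an i32 register. The GCC/Clang builtins stand
// in for the target's 32-bit count nodes; assumes a 32-bit unsigned int, and
// the function names are hypothetical.

#include <cassert>
#include <cstdint>

unsigned promotedCtlz16(uint16_t X) {
  assert(X != 0 && "__builtin_clz is undefined at zero");
  // Zero extend, count in the promoted type, then subtract the extra leading
  // bits of the bigger type (32 - 16).
  return __builtin_clz((uint32_t)X) - (32 - 16);
}

unsigned promotedCttz16(uint16_t X) {
  // Set the bit just off the top of the original type so a zero input still
  // produces 16 trailing zeros.
  return __builtin_ctz((uint32_t)X | 0x10000u);
}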
- MVT::ValueType OVT = N->getOperand(0).getValueType(); + MVT OVT = N->getOperand(0).getValueType(); SDOperand Lo = GetPromotedOp(N->getOperand(0)); SDOperand Hi = GetPromotedOp(N->getOperand(1)); assert(Lo.getValueType() == N->getValueType(0) && "Operand over promoted?"); Lo = DAG.getZeroExtendInReg(Lo, OVT); Hi = DAG.getNode(ISD::SHL, N->getValueType(0), Hi, - DAG.getConstant(MVT::getSizeInBits(OVT), + DAG.getConstant(OVT.getSizeInBits(), TLI.getShiftAmountTy())); return DAG.getNode(ISD::OR, N->getValueType(0), Lo, Hi); } @@ -597,14 +597,14 @@ SDOperand DAGTypeLegalizer::PromoteOperand_SETCC(SDNode *N, unsigned OpNo) { /// shared among BR_CC, SELECT_CC, and SETCC handlers. void DAGTypeLegalizer::PromoteSetCCOperands(SDOperand &NewLHS,SDOperand &NewRHS, ISD::CondCode CCCode) { - MVT::ValueType VT = NewLHS.getValueType(); + MVT VT = NewLHS.getValueType(); // Get the promoted values. NewLHS = GetPromotedOp(NewLHS); NewRHS = GetPromotedOp(NewRHS); // If this is an FP compare, the operands have already been extended. - if (!MVT::isInteger(NewLHS.getValueType())) + if (!NewLHS.getValueType().isInteger()) return; // Otherwise, we have to insert explicit sign or zero extends. Note @@ -658,15 +658,15 @@ SDOperand DAGTypeLegalizer::PromoteOperand_BUILD_VECTOR(SDNode *N) { // The vector type is legal but the element type is not. This implies // that the vector is a power-of-two in length and that the element // type does not have a strange size (eg: it is not i1). - MVT::ValueType VecVT = N->getValueType(0); - unsigned NumElts = MVT::getVectorNumElements(VecVT); + MVT VecVT = N->getValueType(0); + unsigned NumElts = VecVT.getVectorNumElements(); assert(!(NumElts & 1) && "Legal vector of one illegal element?"); // Build a vector of half the length out of elements of twice the bitwidth. // For example <4 x i16> -> <2 x i32>. - MVT::ValueType OldVT = N->getOperand(0).getValueType(); - MVT::ValueType NewVT = MVT::getIntegerType(2 * MVT::getSizeInBits(OldVT)); - assert(!MVT::isExtendedVT(OldVT) && !MVT::isExtendedVT(NewVT)); + MVT OldVT = N->getOperand(0).getValueType(); + MVT NewVT = MVT::getIntegerVT(2 * OldVT.getSizeInBits()); + assert(OldVT.isSimple() && NewVT.isSimple()); std::vector<SDOperand> NewElts; NewElts.reserve(NumElts/2); @@ -681,7 +681,7 @@ SDOperand DAGTypeLegalizer::PromoteOperand_BUILD_VECTOR(SDNode *N) { } SDOperand NewVec = DAG.getNode(ISD::BUILD_VECTOR, - MVT::getVectorType(NewVT, NewElts.size()), + MVT::getVectorVT(NewVT, NewElts.size()), &NewElts[0], NewElts.size()); // Convert the new vector to the old vector type. @@ -695,8 +695,8 @@ SDOperand DAGTypeLegalizer::PromoteOperand_INSERT_VECTOR_ELT(SDNode *N, // have to match the vector element type. // Check that any extra bits introduced will be truncated away. 
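// Illustrative sketch, not part of the patch: the <4 x i16> <-> <2 x i32>
// view used by the BUILD_VECTOR and EXTRACT_VECTOR_ELT promotions above.
// Each i32 word carries two promoted i16 elements; which half holds the even
// element flips on big-endian targets (the Lo/Hi swap above). The packing
// order shown is the little-endian case, and the names are hypothetical.

#include <cstdint>

uint32_t packPromotedPair(uint16_t Even, uint16_t Odd) {
  // Two elements of the narrow type become one element of twice the width.
  return (uint32_t)Even | ((uint32_t)Odd << 16);
}

uint16_t extractPromotedElt(const uint32_t *Words, unsigned Idx) {
  uint32_t Elt = Words[Idx / 2];             // the element at OldIdx / 2
  return (Idx & 1) ? (uint16_t)(Elt >> 16)   // odd index: the Hi half
                   : (uint16_t)Elt;          // even index: the Lo half
}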
- assert(MVT::getSizeInBits(N->getOperand(1).getValueType()) >= - MVT::getSizeInBits(MVT::getVectorElementType(N->getValueType(0))) && + assert(N->getOperand(1).getValueType().getSizeInBits() >= + N->getValueType(0).getVectorElementType().getSizeInBits() && "Type of inserted value narrower than vector element type!"); return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0), GetPromotedOp(N->getOperand(1)), diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypesScalarize.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypesScalarize.cpp index d9ba99f..eaab770 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypesScalarize.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypesScalarize.cpp @@ -89,12 +89,12 @@ void DAGTypeLegalizer::ScalarizeResult(SDNode *N, unsigned ResNo) { } SDOperand DAGTypeLegalizer::ScalarizeRes_UNDEF(SDNode *N) { - return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(N->getValueType(0))); + return DAG.getNode(ISD::UNDEF, N->getValueType(0).getVectorElementType()); } SDOperand DAGTypeLegalizer::ScalarizeRes_LOAD(LoadSDNode *N) { // FIXME: Add support for indexed loads. - SDOperand Result = DAG.getLoad(MVT::getVectorElementType(N->getValueType(0)), + SDOperand Result = DAG.getLoad(N->getValueType(0).getVectorElementType(), N->getChain(), N->getBasePtr(), N->getSrcValue(), N->getSrcValueOffset(), N->isVolatile(), N->getAlignment()); @@ -125,8 +125,8 @@ SDOperand DAGTypeLegalizer::ScalarizeRes_INSERT_VECTOR_ELT(SDNode *N) { // The value to insert may have a wider type than the vector element type, // so be sure to truncate it to the element type if necessary. SDOperand Op = N->getOperand(1); - MVT::ValueType EltVT = MVT::getVectorElementType(N->getValueType(0)); - if (MVT::getSizeInBits(Op.getValueType()) > MVT::getSizeInBits(EltVT)) + MVT EltVT = N->getValueType(0).getVectorElementType(); + if (Op.getValueType().getSizeInBits() > EltVT.getSizeInBits()) Op = DAG.getNode(ISD::TRUNCATE, EltVT, Op); assert(Op.getValueType() == EltVT && "Invalid type for inserted value!"); return Op; @@ -140,7 +140,7 @@ SDOperand DAGTypeLegalizer::ScalarizeRes_VECTOR_SHUFFLE(SDNode *N) { } SDOperand DAGTypeLegalizer::ScalarizeRes_BIT_CONVERT(SDNode *N) { - MVT::ValueType NewVT = MVT::getVectorElementType(N->getValueType(0)); + MVT NewVT = N->getValueType(0).getVectorElementType(); return DAG.getNode(ISD::BIT_CONVERT, NewVT, N->getOperand(0)); } diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypesSplit.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypesSplit.cpp index d1057c9..a7c6486 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypesSplit.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypesSplit.cpp @@ -19,18 +19,17 @@ using namespace llvm; /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a vector /// type that needs to be split. This handles non-power of two vectors. -static void GetSplitDestVTs(MVT::ValueType InVT, - MVT::ValueType &Lo, MVT::ValueType &Hi) { - MVT::ValueType NewEltVT = MVT::getVectorElementType(InVT); - unsigned NumElements = MVT::getVectorNumElements(InVT); +static void GetSplitDestVTs(MVT InVT, MVT &Lo, MVT &Hi) { + MVT NewEltVT = InVT.getVectorElementType(); + unsigned NumElements = InVT.getVectorNumElements(); if ((NumElements & (NumElements-1)) == 0) { // Simple power of two vector. NumElements >>= 1; - Lo = Hi = MVT::getVectorType(NewEltVT, NumElements); + Lo = Hi = MVT::getVectorVT(NewEltVT, NumElements); } else { // Non-power-of-two vectors. 
unsigned NewNumElts_Lo = 1 << Log2_32(NumElements); unsigned NewNumElts_Hi = NumElements - NewNumElts_Lo; - Lo = MVT::getVectorType(NewEltVT, NewNumElts_Lo); - Hi = MVT::getVectorType(NewEltVT, NewNumElts_Hi); + Lo = MVT::getVectorVT(NewEltVT, NewNumElts_Lo); + Hi = MVT::getVectorVT(NewEltVT, NewNumElts_Hi); } } @@ -117,7 +116,7 @@ void DAGTypeLegalizer::SplitResult(SDNode *N, unsigned ResNo) { } void DAGTypeLegalizer::SplitRes_UNDEF(SDNode *N, SDOperand &Lo, SDOperand &Hi) { - MVT::ValueType LoVT, HiVT; + MVT LoVT, HiVT; GetSplitDestVTs(N->getValueType(0), LoVT, HiVT); Lo = DAG.getNode(ISD::UNDEF, LoVT); @@ -127,7 +126,7 @@ void DAGTypeLegalizer::SplitRes_UNDEF(SDNode *N, SDOperand &Lo, SDOperand &Hi) { void DAGTypeLegalizer::SplitRes_LOAD(LoadSDNode *LD, SDOperand &Lo, SDOperand &Hi) { // FIXME: Add support for indexed loads. - MVT::ValueType LoVT, HiVT; + MVT LoVT, HiVT; GetSplitDestVTs(LD->getValueType(0), LoVT, HiVT); SDOperand Ch = LD->getChain(); @@ -138,7 +137,7 @@ void DAGTypeLegalizer::SplitRes_LOAD(LoadSDNode *LD, bool isVolatile = LD->isVolatile(); Lo = DAG.getLoad(LoVT, Ch, Ptr, SV, SVOffset, isVolatile, Alignment); - unsigned IncrementSize = MVT::getSizeInBits(LoVT)/8; + unsigned IncrementSize = LoVT.getSizeInBits()/8; Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr, DAG.getIntPtrConstant(IncrementSize)); SVOffset += IncrementSize; @@ -166,7 +165,7 @@ void DAGTypeLegalizer::SplitRes_INSERT_VECTOR_ELT(SDNode *N, SDOperand &Lo, GetSplitOp(N->getOperand(0), Lo, Hi); unsigned Index = cast<ConstantSDNode>(N->getOperand(2))->getValue(); SDOperand ScalarOp = N->getOperand(1); - unsigned LoNumElts = MVT::getVectorNumElements(Lo.getValueType()); + unsigned LoNumElts = Lo.getValueType().getVectorNumElements(); if (Index < LoNumElts) Lo = DAG.getNode(ISD::INSERT_VECTOR_ELT, Lo.getValueType(), Lo, ScalarOp, N->getOperand(2)); @@ -180,10 +179,10 @@ void DAGTypeLegalizer::SplitRes_VECTOR_SHUFFLE(SDNode *N, // Build the low part. SDOperand Mask = N->getOperand(2); SmallVector<SDOperand, 16> Ops; - MVT::ValueType LoVT, HiVT; + MVT LoVT, HiVT; GetSplitDestVTs(N->getValueType(0), LoVT, HiVT); - MVT::ValueType EltVT = MVT::getVectorElementType(LoVT); - unsigned LoNumElts = MVT::getVectorNumElements(LoVT); + MVT EltVT = LoVT.getVectorElementType(); + unsigned LoNumElts = LoVT.getVectorNumElements(); unsigned NumElements = Mask.getNumOperands(); // Insert all of the elements from the input that are needed. We use @@ -217,9 +216,9 @@ void DAGTypeLegalizer::SplitRes_VECTOR_SHUFFLE(SDNode *N, void DAGTypeLegalizer::SplitRes_BUILD_VECTOR(SDNode *N, SDOperand &Lo, SDOperand &Hi) { - MVT::ValueType LoVT, HiVT; + MVT LoVT, HiVT; GetSplitDestVTs(N->getValueType(0), LoVT, HiVT); - unsigned LoNumElts = MVT::getVectorNumElements(LoVT); + unsigned LoNumElts = LoVT.getVectorNumElements(); SmallVector<SDOperand, 8> LoOps(N->op_begin(), N->op_begin()+LoNumElts); Lo = DAG.getNode(ISD::BUILD_VECTOR, LoVT, &LoOps[0], LoOps.size()); @@ -237,7 +236,7 @@ void DAGTypeLegalizer::SplitRes_CONCAT_VECTORS(SDNode *N, return; } - MVT::ValueType LoVT, HiVT; + MVT LoVT, HiVT; GetSplitDestVTs(N->getValueType(0), LoVT, HiVT); SmallVector<SDOperand, 8> LoOps(N->op_begin(), N->op_begin()+NumSubvectors); @@ -251,11 +250,11 @@ void DAGTypeLegalizer::SplitRes_BIT_CONVERT(SDNode *N, SDOperand &Lo, SDOperand &Hi) { // We know the result is a vector. The input may be either a vector or a // scalar value. 
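// Illustrative sketch, not part of the patch: the element-count arithmetic in
// GetSplitDestVTs above. A power-of-two vector splits evenly; otherwise the
// low part takes the largest power of two that fits (1 << Log2_32(N)) and the
// high part gets the remainder. The function name is hypothetical.

#include <cassert>

void splitElementCounts(unsigned NumElements,
                        unsigned &LoElts, unsigned &HiElts) {
  assert(NumElements > 1 && "nothing to split");
  if ((NumElements & (NumElements - 1)) == 0) {
    LoElts = HiElts = NumElements / 2;  // e.g. 8 -> 4 + 4
  } else {
    unsigned Pow2 = 1;
    while (Pow2 * 2 <= NumElements)     // largest power of two <= NumElements
      Pow2 *= 2;
    LoElts = Pow2;                      // e.g. 7 -> 4 ...
    HiElts = NumElements - Pow2;        //          ... + 3
  }
}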
- MVT::ValueType LoVT, HiVT; + MVT LoVT, HiVT; GetSplitDestVTs(N->getValueType(0), LoVT, HiVT); SDOperand InOp = N->getOperand(0); - MVT::ValueType InVT = InOp.getValueType(); + MVT InVT = InOp.getValueType(); // Handle some special cases efficiently. switch (getTypeAction(InVT)) { @@ -289,8 +288,8 @@ void DAGTypeLegalizer::SplitRes_BIT_CONVERT(SDNode *N, } // In the general case, convert the input to an integer and split it by hand. - MVT::ValueType LoIntVT = MVT::getIntegerType(MVT::getSizeInBits(LoVT)); - MVT::ValueType HiIntVT = MVT::getIntegerType(MVT::getSizeInBits(HiVT)); + MVT LoIntVT = MVT::getIntegerVT(LoVT.getSizeInBits()); + MVT HiIntVT = MVT::getIntegerVT(HiVT.getSizeInBits()); if (TLI.isBigEndian()) std::swap(LoIntVT, HiIntVT); @@ -314,7 +313,7 @@ void DAGTypeLegalizer::SplitRes_BinOp(SDNode *N, SDOperand &Lo, SDOperand &Hi) { void DAGTypeLegalizer::SplitRes_UnOp(SDNode *N, SDOperand &Lo, SDOperand &Hi) { // Get the dest types. This doesn't always match input types, e.g. int_to_fp. - MVT::ValueType LoVT, HiVT; + MVT LoVT, HiVT; GetSplitDestVTs(N->getValueType(0), LoVT, HiVT); GetSplitOp(N->getOperand(0), Lo, Hi); @@ -410,7 +409,7 @@ SDOperand DAGTypeLegalizer::SplitOp_STORE(StoreSDNode *N, unsigned OpNo) { SDOperand Lo, Hi; GetSplitOp(N->getOperand(1), Lo, Hi); - unsigned IncrementSize = MVT::getSizeInBits(Lo.getValueType())/8; + unsigned IncrementSize = Lo.getValueType().getSizeInBits()/8; Lo = DAG.getStore(Ch, Lo, Ptr, N->getSrcValue(), SVOffset, isVol, Alignment); @@ -455,17 +454,16 @@ SDOperand DAGTypeLegalizer::SplitOp_BIT_CONVERT(SDNode *N) { SDOperand DAGTypeLegalizer::SplitOp_EXTRACT_VECTOR_ELT(SDNode *N) { SDOperand Vec = N->getOperand(0); SDOperand Idx = N->getOperand(1); - MVT::ValueType VecVT = Vec.getValueType(); + MVT VecVT = Vec.getValueType(); if (isa<ConstantSDNode>(Idx)) { uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getValue(); - assert(IdxVal < MVT::getVectorNumElements(VecVT) && - "Invalid vector index!"); + assert(IdxVal < VecVT.getVectorNumElements() && "Invalid vector index!"); SDOperand Lo, Hi; GetSplitOp(Vec, Lo, Hi); - uint64_t LoElts = MVT::getVectorNumElements(Lo.getValueType()); + uint64_t LoElts = Lo.getValueType().getVectorNumElements(); if (IdxVal < LoElts) return DAG.UpdateNodeOperands(SDOperand(N, 0), Lo, Idx); @@ -480,13 +478,12 @@ SDOperand DAGTypeLegalizer::SplitOp_EXTRACT_VECTOR_ELT(SDNode *N) { SDOperand Store = DAG.getStore(DAG.getEntryNode(), Vec, StackPtr, NULL, 0); // Add the offset to the index. - MVT::ValueType EltVT = MVT::getVectorElementType(VecVT); - unsigned EltSize = MVT::getSizeInBits(EltVT)/8; // FIXME: should be ABI size. + MVT EltVT = VecVT.getVectorElementType(); + unsigned EltSize = EltVT.getSizeInBits()/8; // FIXME: should be ABI size. Idx = DAG.getNode(ISD::MUL, Idx.getValueType(), Idx, DAG.getConstant(EltSize, Idx.getValueType())); - if (MVT::getSizeInBits(Idx.getValueType()) > - MVT::getSizeInBits(TLI.getPointerTy())) + if (Idx.getValueType().getSizeInBits() > TLI.getPointerTy().getSizeInBits()) Idx = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), Idx); else Idx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), Idx); @@ -498,16 +495,16 @@ SDOperand DAGTypeLegalizer::SplitOp_EXTRACT_VECTOR_ELT(SDNode *N) { SDOperand DAGTypeLegalizer::SplitOp_EXTRACT_SUBVECTOR(SDNode *N) { // We know that the extracted result type is legal. For now, assume the index // is a constant. 
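// Illustrative sketch, not part of the patch: indexing into a split vector as
// in SplitOp_EXTRACT_VECTOR_ELT above. A constant index below LoElts stays in
// the low half; otherwise it presumably moves to the high half with LoElts
// subtracted (that branch is not shown in the hunk). Plain arrays stand in
// for the Lo/Hi halves here, and the function name is hypothetical.

#include <cstdint>

uint32_t extractFromSplit(const uint32_t *Lo, unsigned LoElts,
                          const uint32_t *Hi, unsigned IdxVal) {
  if (IdxVal < LoElts)
    return Lo[IdxVal];         // the index is used unchanged in the low half
  return Hi[IdxVal - LoElts];  // remapped index into the high half
}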
-  MVT::ValueType SubVT = N->getValueType(0);
+  MVT SubVT = N->getValueType(0);
   SDOperand Idx = N->getOperand(1);
   SDOperand Lo, Hi;
   GetSplitOp(N->getOperand(0), Lo, Hi);

-  uint64_t LoElts = MVT::getVectorNumElements(Lo.getValueType());
+  uint64_t LoElts = Lo.getValueType().getVectorNumElements();
   uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getValue();

   if (IdxVal < LoElts) {
-    assert(IdxVal + MVT::getVectorNumElements(SubVT) <= LoElts &&
+    assert(IdxVal + SubVT.getVectorNumElements() <= LoElts &&
            "Extracted subvector crosses vector split!");
     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SubVT, Lo, Idx);
   } else {
@@ -519,7 +516,7 @@ SDOperand DAGTypeLegalizer::SplitOp_EXTRACT_SUBVECTOR(SDNode *N) {
 SDOperand DAGTypeLegalizer::SplitOp_VECTOR_SHUFFLE(SDNode *N, unsigned OpNo) {
   assert(OpNo == 2 && "Shuffle source type differs from result type?");
   SDOperand Mask = N->getOperand(2);
-  unsigned MaskLength = MVT::getVectorNumElements(Mask.getValueType());
+  unsigned MaskLength = Mask.getValueType().getVectorNumElements();
   unsigned LargestMaskEntryPlusOne = 2 * MaskLength;
   unsigned MinimumBitWidth = Log2_32_Ceil(LargestMaskEntryPlusOne);
@@ -532,12 +529,12 @@ SDOperand DAGTypeLegalizer::SplitOp_VECTOR_SHUFFLE(SDNode *N, unsigned OpNo) {
        EltVT = MVT::SimpleValueType(EltVT + 1)) {

     // Is the element type big enough to hold the values?
-    if (MVT::getSizeInBits(EltVT) < MinimumBitWidth)
+    if (MVT(EltVT).getSizeInBits() < MinimumBitWidth)
       // Nope.
       continue;

     // Is the vector type legal?
-    MVT::ValueType VecVT = MVT::getVectorType(EltVT, MaskLength);
+    MVT VecVT = MVT::getVectorVT(EltVT, MaskLength);
     if (!isTypeLegal(VecVT))
       // Nope.
       continue;
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp
index 70365e2..d60d2b0 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp
@@ -191,7 +191,7 @@ void ScheduleDAG::BuildSchedUnits() {
         assert(OpSU && "Node has no SUnit!");
         if (OpSU == SU) continue;           // In the same group.
-        MVT::ValueType OpVT = N->getOperand(i).getValueType();
+        MVT OpVT = N->getOperand(i).getValueType();
         assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
         bool isChain = OpVT == MVT::Other;
@@ -433,7 +433,7 @@ void ScheduleDAG::EmitCopyFromReg(SDNode *Node, unsigned ResNo,
       SDOperand Op = Use->getOperand(i);
       if (Op.Val != Node || Op.ResNo != ResNo)
         continue;
-      MVT::ValueType VT = Node->getValueType(Op.ResNo);
+      MVT VT = Node->getValueType(Op.ResNo);
       if (VT != MVT::Other && VT != MVT::Flag)
         Match = false;
     }
@@ -677,7 +677,7 @@ static const TargetRegisterClass *getSubRegisterRegClass(
 static const TargetRegisterClass *getSuperregRegisterClass(
                                         const TargetRegisterClass *TRC,
                                         unsigned SubIdx,
-                                        MVT::ValueType VT) {
+                                        MVT VT) {
   // Pick the register class of the superegister for this type
   for (TargetRegisterInfo::regclass_iterator I = TRC->superregclasses_begin(),
          E = TRC->superregclasses_end(); I != E; ++I)
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index f84b3f0..d2ed4ef 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -646,7 +646,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
   SUnit *NewSU;
   bool TryUnfold = false;
   for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
-    MVT::ValueType VT = N->getValueType(i);
+    MVT VT = N->getValueType(i);
     if (VT == MVT::Flag)
       return NULL;
     else if (VT == MVT::Other)
@@ -654,7 +654,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
   }
   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
     const SDOperand &Op = N->getOperand(i);
-    MVT::ValueType VT = Op.Val->getValueType(Op.ResNo);
+    MVT VT = Op.Val->getValueType(Op.ResNo);
     if (VT == MVT::Flag)
       return NULL;
   }
@@ -872,8 +872,8 @@ void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
 /// getPhysicalRegisterVT - Returns the ValueType of the physical register
 /// definition of the specified node.
 /// FIXME: Move to SelectionDAG?
-static MVT::ValueType getPhysicalRegisterVT(SDNode *N, unsigned Reg,
-                                            const TargetInstrInfo *TII) {
+static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
+                                 const TargetInstrInfo *TII) {
   const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
   assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
   unsigned NumRes = TID.getNumDefs();
@@ -1022,7 +1022,7 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
       SUnit *NewDef = CopyAndMoveSuccessors(LRDef);
       if (!NewDef) {
         // Issue expensive cross register class copies.
-        MVT::ValueType VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII);
+        MVT VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII);
         const TargetRegisterClass *RC =
           TRI->getPhysicalRegisterRegClass(Reg, VT);
         const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
@@ -1599,7 +1599,7 @@ static bool canClobberPhysRegDefs(SUnit *SuccSU, SUnit *SU,
   if (!SUImpDefs)
     return false;
   for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
-    MVT::ValueType VT = N->getValueType(i);
+    MVT VT = N->getValueType(i);
     if (VT == MVT::Flag || VT == MVT::Other)
       continue;
     unsigned Reg = ImpDefs[i - NumDefs];
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 8d0d344..db73ab4 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -40,13 +40,13 @@ using namespace llvm;
 /// makeVTList - Return an instance of the SDVTList struct initialized with the
 /// specified members.
-static SDVTList makeVTList(const MVT::ValueType *VTs, unsigned NumVTs) {
+static SDVTList makeVTList(const MVT *VTs, unsigned NumVTs) {
   SDVTList Res = {VTs, NumVTs};
   return Res;
 }

-static const fltSemantics *MVTToAPFloatSemantics(MVT::ValueType VT) {
-  switch (VT) {
+static const fltSemantics *MVTToAPFloatSemantics(MVT VT) {
+  switch (VT.getSimpleVT()) {
   default: assert(0 && "Unknown FP format");
   case MVT::f32:     return &APFloat::IEEEsingle;
   case MVT::f64:     return &APFloat::IEEEdouble;
@@ -70,9 +70,9 @@ bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
   return Value.bitwiseIsEqual(V);
 }

-bool ConstantFPSDNode::isValueValidForType(MVT::ValueType VT,
+bool ConstantFPSDNode::isValueValidForType(MVT VT,
                                            const APFloat& Val) {
-  assert(MVT::isFloatingPoint(VT) && "Can only convert between FP types");
+  assert(VT.isFloatingPoint() && "Can only convert between FP types");

   // PPC long double cannot be converted to any other type.
   if (VT == MVT::ppcf128 ||
@@ -416,7 +416,7 @@ static void AddNodeIDNode(FoldingSetNodeID &ID, SDNode *N) {
     LoadSDNode *LD = cast<LoadSDNode>(N);
     ID.AddInteger(LD->getAddressingMode());
     ID.AddInteger(LD->getExtensionType());
-    ID.AddInteger((unsigned int)(LD->getMemoryVT()));
+    ID.AddInteger(LD->getMemoryVT().V);
     ID.AddInteger(LD->getAlignment());
     ID.AddInteger(LD->isVolatile());
     break;
@@ -425,7 +425,7 @@ static void AddNodeIDNode(FoldingSetNodeID &ID, SDNode *N) {
     StoreSDNode *ST = cast<StoreSDNode>(N);
     ID.AddInteger(ST->getAddressingMode());
     ID.AddInteger(ST->isTruncatingStore());
-    ID.AddInteger((unsigned int)(ST->getMemoryVT()));
+    ID.AddInteger(ST->getMemoryVT().V);
     ID.AddInteger(ST->getAlignment());
     ID.AddInteger(ST->isVolatile());
     break;
@@ -574,12 +574,12 @@ void SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
     TargetExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
     break;
   case ISD::VALUETYPE: {
-    MVT::ValueType VT = cast<VTSDNode>(N)->getVT();
-    if (MVT::isExtendedVT(VT)) {
+    MVT VT = cast<VTSDNode>(N)->getVT();
+    if (VT.isExtended()) {
       Erased = ExtendedValueTypeNodes.erase(VT);
     } else {
-      Erased = ValueTypeNodes[VT] != 0;
-      ValueTypeNodes[VT] = 0;
+      Erased = ValueTypeNodes[VT.getSimpleVT()] != 0;
+      ValueTypeNodes[VT.getSimpleVT()] = 0;
     }
     break;
   }
@@ -684,13 +684,13 @@ SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
   if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
     ID.AddInteger(LD->getAddressingMode());
     ID.AddInteger(LD->getExtensionType());
-    ID.AddInteger((unsigned int)(LD->getMemoryVT()));
+    ID.AddInteger(LD->getMemoryVT().V);
     ID.AddInteger(LD->getAlignment());
     ID.AddInteger(LD->isVolatile());
   } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     ID.AddInteger(ST->getAddressingMode());
     ID.AddInteger(ST->isTruncatingStore());
-    ID.AddInteger((unsigned int)(ST->getMemoryVT()));
+    ID.AddInteger(ST->getMemoryVT().V);
     ID.AddInteger(ST->getAlignment());
     ID.AddInteger(ST->isVolatile());
   }
@@ -712,10 +712,10 @@ SelectionDAG::~SelectionDAG() {
   }
 }

-SDOperand SelectionDAG::getZeroExtendInReg(SDOperand Op, MVT::ValueType VT) {
+SDOperand SelectionDAG::getZeroExtendInReg(SDOperand Op, MVT VT) {
   if (Op.getValueType() == VT) return Op;
   APInt Imm = APInt::getLowBitsSet(Op.getValueSizeInBits(),
-                                   MVT::getSizeInBits(VT));
+                                   VT.getSizeInBits());
   return getNode(ISD::AND, Op.getValueType(), Op,
                  getConstant(Imm, Op.getValueType()));
 }
@@ -729,20 +729,20 @@ SDOperand SelectionDAG::getString(const std::string &Val) {
   return SDOperand(N, 0);
 }

-SDOperand SelectionDAG::getConstant(uint64_t Val, MVT::ValueType VT, bool isT) {
-  MVT::ValueType EltVT =
-    MVT::isVector(VT) ? MVT::getVectorElementType(VT) : VT;
+SDOperand SelectionDAG::getConstant(uint64_t Val, MVT VT, bool isT) {
+  MVT EltVT =
+    VT.isVector() ? VT.getVectorElementType() : VT;

-  return getConstant(APInt(MVT::getSizeInBits(EltVT), Val), VT, isT);
+  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT);
 }

-SDOperand SelectionDAG::getConstant(const APInt &Val, MVT::ValueType VT, bool isT) {
-  assert(MVT::isInteger(VT) && "Cannot create FP integer constant!");
+SDOperand SelectionDAG::getConstant(const APInt &Val, MVT VT, bool isT) {
+  assert(VT.isInteger() && "Cannot create FP integer constant!");

-  MVT::ValueType EltVT =
-    MVT::isVector(VT) ? MVT::getVectorElementType(VT) : VT;
+  MVT EltVT =
+    VT.isVector() ? VT.getVectorElementType() : VT;

-  assert(Val.getBitWidth() == MVT::getSizeInBits(EltVT) &&
+  assert(Val.getBitWidth() == EltVT.getSizeInBits() &&
          "APInt size does not match type size!");

   unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
@@ -752,7 +752,7 @@ SDOperand SelectionDAG::getConstant(const APInt &Val, MVT::ValueType VT, bool is
   void *IP = 0;
   SDNode *N = NULL;
   if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
-    if (!MVT::isVector(VT))
+    if (!VT.isVector())
       return SDOperand(N, 0);
   if (!N) {
     N = new ConstantSDNode(isT, Val, EltVT);
@@ -761,9 +761,9 @@ SDOperand SelectionDAG::getConstant(const APInt &Val, MVT::ValueType VT, bool is
   }

   SDOperand Result(N, 0);
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SmallVector<SDOperand, 8> Ops;
-    Ops.assign(MVT::getVectorNumElements(VT), Result);
+    Ops.assign(VT.getVectorNumElements(), Result);
     Result = getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size());
   }
   return Result;
@@ -774,12 +774,11 @@ SDOperand SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
 }

-SDOperand SelectionDAG::getConstantFP(const APFloat& V, MVT::ValueType VT,
-                                      bool isTarget) {
-  assert(MVT::isFloatingPoint(VT) && "Cannot create integer FP constant!");
+SDOperand SelectionDAG::getConstantFP(const APFloat& V, MVT VT, bool isTarget) {
+  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

-  MVT::ValueType EltVT =
-    MVT::isVector(VT) ? MVT::getVectorElementType(VT) : VT;
+  MVT EltVT =
+    VT.isVector() ? VT.getVectorElementType() : VT;

   // Do the map lookup using the actual bit pattern for the floating point
   // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
@@ -791,7 +790,7 @@ SDOperand SelectionDAG::getConstantFP(const APFloat& V, MVT::ValueType VT,
   void *IP = 0;
   SDNode *N = NULL;
   if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
-    if (!MVT::isVector(VT))
+    if (!VT.isVector())
       return SDOperand(N, 0);
   if (!N) {
     N = new ConstantFPSDNode(isTarget, V, EltVT);
@@ -800,18 +799,17 @@ SDOperand SelectionDAG::getConstantFP(const APFloat& V, MVT::ValueType VT,
   }

   SDOperand Result(N, 0);
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SmallVector<SDOperand, 8> Ops;
-    Ops.assign(MVT::getVectorNumElements(VT), Result);
+    Ops.assign(VT.getVectorNumElements(), Result);
     Result = getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size());
   }
   return Result;
 }

-SDOperand SelectionDAG::getConstantFP(double Val, MVT::ValueType VT,
-                                      bool isTarget) {
-  MVT::ValueType EltVT =
-    MVT::isVector(VT) ? MVT::getVectorElementType(VT) : VT;
+SDOperand SelectionDAG::getConstantFP(double Val, MVT VT, bool isTarget) {
+  MVT EltVT =
+    VT.isVector() ? VT.getVectorElementType() : VT;
   if (EltVT==MVT::f32)
     return getConstantFP(APFloat((float)Val), VT, isTarget);
   else
@@ -819,7 +817,7 @@ SDOperand SelectionDAG::getConstantFP(double Val, MVT::ValueType VT,
 }

 SDOperand SelectionDAG::getGlobalAddress(const GlobalValue *GV,
-                                         MVT::ValueType VT, int Offset,
+                                         MVT VT, int Offset,
                                          bool isTargetGA) {
   unsigned Opc;
@@ -848,8 +846,7 @@ SDOperand SelectionDAG::getGlobalAddress(const GlobalValue *GV,
   return SDOperand(N, 0);
 }

-SDOperand SelectionDAG::getFrameIndex(int FI, MVT::ValueType VT,
-                                      bool isTarget) {
+SDOperand SelectionDAG::getFrameIndex(int FI, MVT VT, bool isTarget) {
   unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
   FoldingSetNodeID ID;
   AddNodeIDNode(ID, Opc, getVTList(VT), (SDOperand*)0, 0);
@@ -863,7 +860,7 @@ SDOperand SelectionDAG::getFrameIndex(int FI, MVT::ValueType VT,
   return SDOperand(N, 0);
 }

-SDOperand SelectionDAG::getJumpTable(int JTI, MVT::ValueType VT, bool isTarget){
+SDOperand SelectionDAG::getJumpTable(int JTI, MVT VT, bool isTarget){
   unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
   FoldingSetNodeID ID;
   AddNodeIDNode(ID, Opc, getVTList(VT), (SDOperand*)0, 0);
@@ -877,7 +874,7 @@ SDOperand SelectionDAG::getJumpTable(int JTI, MVT::ValueType VT, bool isTarget){
   return SDOperand(N, 0);
 }

-SDOperand SelectionDAG::getConstantPool(Constant *C, MVT::ValueType VT,
+SDOperand SelectionDAG::getConstantPool(Constant *C, MVT VT,
                                         unsigned Alignment, int Offset,
                                         bool isTarget) {
   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
@@ -896,8 +893,7 @@ SDOperand SelectionDAG::getConstantPool(Constant *C, MVT::ValueType VT,
 }

-SDOperand SelectionDAG::getConstantPool(MachineConstantPoolValue *C,
-                                        MVT::ValueType VT,
+SDOperand SelectionDAG::getConstantPool(MachineConstantPoolValue *C, MVT VT,
                                         unsigned Alignment, int Offset,
                                         bool isTarget) {
   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
@@ -942,12 +938,12 @@ SDOperand SelectionDAG::getArgFlags(ISD::ArgFlagsTy Flags) {
   return SDOperand(N, 0);
 }

-SDOperand SelectionDAG::getValueType(MVT::ValueType VT) {
-  if (!MVT::isExtendedVT(VT) && (unsigned)VT >= ValueTypeNodes.size())
-    ValueTypeNodes.resize(VT+1);
+SDOperand SelectionDAG::getValueType(MVT VT) {
+  if (VT.isSimple() && (unsigned)VT.getSimpleVT() >= ValueTypeNodes.size())
+    ValueTypeNodes.resize(VT.getSimpleVT()+1);

-  SDNode *&N = MVT::isExtendedVT(VT) ?
-    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT];
+  SDNode *&N = VT.isExtended() ?
+    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT()];

   if (N) return SDOperand(N, 0);
   N = new VTSDNode(VT);
@@ -955,7 +951,7 @@ SDOperand SelectionDAG::getValueType(MVT::ValueType VT) {
   return SDOperand(N, 0);
 }

-SDOperand SelectionDAG::getExternalSymbol(const char *Sym, MVT::ValueType VT) {
+SDOperand SelectionDAG::getExternalSymbol(const char *Sym, MVT VT) {
   SDNode *&N = ExternalSymbols[Sym];
   if (N) return SDOperand(N, 0);
   N = new ExternalSymbolSDNode(false, Sym, VT);
@@ -963,8 +959,7 @@ SDOperand SelectionDAG::getExternalSymbol(const char *Sym, MVT::ValueType VT) {
   return SDOperand(N, 0);
 }

-SDOperand SelectionDAG::getTargetExternalSymbol(const char *Sym,
-                                                MVT::ValueType VT) {
+SDOperand SelectionDAG::getTargetExternalSymbol(const char *Sym, MVT VT) {
   SDNode *&N = TargetExternalSymbols[Sym];
   if (N) return SDOperand(N, 0);
   N = new ExternalSymbolSDNode(true, Sym, VT);
@@ -975,7 +970,7 @@ SDOperand SelectionDAG::getTargetExternalSymbol(const char *Sym,
 SDOperand SelectionDAG::getCondCode(ISD::CondCode Cond) {
   if ((unsigned)Cond >= CondCodeNodes.size())
     CondCodeNodes.resize(Cond+1);
-  
+
   if (CondCodeNodes[Cond] == 0) {
     CondCodeNodes[Cond] = new CondCodeSDNode(Cond);
     AllNodes.push_back(CondCodeNodes[Cond]);
@@ -983,7 +978,7 @@ SDOperand SelectionDAG::getCondCode(ISD::CondCode Cond) {
   return SDOperand(CondCodeNodes[Cond], 0);
 }

-SDOperand SelectionDAG::getRegister(unsigned RegNo, MVT::ValueType VT) {
+SDOperand SelectionDAG::getRegister(unsigned RegNo, MVT VT) {
   FoldingSetNodeID ID;
   AddNodeIDNode(ID, ISD::Register, getVTList(VT), (SDOperand*)0, 0);
   ID.AddInteger(RegNo);
@@ -1039,17 +1034,17 @@ SDOperand SelectionDAG::getMemOperand(const MachineMemOperand &MO) {

 /// CreateStackTemporary - Create a stack temporary, suitable for holding the
 /// specified value type.
-SDOperand SelectionDAG::CreateStackTemporary(MVT::ValueType VT) {
+SDOperand SelectionDAG::CreateStackTemporary(MVT VT) {
   MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
-  unsigned ByteSize = MVT::getSizeInBits(VT)/8;
-  const Type *Ty = MVT::getTypeForValueType(VT);
+  unsigned ByteSize = VT.getSizeInBits()/8;
+  const Type *Ty = VT.getTypeForMVT();
   unsigned StackAlign = (unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty);
   int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign);
   return getFrameIndex(FrameIdx, TLI.getPointerTy());
 }

-SDOperand SelectionDAG::FoldSetCC(MVT::ValueType VT, SDOperand N1,
+SDOperand SelectionDAG::FoldSetCC(MVT VT, SDOperand N1,
                                   SDOperand N2, ISD::CondCode Cond) {
   // These setcc operations always fold.
   switch (Cond) {
@@ -1069,7 +1064,7 @@ SDOperand SelectionDAG::FoldSetCC(MVT::ValueType VT, SDOperand N1,
   case ISD::SETUO:
   case ISD::SETUEQ:
   case ISD::SETUNE:
-    assert(!MVT::isInteger(N1.getValueType()) && "Illegal setcc for integer!");
+    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
     break;
   }
@@ -1177,7 +1172,7 @@ void SelectionDAG::ComputeMaskedBits(SDOperand Op, const APInt &Mask,
                                      APInt &KnownZero, APInt &KnownOne,
                                      unsigned Depth) const {
   unsigned BitWidth = Mask.getBitWidth();
-  assert(BitWidth == MVT::getSizeInBits(Op.getValueType()) &&
+  assert(BitWidth == Op.getValueType().getSizeInBits() &&
          "Mask size mismatches value type size!");
   KnownZero = KnownOne = APInt(BitWidth, 0);  // Don't know anything.
@@ -1372,8 +1367,8 @@ void SelectionDAG::ComputeMaskedBits(SDOperand Op, const APInt &Mask,
     }
     return;
   case ISD::SIGN_EXTEND_INREG: {
-    MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
-    unsigned EBits = MVT::getSizeInBits(EVT);
+    MVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+    unsigned EBits = EVT.getSizeInBits();

     // Sign extension. Compute the demanded bits in the result that are not
     // present in the input.
@@ -1417,15 +1412,15 @@ void SelectionDAG::ComputeMaskedBits(SDOperand Op, const APInt &Mask,
   case ISD::LOAD: {
     if (ISD::isZEXTLoad(Op.Val)) {
       LoadSDNode *LD = cast<LoadSDNode>(Op);
-      MVT::ValueType VT = LD->getMemoryVT();
-      unsigned MemBits = MVT::getSizeInBits(VT);
+      MVT VT = LD->getMemoryVT();
+      unsigned MemBits = VT.getSizeInBits();
       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits) & Mask;
     }
     return;
   }
   case ISD::ZERO_EXTEND: {
-    MVT::ValueType InVT = Op.getOperand(0).getValueType();
-    unsigned InBits = MVT::getSizeInBits(InVT);
+    MVT InVT = Op.getOperand(0).getValueType();
+    unsigned InBits = InVT.getSizeInBits();
     APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
     APInt InMask = Mask;
     InMask.trunc(InBits);
@@ -1438,8 +1433,8 @@ void SelectionDAG::ComputeMaskedBits(SDOperand Op, const APInt &Mask,
     return;
   }
   case ISD::SIGN_EXTEND: {
-    MVT::ValueType InVT = Op.getOperand(0).getValueType();
-    unsigned InBits = MVT::getSizeInBits(InVT);
+    MVT InVT = Op.getOperand(0).getValueType();
+    unsigned InBits = InVT.getSizeInBits();
     APInt InSignBit = APInt::getSignBit(InBits);
     APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
     APInt InMask = Mask;
@@ -1479,8 +1474,8 @@ void SelectionDAG::ComputeMaskedBits(SDOperand Op, const APInt &Mask,
     return;
   }
   case ISD::ANY_EXTEND: {
-    MVT::ValueType InVT = Op.getOperand(0).getValueType();
-    unsigned InBits = MVT::getSizeInBits(InVT);
+    MVT InVT = Op.getOperand(0).getValueType();
+    unsigned InBits = InVT.getSizeInBits();
     APInt InMask = Mask;
     InMask.trunc(InBits);
     KnownZero.trunc(InBits);
@@ -1491,8 +1486,8 @@ void SelectionDAG::ComputeMaskedBits(SDOperand Op, const APInt &Mask,
     return;
   }
   case ISD::TRUNCATE: {
-    MVT::ValueType InVT = Op.getOperand(0).getValueType();
-    unsigned InBits = MVT::getSizeInBits(InVT);
+    MVT InVT = Op.getOperand(0).getValueType();
+    unsigned InBits = InVT.getSizeInBits();
     APInt InMask = Mask;
     InMask.zext(InBits);
     KnownZero.zext(InBits);
@@ -1504,8 +1499,8 @@ void SelectionDAG::ComputeMaskedBits(SDOperand Op, const APInt &Mask,
     break;
   }
   case ISD::AssertZext: {
-    MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
-    APInt InMask = APInt::getLowBitsSet(BitWidth, MVT::getSizeInBits(VT));
+    MVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
     ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
                       KnownOne, Depth+1);
     KnownZero |= (~InMask) & Mask;
@@ -1624,9 +1619,9 @@ void SelectionDAG::ComputeMaskedBits(SDOperand Op, const APInt &Mask,
 /// information. For example, immediately after an "SRA X, 2", we know that
 /// the top 3 bits are all equal to each other, so we return 3.
 unsigned SelectionDAG::ComputeNumSignBits(SDOperand Op, unsigned Depth) const{
-  MVT::ValueType VT = Op.getValueType();
-  assert(MVT::isInteger(VT) && "Invalid VT!");
-  unsigned VTBits = MVT::getSizeInBits(VT);
+  MVT VT = Op.getValueType();
+  assert(VT.isInteger() && "Invalid VT!");
+  unsigned VTBits = VT.getSizeInBits();
   unsigned Tmp, Tmp2;
   unsigned FirstAnswer = 1;
@@ -1636,10 +1631,10 @@ unsigned SelectionDAG::ComputeNumSignBits(SDOperand Op, unsigned Depth) const{
   switch (Op.getOpcode()) {
   default: break;
   case ISD::AssertSext:
-    Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
+    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
     return VTBits-Tmp+1;
   case ISD::AssertZext:
-    Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
+    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
     return VTBits-Tmp;

   case ISD::Constant: {
@@ -1653,12 +1648,12 @@ unsigned SelectionDAG::ComputeNumSignBits(SDOperand Op, unsigned Depth) const{
   }

   case ISD::SIGN_EXTEND:
-    Tmp = VTBits-MVT::getSizeInBits(Op.getOperand(0).getValueType());
+    Tmp = VTBits-Op.getOperand(0).getValueType().getSizeInBits();
     return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;

   case ISD::SIGN_EXTEND_INREG:
     // Max of the input and what this extends.
-    Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
+    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
     Tmp = VTBits-Tmp+1;
     Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
@@ -1793,10 +1788,10 @@ unsigned SelectionDAG::ComputeNumSignBits(SDOperand Op, unsigned Depth) const{
       switch (ExtType) {
       default: break;
       case ISD::SEXTLOAD:    // '17' bits known
-        Tmp = MVT::getSizeInBits(LD->getMemoryVT());
+        Tmp = LD->getMemoryVT().getSizeInBits();
         return VTBits-Tmp+1;
       case ISD::ZEXTLOAD:    // '16' bits known
-        Tmp = MVT::getSizeInBits(LD->getMemoryVT());
+        Tmp = LD->getMemoryVT().getSizeInBits();
         return VTBits-Tmp;
       }
     }
@@ -1848,7 +1843,7 @@ bool SelectionDAG::isVerifiedDebugInfoDesc(SDOperand Op) const {

 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
 /// element of the result of the vector shuffle.
 SDOperand SelectionDAG::getShuffleScalarElt(const SDNode *N, unsigned Idx) {
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   SDOperand PermMask = N->getOperand(2);
   unsigned NumElems = PermMask.getNumOperands();
   SDOperand V = (Idx < NumElems) ? N->getOperand(0) : N->getOperand(1);
@@ -1856,18 +1851,18 @@ SDOperand SelectionDAG::getShuffleScalarElt(const SDNode *N, unsigned Idx) {

   if (V.getOpcode() == ISD::BIT_CONVERT) {
     V = V.getOperand(0);
-    if (MVT::getVectorNumElements(V.getValueType()) != NumElems)
+    if (V.getValueType().getVectorNumElements() != NumElems)
       return SDOperand();
   }
   if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
     return (Idx == 0) ? V.getOperand(0)
-                      : getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
+                      : getNode(ISD::UNDEF, VT.getVectorElementType());
   if (V.getOpcode() == ISD::BUILD_VECTOR)
     return V.getOperand(Idx);
   if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
     SDOperand Elt = PermMask.getOperand(Idx);
     if (Elt.getOpcode() == ISD::UNDEF)
-      return getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
+      return getNode(ISD::UNDEF, VT.getVectorElementType());
     return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Elt)->getValue());
   }
   return SDOperand();
@@ -1876,7 +1871,7 @@ SDOperand SelectionDAG::getShuffleScalarElt(const SDNode *N, unsigned Idx) {

 /// getNode - Gets or creates the specified node.
 ///
-SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT) {
+SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT) {
   FoldingSetNodeID ID;
   AddNodeIDNode(ID, Opcode, getVTList(VT), (SDOperand*)0, 0);
   void *IP = 0;
@@ -1889,12 +1884,11 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT) {
   return SDOperand(N, 0);
 }

-SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
-                                SDOperand Operand) {
+SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, SDOperand Operand) {
   // Constant fold unary operations with an integer constant operand.
   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.Val)) {
     const APInt &Val = C->getAPIntValue();
-    unsigned BitWidth = MVT::getSizeInBits(VT);
+    unsigned BitWidth = VT.getSizeInBits();
     switch (Opcode) {
     default: break;
     case ISD::SIGN_EXTEND:
@@ -1979,56 +1973,56 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
       return Operand;   // Factor or merge of one node? No need.
   case ISD::FP_ROUND: assert(0 && "Invalid method to make FP_ROUND node");
   case ISD::FP_EXTEND:
-    assert(MVT::isFloatingPoint(VT) &&
-           MVT::isFloatingPoint(Operand.getValueType()) && "Invalid FP cast!");
+    assert(VT.isFloatingPoint() &&
+           Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
     if (Operand.getValueType() == VT) return Operand;  // noop conversion.
     if (Operand.getOpcode() == ISD::UNDEF)
       return getNode(ISD::UNDEF, VT);
     break;
   case ISD::SIGN_EXTEND:
-    assert(MVT::isInteger(VT) && MVT::isInteger(Operand.getValueType()) &&
+    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid SIGN_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
-    assert(MVT::getSizeInBits(Operand.getValueType()) < MVT::getSizeInBits(VT)
+    assert(Operand.getValueType().getSizeInBits() < VT.getSizeInBits()
          && "Invalid sext node, dst < src!");
    if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
      return getNode(OpOpcode, VT, Operand.Val->getOperand(0));
    break;
   case ISD::ZERO_EXTEND:
-    assert(MVT::isInteger(VT) && MVT::isInteger(Operand.getValueType()) &&
+    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
          "Invalid ZERO_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
-    assert(MVT::getSizeInBits(Operand.getValueType()) < MVT::getSizeInBits(VT)
+    assert(Operand.getValueType().getSizeInBits() < VT.getSizeInBits()
         && "Invalid zext node, dst < src!");
    if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
      return getNode(ISD::ZERO_EXTEND, VT, Operand.Val->getOperand(0));
    break;
   case ISD::ANY_EXTEND:
-    assert(MVT::isInteger(VT) && MVT::isInteger(Operand.getValueType()) &&
+    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
          "Invalid ANY_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
-    assert(MVT::getSizeInBits(Operand.getValueType()) < MVT::getSizeInBits(VT)
+    assert(Operand.getValueType().getSizeInBits() < VT.getSizeInBits()
        && "Invalid anyext node, dst < src!");
    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND)
      // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
      return getNode(OpOpcode, VT, Operand.Val->getOperand(0));
    break;
   case ISD::TRUNCATE:
-    assert(MVT::isInteger(VT) && MVT::isInteger(Operand.getValueType()) &&
+    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
        "Invalid TRUNCATE!");
    if (Operand.getValueType() == VT) return Operand;   // noop truncate
-    assert(MVT::getSizeInBits(Operand.getValueType()) > MVT::getSizeInBits(VT)
+    assert(Operand.getValueType().getSizeInBits() > VT.getSizeInBits()
           && "Invalid truncate node, src < dst!");
     if (OpOpcode == ISD::TRUNCATE)
       return getNode(ISD::TRUNCATE, VT, Operand.Val->getOperand(0));
     else if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
              OpOpcode == ISD::ANY_EXTEND) {
       // If the source is smaller than the dest, we still need an extend.
-      if (MVT::getSizeInBits(Operand.Val->getOperand(0).getValueType())
-          < MVT::getSizeInBits(VT))
+      if (Operand.Val->getOperand(0).getValueType().getSizeInBits()
+          < VT.getSizeInBits())
         return getNode(OpOpcode, VT, Operand.Val->getOperand(0));
-      else if (MVT::getSizeInBits(Operand.Val->getOperand(0).getValueType())
-               > MVT::getSizeInBits(VT))
+      else if (Operand.Val->getOperand(0).getValueType().getSizeInBits()
+               > VT.getSizeInBits())
         return getNode(ISD::TRUNCATE, VT, Operand.Val->getOperand(0));
       else
         return Operand.Val->getOperand(0);
@@ -2036,7 +2030,7 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
     break;
   case ISD::BIT_CONVERT:
     // Basic sanity checking.
-    assert(MVT::getSizeInBits(VT) == MVT::getSizeInBits(Operand.getValueType())
+    assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
            && "Cannot BIT_CONVERT between types of different sizes!");
     if (VT == Operand.getValueType()) return Operand;  // noop conversion.
     if (OpOpcode == ISD::BIT_CONVERT)  // bitconv(bitconv(x)) -> bitconv(x)
@@ -2045,8 +2039,8 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
       return getNode(ISD::UNDEF, VT);
     break;
   case ISD::SCALAR_TO_VECTOR:
-    assert(MVT::isVector(VT) && !MVT::isVector(Operand.getValueType()) &&
-           MVT::getVectorElementType(VT) == Operand.getValueType() &&
+    assert(VT.isVector() && !Operand.getValueType().isVector() &&
+           VT.getVectorElementType() == Operand.getValueType() &&
            "Illegal SCALAR_TO_VECTOR node!");
     if (OpOpcode == ISD::UNDEF)
       return getNode(ISD::UNDEF, VT);
@@ -2090,7 +2084,7 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,


-SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
+SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT,
                                 SDOperand N1, SDOperand N2) {
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val);
   ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.Val);
@@ -2104,7 +2098,7 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
     if (N2.getOpcode() == ISD::EntryToken) return N1;
     break;
   case ISD::AND:
-    assert(MVT::isInteger(VT) && N1.getValueType() == N2.getValueType() &&
+    assert(VT.isInteger() && N1.getValueType() == N2.getValueType() &&
            N1.getValueType() == VT && "Binary operator types must match!");
     // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so it's
     // worth handling here.
@@ -2117,7 +2111,7 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
   case ISD::XOR:
   case ISD::ADD:
   case ISD::SUB:
-    assert(MVT::isInteger(VT) && N1.getValueType() == N2.getValueType() &&
+    assert(VT.isInteger() && N1.getValueType() == N2.getValueType() &&
            N1.getValueType() == VT && "Binary operator types must match!");
     // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
     // it's worth handling here.
@@ -2128,7 +2122,7 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
   case ISD::UREM:
   case ISD::MULHU:
   case ISD::MULHS:
-    assert(MVT::isInteger(VT) && "This operator does not apply to FP types!");
+    assert(VT.isInteger() && "This operator does not apply to FP types!");
     // fall through
   case ISD::MUL:
   case ISD::SDIV:
@@ -2143,8 +2137,8 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
     break;
   case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
     assert(N1.getValueType() == VT &&
-           MVT::isFloatingPoint(N1.getValueType()) &&
-           MVT::isFloatingPoint(N2.getValueType()) &&
+           N1.getValueType().isFloatingPoint() &&
+           N2.getValueType().isFloatingPoint() &&
            "Invalid FCOPYSIGN!");
     break;
   case ISD::SHL:
@@ -2154,49 +2148,49 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
   case ISD::ROTR:
     assert(VT == N1.getValueType() &&
            "Shift operators return type must be the same as their first arg");
-    assert(MVT::isInteger(VT) && MVT::isInteger(N2.getValueType()) &&
+    assert(VT.isInteger() && N2.getValueType().isInteger() &&
            VT != MVT::i1 && "Shifts only work on integers");
     break;
   case ISD::FP_ROUND_INREG: {
-    MVT::ValueType EVT = cast<VTSDNode>(N2)->getVT();
+    MVT EVT = cast<VTSDNode>(N2)->getVT();
     assert(VT == N1.getValueType() && "Not an inreg round!");
-    assert(MVT::isFloatingPoint(VT) && MVT::isFloatingPoint(EVT) &&
+    assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
            "Cannot FP_ROUND_INREG integer types");
-    assert(MVT::getSizeInBits(EVT) <= MVT::getSizeInBits(VT) &&
+    assert(EVT.getSizeInBits() <= VT.getSizeInBits() &&
           "Not rounding down!");
    if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
    break;
   }
   case ISD::FP_ROUND:
-    assert(MVT::isFloatingPoint(VT) &&
-           MVT::isFloatingPoint(N1.getValueType()) &&
-           MVT::getSizeInBits(VT) <= MVT::getSizeInBits(N1.getValueType()) &&
+    assert(VT.isFloatingPoint() &&
+           N1.getValueType().isFloatingPoint() &&
+           VT.getSizeInBits() <= N1.getValueType().getSizeInBits() &&
           isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
    if (N1.getValueType() == VT) return N1;  // noop conversion.
    break;
   case ISD::AssertSext:
   case ISD::AssertZext: {
-    MVT::ValueType EVT = cast<VTSDNode>(N2)->getVT();
+    MVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
-    assert(MVT::isInteger(VT) && MVT::isInteger(EVT) &&
+    assert(VT.isInteger() && EVT.isInteger() &&
          "Cannot *_EXTEND_INREG FP types");
-    assert(MVT::getSizeInBits(EVT) <= MVT::getSizeInBits(VT) &&
+    assert(EVT.getSizeInBits() <= VT.getSizeInBits() &&
          "Not extending!");
    if (VT == EVT) return N1; // noop assertion.
     break;
   }
   case ISD::SIGN_EXTEND_INREG: {
-    MVT::ValueType EVT = cast<VTSDNode>(N2)->getVT();
+    MVT EVT = cast<VTSDNode>(N2)->getVT();
     assert(VT == N1.getValueType() && "Not an inreg extend!");
-    assert(MVT::isInteger(VT) && MVT::isInteger(EVT) &&
+    assert(VT.isInteger() && EVT.isInteger() &&
            "Cannot *_EXTEND_INREG FP types");
-    assert(MVT::getSizeInBits(EVT) <= MVT::getSizeInBits(VT) &&
+    assert(EVT.getSizeInBits() <= VT.getSizeInBits() &&
           "Not extending!");
     if (EVT == VT) return N1;  // Not actually extending

     if (N1C) {
       APInt Val = N1C->getAPIntValue();
-      unsigned FromBits = MVT::getSizeInBits(cast<VTSDNode>(N2)->getVT());
+      unsigned FromBits = cast<VTSDNode>(N2)->getVT().getSizeInBits();
       Val <<= Val.getBitWidth()-FromBits;
       Val = Val.ashr(Val.getBitWidth()-FromBits);
       return getConstant(Val, VT);
@@ -2215,7 +2209,7 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
     if (N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0) {
       unsigned Factor =
-        MVT::getVectorNumElements(N1.getOperand(0).getValueType());
+        N1.getOperand(0).getValueType().getVectorNumElements();
       return getNode(ISD::EXTRACT_VECTOR_ELT, VT,
                      N1.getOperand(N2C->getValue() / Factor),
                      getConstant(N2C->getValue() % Factor, N2.getValueType()));
@@ -2238,9 +2232,9 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
     break;
   case ISD::EXTRACT_ELEMENT:
     assert(N2C && (unsigned)N2C->getValue() < 2 && "Bad EXTRACT_ELEMENT!");
-    assert(!MVT::isVector(N1.getValueType()) &&
-           MVT::isInteger(N1.getValueType()) &&
-           !MVT::isVector(VT) && MVT::isInteger(VT) &&
+    assert(!N1.getValueType().isVector() &&
+           N1.getValueType().isInteger() &&
+           !VT.isVector() && VT.isInteger() &&
            "EXTRACT_ELEMENT only applies to integers!");

     // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
@@ -2251,7 +2245,7 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,

     // EXTRACT_ELEMENT of a constant int is also very common.
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
-      unsigned ElementSize = MVT::getSizeInBits(VT);
+      unsigned ElementSize = VT.getSizeInBits();
       unsigned Shift = ElementSize * N2C->getValue();
       APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
       return getConstant(ShiftedVal.trunc(ElementSize), VT);
@@ -2365,7 +2359,7 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
     case ISD::SREM:
     case ISD::SRL:
     case ISD::SHL:
-      if (!MVT::isVector(VT))
+      if (!VT.isVector())
         return getConstant(0, VT);    // fold op(undef, arg2) -> 0
       // For vectors, we can't easily build an all zero vector, just return
       // the LHS.
@@ -2401,14 +2395,14 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
     case ISD::AND:
     case ISD::SRL:
     case ISD::SHL:
-      if (!MVT::isVector(VT))
+      if (!VT.isVector())
        return getConstant(0, VT);  // fold op(arg1, undef) -> 0
      // For vectors, we can't easily build an all zero vector, just return
      // the LHS.
      return N1;
     case ISD::OR:
-      if (!MVT::isVector(VT))
-        return getConstant(MVT::getIntVTBitMask(VT), VT);
+      if (!VT.isVector())
+        return getConstant(VT.getIntegerVTBitMask(), VT);
      // For vectors, we can't easily build an all one vector, just return
      // the LHS.
      return N1;
@@ -2437,7 +2431,7 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
   return SDOperand(N, 0);
 }

-SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
+SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT,
                                 SDOperand N1, SDOperand N2, SDOperand N3) {
   // Perform various simplifications.
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val);
@@ -2469,9 +2463,9 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
     break;
   case ISD::VECTOR_SHUFFLE:
     assert(VT == N1.getValueType() && VT == N2.getValueType() &&
-           MVT::isVector(VT) && MVT::isVector(N3.getValueType()) &&
+           VT.isVector() && N3.getValueType().isVector() &&
            N3.getOpcode() == ISD::BUILD_VECTOR &&
-           MVT::getVectorNumElements(VT) == N3.getNumOperands() &&
+           VT.getVectorNumElements() == N3.getNumOperands() &&
            "Illegal VECTOR_SHUFFLE node!");
     break;
   case ISD::BIT_CONVERT:
@@ -2500,14 +2494,14 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
   return SDOperand(N, 0);
 }

-SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
+SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT,
                                 SDOperand N1, SDOperand N2, SDOperand N3,
                                 SDOperand N4) {
   SDOperand Ops[] = { N1, N2, N3, N4 };
   return getNode(Opcode, VT, Ops, 4);
 }

-SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
+SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT,
                                 SDOperand N1, SDOperand N2, SDOperand N3,
                                 SDOperand N4, SDOperand N5) {
   SDOperand Ops[] = { N1, N2, N3, N4, N5 };
@@ -2516,10 +2510,9 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,

 /// getMemsetValue - Vectorized representation of the memset value
 /// operand.
-static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
-                                SelectionDAG &DAG) {
-  unsigned NumBits = MVT::isVector(VT) ?
-    MVT::getSizeInBits(MVT::getVectorElementType(VT)) : MVT::getSizeInBits(VT);
+static SDOperand getMemsetValue(SDOperand Value, MVT VT, SelectionDAG &DAG) {
+  unsigned NumBits = VT.isVector() ?
+    VT.getVectorElementType().getSizeInBits() : VT.getSizeInBits();
   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
     APInt Val = APInt(NumBits, C->getValue() & 255);
     unsigned Shift = 8;
@@ -2527,7 +2520,7 @@ static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
       Val = (Val << Shift) | Val;
       Shift <<= 1;
     }
-    if (MVT::isInteger(VT))
+    if (VT.isInteger())
       return DAG.getConstant(Val, VT);
     return DAG.getConstantFP(APFloat(Val), VT);
   }
@@ -2547,11 +2540,11 @@ static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
 /// used when a memcpy is turned into a memset when the source is a constant
 /// string ptr.
-static SDOperand getMemsetStringVal(MVT::ValueType VT, SelectionDAG &DAG,
+static SDOperand getMemsetStringVal(MVT VT, SelectionDAG &DAG,
                                     const TargetLowering &TLI,
                                     std::string &Str, unsigned Offset) {
-  assert(!MVT::isVector(VT) && "Can't handle vector type here!");
-  unsigned NumBits = MVT::getSizeInBits(VT);
+  assert(!VT.isVector() && "Can't handle vector type here!");
+  unsigned NumBits = VT.getSizeInBits();
   unsigned MSB = NumBits / 8;
   uint64_t Val = 0;
   if (TLI.isLittleEndian())
@@ -2567,7 +2560,7 @@ static SDOperand getMemsetStringVal(MVT::ValueType VT, SelectionDAG &DAG,
 ///
 static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
                                       SelectionDAG &DAG) {
-  MVT::ValueType VT = Base.getValueType();
+  MVT VT = Base.getValueType();
   return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
 }
@@ -2604,7 +2597,7 @@ static bool isMemSrcFromString(SDOperand Src, std::string &Str,
 /// to replace the memset / memcpy is below the threshold. It also returns the
 /// types of the sequence of memory ops to perform memset / memcpy.
 static
-bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
+bool MeetsMaxMemopRequirement(std::vector<MVT> &MemOps,
                               SDOperand Dst, SDOperand Src,
                               unsigned Limit, uint64_t Size, unsigned &Align,
                               SelectionDAG &DAG,
@@ -2615,10 +2608,10 @@ bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
   uint64_t SrcOff = 0;
   bool isSrcStr = isMemSrcFromString(Src, Str, SrcOff);
   bool isSrcConst = isa<ConstantSDNode>(Src);
-  MVT::ValueType VT= TLI.getOptimalMemOpType(Size, Align, isSrcConst, isSrcStr);
+  MVT VT= TLI.getOptimalMemOpType(Size, Align, isSrcConst, isSrcStr);
   if (VT != MVT::iAny) {
     unsigned NewAlign = (unsigned)
-      TLI.getTargetData()->getABITypeAlignment(MVT::getTypeForValueType(VT));
+      TLI.getTargetData()->getABITypeAlignment(VT.getTypeForMVT());
     // If source is a string constant, this will require an unaligned load.
     if (NewAlign > Align && (isSrcConst || AllowUnalign)) {
       if (Dst.getOpcode() != ISD::FrameIndex) {
@@ -2654,10 +2647,10 @@ bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
     }
   }

-  MVT::ValueType LVT = MVT::i64;
+  MVT LVT = MVT::i64;
   while (!TLI.isTypeLegal(LVT))
-    LVT = (MVT::ValueType)((unsigned)LVT - 1);
-  assert(MVT::isInteger(LVT));
+    LVT = (MVT::SimpleValueType)(LVT.getSimpleVT() - 1);
+  assert(LVT.isInteger());

   if (VT > LVT)
     VT = LVT;
@@ -2665,16 +2658,16 @@ bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
   unsigned NumMemOps = 0;
   while (Size != 0) {
-    unsigned VTSize = MVT::getSizeInBits(VT) / 8;
+    unsigned VTSize = VT.getSizeInBits() / 8;
     while (VTSize > Size) {
       // For now, only use non-vector load / store's for the left-over pieces.
-      if (MVT::isVector(VT)) {
+      if (VT.isVector()) {
         VT = MVT::i64;
         while (!TLI.isTypeLegal(VT))
-          VT = (MVT::ValueType)((unsigned)VT - 1);
-        VTSize = MVT::getSizeInBits(VT) / 8;
+          VT = (MVT::SimpleValueType)(VT.getSimpleVT() - 1);
+        VTSize = VT.getSizeInBits() / 8;
       } else {
-        VT = (MVT::ValueType)((unsigned)VT - 1);
+        VT = (MVT::SimpleValueType)(VT.getSimpleVT() - 1);
         VTSize >>= 1;
       }
     }
@@ -2698,7 +2691,7 @@ static SDOperand getMemcpyLoadsAndStores(SelectionDAG &DAG,

   // Expand memcpy to a series of load and store ops if the size operand falls
   // below a certain threshold.
-  std::vector<MVT::ValueType> MemOps;
+  std::vector<MVT> MemOps;
   uint64_t Limit = -1;
   if (!AlwaysInline)
     Limit = TLI.getMaxStoresPerMemcpy();
@@ -2714,11 +2707,11 @@ static SDOperand getMemcpyLoadsAndStores(SelectionDAG &DAG,
   SmallVector<SDOperand, 8> OutChains;
   unsigned NumMemOps = MemOps.size();
   for (unsigned i = 0; i < NumMemOps; i++) {
-    MVT::ValueType VT = MemOps[i];
-    unsigned VTSize = MVT::getSizeInBits(VT) / 8;
+    MVT VT = MemOps[i];
+    unsigned VTSize = VT.getSizeInBits() / 8;
     SDOperand Value, Store;

-    if (CopyFromStr && !MVT::isVector(VT)) {
+    if (CopyFromStr && !VT.isVector()) {
       // It's unlikely a store of a vector immediate can be done in a single
       // instruction. It would require a load from a constantpool first.
       // FIXME: Handle cases where store of vector immediate is done in a
@@ -2754,7 +2747,7 @@ static SDOperand getMemmoveLoadsAndStores(SelectionDAG &DAG,

   // Expand memmove to a series of load and store ops if the size operand falls
   // below a certain threshold.
-  std::vector<MVT::ValueType> MemOps;
+  std::vector<MVT> MemOps;
   uint64_t Limit = -1;
   if (!AlwaysInline)
     Limit = TLI.getMaxStoresPerMemmove();
@@ -2770,8 +2763,8 @@ static SDOperand getMemmoveLoadsAndStores(SelectionDAG &DAG,
   SmallVector<SDOperand, 8> OutChains;
   unsigned NumMemOps = MemOps.size();
   for (unsigned i = 0; i < NumMemOps; i++) {
-    MVT::ValueType VT = MemOps[i];
-    unsigned VTSize = MVT::getSizeInBits(VT) / 8;
+    MVT VT = MemOps[i];
+    unsigned VTSize = VT.getSizeInBits() / 8;
     SDOperand Value, Store;

     Value = DAG.getLoad(VT, Chain,
@@ -2785,8 +2778,8 @@ static SDOperand getMemmoveLoadsAndStores(SelectionDAG &DAG,
                       &LoadChains[0], LoadChains.size());
   OutChains.clear();
   for (unsigned i = 0; i < NumMemOps; i++) {
-    MVT::ValueType VT = MemOps[i];
-    unsigned VTSize = MVT::getSizeInBits(VT) / 8;
+    MVT VT = MemOps[i];
+    unsigned VTSize = VT.getSizeInBits() / 8;
     SDOperand Value, Store;

     Store = DAG.getStore(Chain, LoadValues[i],
@@ -2809,7 +2802,7 @@ static SDOperand getMemsetStores(SelectionDAG &DAG,

   // Expand memset to a series of load/store ops if the size operand
   // falls below a certain threshold.
-  std::vector<MVT::ValueType> MemOps;
+  std::vector<MVT> MemOps;
   if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, TLI.getMaxStoresPerMemset(),
                                 Size, Align, DAG, TLI))
     return SDOperand();
@@ -2819,8 +2812,8 @@ static SDOperand getMemsetStores(SelectionDAG &DAG,

   unsigned NumMemOps = MemOps.size();
   for (unsigned i = 0; i < NumMemOps; i++) {
-    MVT::ValueType VT = MemOps[i];
-    unsigned VTSize = MVT::getSizeInBits(VT) / 8;
+    MVT VT = MemOps[i];
+    unsigned VTSize = VT.getSizeInBits() / 8;
     SDOperand Value = getMemsetValue(Src, VT, DAG);
     SDOperand Store = DAG.getStore(Chain, Value,
                                    getMemBasePlusOffset(Dst, DstOff, DAG),
@@ -2984,14 +2977,14 @@ SDOperand SelectionDAG::getMemset(SDOperand Chain, SDOperand Dst,

 SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain,
                                   SDOperand Ptr, SDOperand Cmp,
-                                  SDOperand Swp, MVT::ValueType VT) {
+                                  SDOperand Swp, MVT VT) {
   assert(Opcode == ISD::ATOMIC_LCS && "Invalid Atomic Op");
   assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
   SDVTList VTs = getVTList(Cmp.getValueType(), MVT::Other);
   FoldingSetNodeID ID;
   SDOperand Ops[] = {Chain, Ptr, Cmp, Swp};
   AddNodeIDNode(ID, Opcode, VTs, Ops, 4);
-  ID.AddInteger((unsigned int)VT);
+  ID.AddInteger(VT.V);
   void* IP = 0;
   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
     return SDOperand(E, 0);
@@ -3003,7 +2996,7 @@ SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain,

 SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain,
                                   SDOperand Ptr, SDOperand Val,
-                                  MVT::ValueType VT) {
+                                  MVT VT) {
   assert((   Opcode == ISD::ATOMIC_LAS || Opcode == ISD::ATOMIC_LSS
           || Opcode == ISD::ATOMIC_SWAP || Opcode == ISD::ATOMIC_LOAD_AND
           || Opcode == ISD::ATOMIC_LOAD_OR || Opcode == ISD::ATOMIC_LOAD_XOR
@@ -3014,7 +3007,7 @@ SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain,
   FoldingSetNodeID ID;
   SDOperand Ops[] = {Chain, Ptr, Val};
   AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
-  ID.AddInteger((unsigned int)VT);
+  ID.AddInteger(VT.V);
   void* IP = 0;
   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
     return SDOperand(E, 0);
@@ -3026,14 +3019,14 @@ SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain,

 SDOperand
 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
-                      MVT::ValueType VT, SDOperand Chain,
+                      MVT VT, SDOperand Chain,
                       SDOperand Ptr, SDOperand Offset,
-                      const Value *SV, int SVOffset, MVT::ValueType EVT,
+                      const Value *SV, int SVOffset, MVT EVT,
                       bool isVolatile, unsigned Alignment) {
   if (Alignment == 0) { // Ensure that codegen never sees alignment 0
     const Type *Ty = 0;
     if (VT != MVT::iPTR) {
-      Ty = MVT::getTypeForValueType(VT);
+      Ty = VT.getTypeForMVT();
     } else if (SV) {
       const PointerType *PT = dyn_cast<PointerType>(SV->getType());
       assert(PT && "Value for load must be a pointer");
@@ -3049,14 +3042,14 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
     assert(VT == EVT && "Non-extending load from different memory type!");
   } else {
     // Extending load.
-    if (MVT::isVector(VT))
-      assert(EVT == MVT::getVectorElementType(VT) && "Invalid vector extload!");
+    if (VT.isVector())
+      assert(EVT == VT.getVectorElementType() && "Invalid vector extload!");
     else
-      assert(MVT::getSizeInBits(EVT) < MVT::getSizeInBits(VT) &&
+      assert(EVT.getSizeInBits() < VT.getSizeInBits() &&
             "Should only be an extending load, not truncating!");
-    assert((ExtType == ISD::EXTLOAD || MVT::isInteger(VT)) &&
+    assert((ExtType == ISD::EXTLOAD || VT.isInteger()) &&
            "Cannot sign/zero extend a FP/Vector load!");
-    assert(MVT::isInteger(VT) == MVT::isInteger(EVT) &&
+    assert(VT.isInteger() == EVT.isInteger() &&
            "Cannot convert from FP to Int or Int -> FP!");
   }
@@ -3071,7 +3064,7 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
   AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
   ID.AddInteger(AM);
   ID.AddInteger(ExtType);
-  ID.AddInteger((unsigned int)EVT);
+  ID.AddInteger(EVT.V);
   ID.AddInteger(Alignment);
   ID.AddInteger(isVolatile);
   void *IP = 0;
@@ -3084,7 +3077,7 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
   return SDOperand(N, 0);
 }

-SDOperand SelectionDAG::getLoad(MVT::ValueType VT,
+SDOperand SelectionDAG::getLoad(MVT VT,
                                 SDOperand Chain, SDOperand Ptr,
                                 const Value *SV, int SVOffset,
                                 bool isVolatile, unsigned Alignment) {
@@ -3093,10 +3086,10 @@ SDOperand SelectionDAG::getLoad(MVT::ValueType VT,
                  SV, SVOffset, VT, isVolatile, Alignment);
 }

-SDOperand SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, MVT::ValueType VT,
+SDOperand SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, MVT VT,
                                    SDOperand Chain, SDOperand Ptr,
                                    const Value *SV,
-                                   int SVOffset, MVT::ValueType EVT,
+                                   int SVOffset, MVT EVT,
                                    bool isVolatile, unsigned Alignment) {
   SDOperand Undef = getNode(ISD::UNDEF, Ptr.getValueType());
   return getLoad(ISD::UNINDEXED, ExtType, VT, Chain, Ptr, Undef,
@@ -3118,12 +3111,12 @@ SelectionDAG::getIndexedLoad(SDOperand OrigLoad, SDOperand Base,
 SDOperand SelectionDAG::getStore(SDOperand Chain, SDOperand Val,
                                  SDOperand Ptr, const Value *SV, int SVOffset,
                                  bool isVolatile, unsigned Alignment) {
-  MVT::ValueType VT = Val.getValueType();
+  MVT VT = Val.getValueType();

   if (Alignment == 0) { // Ensure that codegen never sees alignment 0
     const Type *Ty = 0;
     if (VT != MVT::iPTR) {
-      Ty = MVT::getTypeForValueType(VT);
+      Ty = VT.getTypeForMVT();
     } else if (SV) {
       const PointerType *PT = dyn_cast<PointerType>(SV->getType());
       assert(PT && "Value for store must be a pointer");
@@ -3139,7 +3132,7 @@ SDOperand SelectionDAG::getStore(SDOperand Chain, SDOperand Val,
   AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
   ID.AddInteger(ISD::UNINDEXED);
   ID.AddInteger(false);
-  ID.AddInteger((unsigned int)VT);
+  ID.AddInteger(VT.V);
   ID.AddInteger(Alignment);
   ID.AddInteger(isVolatile);
   void *IP = 0;
@@ -3154,22 +3147,22 @@ SDOperand SelectionDAG::getStore(SDOperand Chain, SDOperand Val,

 SDOperand SelectionDAG::getTruncStore(SDOperand Chain, SDOperand Val,
                                       SDOperand Ptr, const Value *SV,
-                                      int SVOffset, MVT::ValueType SVT,
+                                      int SVOffset, MVT SVT,
                                       bool isVolatile, unsigned Alignment) {
-  MVT::ValueType VT = Val.getValueType();
+  MVT VT = Val.getValueType();

   if (VT == SVT)
     return getStore(Chain, Val, Ptr, SV, SVOffset, isVolatile, Alignment);

-  assert(MVT::getSizeInBits(VT) > MVT::getSizeInBits(SVT) &&
+  assert(VT.getSizeInBits() > SVT.getSizeInBits() &&
          "Not a truncation?");
-  assert(MVT::isInteger(VT) == MVT::isInteger(SVT) &&
+  assert(VT.isInteger() == SVT.isInteger() &&
          "Can't do FP-INT conversion!");

   if (Alignment == 0) { // Ensure that codegen never sees alignment 0
     const Type *Ty = 0;
     if (VT != MVT::iPTR) {
-      Ty = MVT::getTypeForValueType(VT);
+      Ty = VT.getTypeForMVT();
     } else if (SV) {
       const PointerType *PT = dyn_cast<PointerType>(SV->getType());
       assert(PT && "Value for store must be a pointer");
@@ -3185,7 +3178,7 @@ SDOperand SelectionDAG::getTruncStore(SDOperand Chain, SDOperand Val,
   AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
   ID.AddInteger(ISD::UNINDEXED);
   ID.AddInteger(1);
-  ID.AddInteger((unsigned int)SVT);
+  ID.AddInteger(SVT.V);
   ID.AddInteger(Alignment);
   ID.AddInteger(isVolatile);
   void *IP = 0;
@@ -3210,7 +3203,7 @@ SelectionDAG::getIndexedStore(SDOperand OrigStore, SDOperand Base,
   AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
   ID.AddInteger(AM);
   ID.AddInteger(ST->isTruncatingStore());
-  ID.AddInteger((unsigned int)(ST->getMemoryVT()));
+  ID.AddInteger(ST->getMemoryVT().V);
   ID.AddInteger(ST->getAlignment());
   ID.AddInteger(ST->isVolatile());
   void *IP = 0;
@@ -3225,14 +3218,14 @@ SelectionDAG::getIndexedStore(SDOperand OrigStore, SDOperand Base,
   return SDOperand(N, 0);
 }

-SDOperand SelectionDAG::getVAArg(MVT::ValueType VT,
+SDOperand SelectionDAG::getVAArg(MVT VT,
                                  SDOperand Chain, SDOperand Ptr,
                                  SDOperand SV) {
   SDOperand Ops[] = { Chain, Ptr, SV };
   return getNode(ISD::VAARG, getVTList(VT, MVT::Other), Ops, 3);
 }

-SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
+SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT,
                                 SDOperandPtr Ops, unsigned NumOps) {
   switch (NumOps) {
   case 0: return getNode(Opcode, VT);
@@ -3281,14 +3274,14 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
 }

 SDOperand SelectionDAG::getNode(unsigned Opcode,
-                                std::vector<MVT::ValueType> &ResultTys,
+                                std::vector<MVT> &ResultTys,
                                 SDOperandPtr Ops, unsigned NumOps) {
   return getNode(Opcode, getNodeValueTypes(ResultTys), ResultTys.size(),
                  Ops, NumOps);
 }

 SDOperand SelectionDAG::getNode(unsigned Opcode,
-                                const MVT::ValueType *VTs, unsigned NumVTs,
+                                const MVT *VTs, unsigned NumVTs,
                                 SDOperandPtr Ops, unsigned NumOps) {
   if (NumVTs == 1)
     return getNode(Opcode, VTs[0], Ops, NumOps);
@@ -3315,7 +3308,7 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, SDVTList VTList,
       if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
         // If the and is only masking out bits that cannot effect the shift,
         // eliminate the and.
-        unsigned NumBits = MVT::getSizeInBits(VT)*2;
+        unsigned NumBits = VT.getSizeInBits()*2;
         if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
           return getNode(Opcode, VT, N1, N2, N3.getOperand(0));
       }
@@ -3390,31 +3383,31 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, SDVTList VTList,
   return getNode(Opcode, VTList, Ops, 5);
 }

-SDVTList SelectionDAG::getVTList(MVT::ValueType VT) {
+SDVTList SelectionDAG::getVTList(MVT VT) {
   return makeVTList(SDNode::getValueTypeList(VT), 1);
 }

-SDVTList SelectionDAG::getVTList(MVT::ValueType VT1, MVT::ValueType VT2) {
-  for (std::list<std::vector<MVT::ValueType> >::iterator I = VTList.begin(),
+SDVTList SelectionDAG::getVTList(MVT VT1, MVT VT2) {
+  for (std::list<std::vector<MVT> >::iterator I = VTList.begin(),
        E = VTList.end(); I != E; ++I) {
     if (I->size() == 2 && (*I)[0] == VT1 && (*I)[1] == VT2)
       return makeVTList(&(*I)[0], 2);
   }
-  std::vector<MVT::ValueType> V;
+  std::vector<MVT> V;
   V.push_back(VT1);
   V.push_back(VT2);
   VTList.push_front(V);
   return makeVTList(&(*VTList.begin())[0], 2);
 }

-SDVTList SelectionDAG::getVTList(MVT::ValueType VT1, MVT::ValueType VT2,
-                                 MVT::ValueType VT3) {
-  for (std::list<std::vector<MVT::ValueType> >::iterator I = VTList.begin(),
+SDVTList SelectionDAG::getVTList(MVT VT1, MVT VT2,
+                                 MVT VT3) {
+  for (std::list<std::vector<MVT> >::iterator I = VTList.begin(),
        E = VTList.end(); I != E; ++I) {
     if (I->size() == 3 && (*I)[0] == VT1 && (*I)[1] == VT2 &&
         (*I)[2] == VT3)
       return makeVTList(&(*I)[0], 3);
   }
-  std::vector<MVT::ValueType> V;
+  std::vector<MVT> V;
   V.push_back(VT1);
   V.push_back(VT2);
   V.push_back(VT3);
@@ -3422,7 +3415,7 @@ SDVTList SelectionDAG::getVTList(MVT::ValueType VT1, MVT::ValueType VT2,
   return makeVTList(&(*VTList.begin())[0], 3);
 }

-SDVTList SelectionDAG::getVTList(const MVT::ValueType *VTs, unsigned NumVTs) {
+SDVTList SelectionDAG::getVTList(const MVT *VTs, unsigned NumVTs) {
   switch (NumVTs) {
     case 0: assert(0 && "Cannot have nodes without results!");
     case 1: return getVTList(VTs[0]);
@@ -3431,7 +3424,7 @@ SDVTList SelectionDAG::getVTList(const MVT::ValueType *VTs, unsigned NumVTs) {
     default: break;
   }

-  for (std::list<std::vector<MVT::ValueType> >::iterator I = VTList.begin(),
+  for (std::list<std::vector<MVT> >::iterator I = VTList.begin(),
        E = VTList.end(); I != E; ++I) {
     if (I->size() != NumVTs || VTs[0] != (*I)[0] || VTs[1] != (*I)[1]) continue;
@@ -3445,7 +3438,7 @@ SDVTList SelectionDAG::getVTList(const MVT::ValueType *VTs, unsigned NumVTs) {
     return makeVTList(&*I->begin(), NumVTs);
   }

-  VTList.push_front(std::vector<MVT::ValueType>(VTs, VTs+NumVTs));
+  VTList.push_front(std::vector<MVT>(VTs, VTs+NumVTs));
   return makeVTList(&*VTList.begin()->begin(), NumVTs);
 }
@@ -3628,7 +3621,7 @@ void SDNode::MorphNodeTo(unsigned Opc, SDVTList L,
 /// node of the specified opcode and operands, it returns that node instead of
 /// the current one.
 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc,
-                                   MVT::ValueType VT) {
+                                   MVT VT) {
   SDVTList VTs = getVTList(VT);
   FoldingSetNodeID ID;
   AddNodeIDNode(ID, ISD::BUILTIN_OP_END+TargetOpc, VTs, (SDOperand*)0, 0);
@@ -3645,7 +3638,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc,
 }

 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc,
-                                   MVT::ValueType VT, SDOperand Op1) {
+                                   MVT VT, SDOperand Op1) {
   // If an identical node already exists, use it.
SDVTList VTs = getVTList(VT); SDOperand Ops[] = { Op1 }; @@ -3663,7 +3656,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc, } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc, - MVT::ValueType VT, SDOperand Op1, + MVT VT, SDOperand Op1, SDOperand Op2) { // If an identical node already exists, use it. SDVTList VTs = getVTList(VT); @@ -3684,7 +3677,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc, } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc, - MVT::ValueType VT, SDOperand Op1, + MVT VT, SDOperand Op1, SDOperand Op2, SDOperand Op3) { // If an identical node already exists, use it. SDVTList VTs = getVTList(VT); @@ -3704,7 +3697,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc, } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc, - MVT::ValueType VT, SDOperandPtr Ops, + MVT VT, SDOperandPtr Ops, unsigned NumOps) { // If an identical node already exists, use it. SDVTList VTs = getVTList(VT); @@ -3722,7 +3715,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc, } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc, - MVT::ValueType VT1, MVT::ValueType VT2, + MVT VT1, MVT VT2, SDOperand Op1, SDOperand Op2) { SDVTList VTs = getVTList(VT1, VT2); FoldingSetNodeID ID; @@ -3739,7 +3732,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc, } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc, - MVT::ValueType VT1, MVT::ValueType VT2, + MVT VT1, MVT VT2, SDOperand Op1, SDOperand Op2, SDOperand Op3) { // If an identical node already exists, use it. @@ -3765,94 +3758,87 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc, /// Note that getTargetNode returns the resultant node. If there is already a /// node of the specified opcode and operands, it returns that node instead of /// the current one. 
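SelectNodeTo above keys its FoldingSet lookup on ISD::BUILTIN_OP_END+TargetOpc, that is, target-specific opcodes are stored biased so they occupy the numeric range just past the generic ISD opcodes. A toy sketch of that encoding, with hypothetical opcode names:

    #include <cstdio>

    // Generic opcodes occupy [0, BUILTIN_OP_END); target opcodes are stored
    // biased by BUILTIN_OP_END so both kinds fit in one unsigned field.
    enum { GENERIC_ADD, GENERIC_SUB, BUILTIN_OP_END };
    enum { TARGET_FANCY_ADD, TARGET_FANCY_MUL };   // target-local numbering

    unsigned encodeTargetOpcode(unsigned TargetOpc) {
      return BUILTIN_OP_END + TargetOpc;
    }
    bool isTargetOpcode(unsigned Enc) { return Enc >= BUILTIN_OP_END; }
    unsigned decodeTargetOpcode(unsigned Enc) { return Enc - BUILTIN_OP_END; }

    int main() {
      unsigned Enc = encodeTargetOpcode(TARGET_FANCY_MUL);
      std::printf("target? %d, local opcode %u\n",
                  isTargetOpcode(Enc), decodeTargetOpcode(Enc));
      return 0;
    }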
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT) { +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT) { return getNode(ISD::BUILTIN_OP_END+Opcode, VT).Val; } -SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT, - SDOperand Op1) { +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT, SDOperand Op1) { return getNode(ISD::BUILTIN_OP_END+Opcode, VT, Op1).Val; } -SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT, +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT, SDOperand Op1, SDOperand Op2) { return getNode(ISD::BUILTIN_OP_END+Opcode, VT, Op1, Op2).Val; } -SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT, +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT, SDOperand Op1, SDOperand Op2, SDOperand Op3) { return getNode(ISD::BUILTIN_OP_END+Opcode, VT, Op1, Op2, Op3).Val; } -SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT, +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT, SDOperandPtr Ops, unsigned NumOps) { return getNode(ISD::BUILTIN_OP_END+Opcode, VT, Ops, NumOps).Val; } -SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2) { - const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2); +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2) { + const MVT *VTs = getNodeValueTypes(VT1, VT2); SDOperand Op; return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 2, &Op, 0).Val; } -SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, SDOperand Op1) { - const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2); +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, + MVT VT2, SDOperand Op1) { + const MVT *VTs = getNodeValueTypes(VT1, VT2); return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 2, &Op1, 1).Val; } -SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, SDOperand Op1, +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, + MVT VT2, SDOperand Op1, SDOperand Op2) { - const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2); + const MVT *VTs = getNodeValueTypes(VT1, VT2); SDOperand Ops[] = { Op1, Op2 }; return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 2, Ops, 2).Val; } -SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, SDOperand Op1, +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, + MVT VT2, SDOperand Op1, SDOperand Op2, SDOperand Op3) { - const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2); + const MVT *VTs = getNodeValueTypes(VT1, VT2); SDOperand Ops[] = { Op1, Op2, Op3 }; return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 2, Ops, 3).Val; } -SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, SDOperandPtr Ops, unsigned NumOps) { - const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2); + const MVT *VTs = getNodeValueTypes(VT1, VT2); return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 2, Ops, NumOps).Val; } -SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, MVT::ValueType VT3, +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, MVT VT3, SDOperand Op1, SDOperand Op2) { - const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2, VT3); + const MVT *VTs = getNodeValueTypes(VT1, VT2, VT3); SDOperand Ops[] = { Op1, Op2 }; return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 
3, Ops, 2).Val; } -SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, MVT::ValueType VT3, +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, MVT VT3, SDOperand Op1, SDOperand Op2, SDOperand Op3) { - const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2, VT3); + const MVT *VTs = getNodeValueTypes(VT1, VT2, VT3); SDOperand Ops[] = { Op1, Op2, Op3 }; return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 3, Ops, 3).Val; } -SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, MVT::ValueType VT3, +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, MVT VT3, SDOperandPtr Ops, unsigned NumOps) { - const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2, VT3); + const MVT *VTs = getNodeValueTypes(VT1, VT2, VT3); return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 3, Ops, NumOps).Val; } -SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1, - MVT::ValueType VT2, MVT::ValueType VT3, - MVT::ValueType VT4, +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, + MVT VT2, MVT VT3, MVT VT4, SDOperandPtr Ops, unsigned NumOps) { - std::vector<MVT::ValueType> VTList; + std::vector<MVT> VTList; VTList.push_back(VT1); VTList.push_back(VT2); VTList.push_back(VT3); VTList.push_back(VT4); - const MVT::ValueType *VTs = getNodeValueTypes(VTList); + const MVT *VTs = getNodeValueTypes(VTList); return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 4, Ops, NumOps).Val; } SDNode *SelectionDAG::getTargetNode(unsigned Opcode, - std::vector<MVT::ValueType> &ResultTys, + std::vector<MVT> &ResultTys, SDOperandPtr Ops, unsigned NumOps) { - const MVT::ValueType *VTs = getNodeValueTypes(ResultTys); + const MVT *VTs = getNodeValueTypes(ResultTys); return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, ResultTys.size(), Ops, NumOps).Val; } @@ -4200,7 +4186,7 @@ HandleSDNode::~HandleSDNode() { } GlobalAddressSDNode::GlobalAddressSDNode(bool isTarget, const GlobalValue *GA, - MVT::ValueType VT, int o) + MVT VT, int o) : SDNode(isa<GlobalVariable>(GA) && cast<GlobalVariable>(GA)->isThreadLocal() ? // Thread Local @@ -4214,7 +4200,7 @@ GlobalAddressSDNode::GlobalAddressSDNode(bool isTarget, const GlobalValue *GA, /// getMemOperand - Return a MachineMemOperand object describing the memory /// reference performed by this load or store. MachineMemOperand LSBaseSDNode::getMemOperand() const { - int Size = (MVT::getSizeInBits(getMemoryVT()) + 7) >> 3; + int Size = (getMemoryVT().getSizeInBits() + 7) >> 3; int Flags = getOpcode() == ISD::LOAD ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore; @@ -4240,14 +4226,14 @@ void SDNode::Profile(FoldingSetNodeID &ID) { /// getValueTypeList - Return a pointer to the specified value type. 
/// -const MVT::ValueType *SDNode::getValueTypeList(MVT::ValueType VT) { - if (MVT::isExtendedVT(VT)) { - static std::set<MVT::ValueType> EVTs; +const MVT *SDNode::getValueTypeList(MVT VT) { + if (VT.isExtended()) { + static std::set<MVT> EVTs; return &(*EVTs.insert(VT).first); } else { - static MVT::ValueType VTs[MVT::LAST_VALUETYPE]; - VTs[VT] = VT; - return &VTs[VT]; + static MVT VTs[MVT::LAST_VALUETYPE]; + VTs[VT.getSimpleVT()] = VT; + return &VTs[VT.getSimpleVT()]; } } @@ -4684,7 +4670,7 @@ void SDNode::dump(const SelectionDAG *G) const { if (getValueType(i) == MVT::Other) cerr << "ch"; else - cerr << MVT::getValueTypeString(getValueType(i)); + cerr << getValueType(i).getMVTString(); } cerr << " = " << getOperationName(G); @@ -4773,7 +4759,7 @@ void SDNode::dump(const SelectionDAG *G) const { } else if (const ARG_FLAGSSDNode *N = dyn_cast<ARG_FLAGSSDNode>(this)) { cerr << N->getArgFlags().getArgFlagsString(); } else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) { - cerr << ":" << MVT::getValueTypeString(N->getVT()); + cerr << ":" << N->getVT().getMVTString(); } else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) { const Value *SrcValue = LD->getSrcValue(); int SrcOffset = LD->getSrcValueOffset(); @@ -4798,7 +4784,7 @@ void SDNode::dump(const SelectionDAG *G) const { break; } if (doExt) - cerr << MVT::getValueTypeString(LD->getMemoryVT()) << ">"; + cerr << LD->getMemoryVT().getMVTString() << ">"; const char *AM = getIndexedModeName(LD->getAddressingMode()); if (*AM) @@ -4818,7 +4804,7 @@ void SDNode::dump(const SelectionDAG *G) const { if (ST->isTruncatingStore()) cerr << " <trunc " - << MVT::getValueTypeString(ST->getMemoryVT()) << ">"; + << ST->getMemoryVT().getMVTString() << ">"; const char *AM = getIndexedModeName(ST->getAddressingMode()); if (*AM) diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index a76e514..1dc6753 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -88,10 +88,10 @@ defaultListDAGScheduler("default", " Best scheduler for the target", namespace { struct SDISelAsmOperandInfo; } /// ComputeValueVTs - Given an LLVM IR type, compute a sequence of -/// MVT::ValueTypes that represent all the individual underlying +/// MVTs that represent all the individual underlying /// non-aggregate types that comprise it. static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty, - SmallVectorImpl<MVT::ValueType> &ValueVTs) { + SmallVectorImpl<MVT> &ValueVTs) { // Given a struct type, recursively traverse the elements. if (const StructType *STy = dyn_cast<StructType>(Ty)) { for (StructType::element_iterator EI = STy->element_begin(), @@ -107,7 +107,7 @@ static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty, ComputeValueVTs(TLI, EltTy, ValueVTs); return; } - // Base case: we can get an MVT::ValueType for this LLVM IR type. + // Base case: we can get an MVT for this LLVM IR type. ValueVTs.push_back(TLI.getValueType(Ty)); } @@ -129,7 +129,7 @@ namespace { /// ValueVTs - The value types of the values, which may not be legal, and /// may need be promoted or synthesized from one or more registers. /// - SmallVector<MVT::ValueType, 4> ValueVTs; + SmallVector<MVT, 4> ValueVTs; /// RegVTs - The value types of the registers. 
This is the same size as /// ValueVTs and it records, for each value, what the type of the assigned @@ -140,7 +140,7 @@ namespace { /// getRegisterType member function, however when with physical registers /// it is necessary to have a separate record of the types. /// - SmallVector<MVT::ValueType, 4> RegVTs; + SmallVector<MVT, 4> RegVTs; /// Regs - This list holds the registers assigned to the values. /// Each legal or promoted value requires one register, and each @@ -152,21 +152,21 @@ namespace { RegsForValue(const TargetLowering &tli, const SmallVector<unsigned, 4> ®s, - MVT::ValueType regvt, MVT::ValueType valuevt) + MVT regvt, MVT valuevt) : TLI(&tli), ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {} RegsForValue(const TargetLowering &tli, const SmallVector<unsigned, 4> ®s, - const SmallVector<MVT::ValueType, 4> ®vts, - const SmallVector<MVT::ValueType, 4> &valuevts) + const SmallVector<MVT, 4> ®vts, + const SmallVector<MVT, 4> &valuevts) : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {} RegsForValue(const TargetLowering &tli, unsigned Reg, const Type *Ty) : TLI(&tli) { ComputeValueVTs(tli, Ty, ValueVTs); for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) { - MVT::ValueType ValueVT = ValueVTs[Value]; + MVT ValueVT = ValueVTs[Value]; unsigned NumRegs = TLI->getNumRegisters(ValueVT); - MVT::ValueType RegisterVT = TLI->getRegisterType(ValueVT); + MVT RegisterVT = TLI->getRegisterType(ValueVT); for (unsigned i = 0; i != NumRegs; ++i) Regs.push_back(Reg + i); RegVTs.push_back(RegisterVT); @@ -254,7 +254,7 @@ namespace llvm { SmallSet<Instruction*, 8> CatchInfoFound; #endif - unsigned MakeReg(MVT::ValueType VT) { + unsigned MakeReg(MVT VT) { return RegInfo.createVirtualRegister(TLI.getRegClassFor(VT)); } @@ -359,7 +359,7 @@ FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli, for (BasicBlock::iterator I = BB->begin();(PN = dyn_cast<PHINode>(I)); ++I){ if (PN->use_empty()) continue; - MVT::ValueType VT = TLI.getValueType(PN->getType()); + MVT VT = TLI.getValueType(PN->getType()); unsigned NumRegisters = TLI.getNumRegisters(VT); unsigned PHIReg = ValueMap[PN]; assert(PHIReg && "PHI node does not have an assigned virtual register!"); @@ -378,13 +378,13 @@ FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli, /// will assign registers for each member or element. /// unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) { - SmallVector<MVT::ValueType, 4> ValueVTs; + SmallVector<MVT, 4> ValueVTs; ComputeValueVTs(TLI, V->getType(), ValueVTs); unsigned FirstReg = 0; for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) { - MVT::ValueType ValueVT = ValueVTs[Value]; - MVT::ValueType RegisterVT = TLI.getRegisterType(ValueVT); + MVT ValueVT = ValueVTs[Value]; + MVT RegisterVT = TLI.getRegisterType(ValueVT); unsigned NumRegs = TLI.getNumRegisters(ValueVT); for (unsigned i = 0; i != NumRegs; ++i) { @@ -751,8 +751,8 @@ private: static SDOperand getCopyFromParts(SelectionDAG &DAG, const SDOperand *Parts, unsigned NumParts, - MVT::ValueType PartVT, - MVT::ValueType ValueVT, + MVT PartVT, + MVT ValueVT, ISD::NodeType AssertOp = ISD::DELETED_NODE) { assert(NumParts > 0 && "No parts to assemble!"); TargetLowering &TLI = DAG.getTargetLoweringInfo(); @@ -760,20 +760,20 @@ static SDOperand getCopyFromParts(SelectionDAG &DAG, if (NumParts > 1) { // Assemble the value from multiple parts. 
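ComputeValueVTs, shown a little earlier in this file, recursively walks struct and array types and appends one entry per underlying scalar; RegsForValue then maps each of those scalars onto one or more registers. A toy version of the flattening step over a made-up type description (none of these names are LLVM's):

    #include <cstdio>
    #include <string>
    #include <vector>

    // A toy stand-in for IR types: either a scalar (leaf) or a struct/array
    // made of nested elements.
    struct ToyType {
      std::string ScalarName;          // non-empty for leaves, e.g. "i32"
      std::vector<ToyType> Elements;   // non-empty for aggregates
    };

    // Flatten an aggregate into the ordered list of its scalar leaves,
    // mirroring the recursion in ComputeValueVTs.
    void flattenType(const ToyType &Ty, std::vector<std::string> &Out) {
      if (Ty.Elements.empty()) {       // base case: a scalar
        Out.push_back(Ty.ScalarName);
        return;
      }
      for (size_t i = 0, e = Ty.Elements.size(); i != e; ++i)
        flattenType(Ty.Elements[i], Out);
    }

    int main() {
      // { i32, { float, i64 } }
      ToyType F32;   F32.ScalarName = "float";
      ToyType I64;   I64.ScalarName = "i64";
      ToyType I32;   I32.ScalarName = "i32";
      ToyType Inner; Inner.Elements.push_back(F32); Inner.Elements.push_back(I64);
      ToyType Outer; Outer.Elements.push_back(I32); Outer.Elements.push_back(Inner);

      std::vector<std::string> Leaves;
      flattenType(Outer, Leaves);
      for (size_t i = 0; i != Leaves.size(); ++i)
        std::printf("%s\n", Leaves[i].c_str());   // i32, float, i64
      return 0;
    }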
- if (!MVT::isVector(ValueVT)) { - unsigned PartBits = MVT::getSizeInBits(PartVT); - unsigned ValueBits = MVT::getSizeInBits(ValueVT); + if (!ValueVT.isVector()) { + unsigned PartBits = PartVT.getSizeInBits(); + unsigned ValueBits = ValueVT.getSizeInBits(); // Assemble the power of 2 part. unsigned RoundParts = NumParts & (NumParts - 1) ? 1 << Log2_32(NumParts) : NumParts; unsigned RoundBits = PartBits * RoundParts; - MVT::ValueType RoundVT = RoundBits == ValueBits ? - ValueVT : MVT::getIntegerType(RoundBits); + MVT RoundVT = RoundBits == ValueBits ? + ValueVT : MVT::getIntegerVT(RoundBits); SDOperand Lo, Hi; if (RoundParts > 2) { - MVT::ValueType HalfVT = MVT::getIntegerType(RoundBits/2); + MVT HalfVT = MVT::getIntegerVT(RoundBits/2); Lo = getCopyFromParts(DAG, Parts, RoundParts/2, PartVT, HalfVT); Hi = getCopyFromParts(DAG, Parts+RoundParts/2, RoundParts/2, PartVT, HalfVT); @@ -788,24 +788,24 @@ static SDOperand getCopyFromParts(SelectionDAG &DAG, if (RoundParts < NumParts) { // Assemble the trailing non-power-of-2 part. unsigned OddParts = NumParts - RoundParts; - MVT::ValueType OddVT = MVT::getIntegerType(OddParts * PartBits); + MVT OddVT = MVT::getIntegerVT(OddParts * PartBits); Hi = getCopyFromParts(DAG, Parts+RoundParts, OddParts, PartVT, OddVT); // Combine the round and odd parts. Lo = Val; if (TLI.isBigEndian()) std::swap(Lo, Hi); - MVT::ValueType TotalVT = MVT::getIntegerType(NumParts * PartBits); + MVT TotalVT = MVT::getIntegerVT(NumParts * PartBits); Hi = DAG.getNode(ISD::ANY_EXTEND, TotalVT, Hi); Hi = DAG.getNode(ISD::SHL, TotalVT, Hi, - DAG.getConstant(MVT::getSizeInBits(Lo.getValueType()), + DAG.getConstant(Lo.getValueType().getSizeInBits(), TLI.getShiftAmountTy())); Lo = DAG.getNode(ISD::ZERO_EXTEND, TotalVT, Lo); Val = DAG.getNode(ISD::OR, TotalVT, Lo, Hi); } } else { // Handle a multi-element vector. - MVT::ValueType IntermediateVT, RegisterVT; + MVT IntermediateVT, RegisterVT; unsigned NumIntermediates; unsigned NumRegs = TLI.getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates, @@ -837,7 +837,7 @@ static SDOperand getCopyFromParts(SelectionDAG &DAG, // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate // operands. - Val = DAG.getNode(MVT::isVector(IntermediateVT) ? + Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, ValueVT, &Ops[0], NumIntermediates); } @@ -849,21 +849,21 @@ static SDOperand getCopyFromParts(SelectionDAG &DAG, if (PartVT == ValueVT) return Val; - if (MVT::isVector(PartVT)) { - assert(MVT::isVector(ValueVT) && "Unknown vector conversion!"); + if (PartVT.isVector()) { + assert(ValueVT.isVector() && "Unknown vector conversion!"); return DAG.getNode(ISD::BIT_CONVERT, ValueVT, Val); } - if (MVT::isVector(ValueVT)) { - assert(MVT::getVectorElementType(ValueVT) == PartVT && - MVT::getVectorNumElements(ValueVT) == 1 && + if (ValueVT.isVector()) { + assert(ValueVT.getVectorElementType() == PartVT && + ValueVT.getVectorNumElements() == 1 && "Only trivial scalar-to-vector conversions should get here!"); return DAG.getNode(ISD::BUILD_VECTOR, ValueVT, Val); } - if (MVT::isInteger(PartVT) && - MVT::isInteger(ValueVT)) { - if (MVT::getSizeInBits(ValueVT) < MVT::getSizeInBits(PartVT)) { + if (PartVT.isInteger() && + ValueVT.isInteger()) { + if (ValueVT.getSizeInBits() < PartVT.getSizeInBits()) { // For a truncate, see if we have any information to // indicate whether the truncated bits will always be // zero or sign-extension. 
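The integer path of getCopyFromParts above rebuilds a wide value from register-sized pieces: a power-of-2 group is combined pairwise and any leftover odd parts are attached with an any-extend, a shift and an or. The toy below collapses that bookkeeping into one little-endian shift/or loop over 16-bit parts, assuming the result fits in 64 bits:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Reassemble a little-endian sequence of 16-bit parts into one integer.
    // The shift/or step mirrors how getCopyFromParts combines Lo and Hi:
    //   Hi' = Hi << bits(Lo);  Val = Lo | Hi'
    uint64_t assembleParts(const std::vector<uint16_t> &Parts) {
      uint64_t Val = 0;
      const unsigned PartBits = 16;
      for (size_t i = Parts.size(); i-- != 0; ) {
        Val <<= PartBits;                 // make room for the next lower part
        Val |= Parts[i];                  // or it in
      }
      return Val;
    }

    int main() {
      // Three parts (a non-power-of-2 count): lowest part first,
      // so the value is 0x333322221111.
      std::vector<uint16_t> Parts;
      Parts.push_back(0x1111); Parts.push_back(0x2222); Parts.push_back(0x3333);
      std::printf("0x%llx\n", (unsigned long long)assembleParts(Parts));
      return 0;
    }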
@@ -876,7 +876,7 @@ static SDOperand getCopyFromParts(SelectionDAG &DAG, } } - if (MVT::isFloatingPoint(PartVT) && MVT::isFloatingPoint(ValueVT)) { + if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) { if (ValueVT < Val.getValueType()) // FP_ROUND's are always exact here. return DAG.getNode(ISD::FP_ROUND, ValueVT, Val, @@ -884,7 +884,7 @@ static SDOperand getCopyFromParts(SelectionDAG &DAG, return DAG.getNode(ISD::FP_EXTEND, ValueVT, Val); } - if (MVT::getSizeInBits(PartVT) == MVT::getSizeInBits(ValueVT)) + if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) return DAG.getNode(ISD::BIT_CONVERT, ValueVT, Val); assert(0 && "Unknown mismatch!"); @@ -898,43 +898,43 @@ static void getCopyToParts(SelectionDAG &DAG, SDOperand Val, SDOperand *Parts, unsigned NumParts, - MVT::ValueType PartVT, + MVT PartVT, ISD::NodeType ExtendKind = ISD::ANY_EXTEND) { TargetLowering &TLI = DAG.getTargetLoweringInfo(); - MVT::ValueType PtrVT = TLI.getPointerTy(); - MVT::ValueType ValueVT = Val.getValueType(); - unsigned PartBits = MVT::getSizeInBits(PartVT); + MVT PtrVT = TLI.getPointerTy(); + MVT ValueVT = Val.getValueType(); + unsigned PartBits = PartVT.getSizeInBits(); assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!"); if (!NumParts) return; - if (!MVT::isVector(ValueVT)) { + if (!ValueVT.isVector()) { if (PartVT == ValueVT) { assert(NumParts == 1 && "No-op copy with multiple parts!"); Parts[0] = Val; return; } - if (NumParts * PartBits > MVT::getSizeInBits(ValueVT)) { + if (NumParts * PartBits > ValueVT.getSizeInBits()) { // If the parts cover more bits than the value has, promote the value. - if (MVT::isFloatingPoint(PartVT) && MVT::isFloatingPoint(ValueVT)) { + if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) { assert(NumParts == 1 && "Do not know what to promote to!"); Val = DAG.getNode(ISD::FP_EXTEND, PartVT, Val); - } else if (MVT::isInteger(PartVT) && MVT::isInteger(ValueVT)) { - ValueVT = MVT::getIntegerType(NumParts * PartBits); + } else if (PartVT.isInteger() && ValueVT.isInteger()) { + ValueVT = MVT::getIntegerVT(NumParts * PartBits); Val = DAG.getNode(ExtendKind, ValueVT, Val); } else { assert(0 && "Unknown mismatch!"); } - } else if (PartBits == MVT::getSizeInBits(ValueVT)) { + } else if (PartBits == ValueVT.getSizeInBits()) { // Different types of the same size. assert(NumParts == 1 && PartVT != ValueVT); Val = DAG.getNode(ISD::BIT_CONVERT, PartVT, Val); - } else if (NumParts * PartBits < MVT::getSizeInBits(ValueVT)) { + } else if (NumParts * PartBits < ValueVT.getSizeInBits()) { // If the parts cover less bits than value has, truncate the value. - if (MVT::isInteger(PartVT) && MVT::isInteger(ValueVT)) { - ValueVT = MVT::getIntegerType(NumParts * PartBits); + if (PartVT.isInteger() && ValueVT.isInteger()) { + ValueVT = MVT::getIntegerVT(NumParts * PartBits); Val = DAG.getNode(ISD::TRUNCATE, ValueVT, Val); } else { assert(0 && "Unknown mismatch!"); @@ -943,7 +943,7 @@ static void getCopyToParts(SelectionDAG &DAG, // The value may have changed - recompute ValueVT. ValueVT = Val.getValueType(); - assert(NumParts * PartBits == MVT::getSizeInBits(ValueVT) && + assert(NumParts * PartBits == ValueVT.getSizeInBits() && "Failed to tile the value with PartVT!"); if (NumParts == 1) { @@ -955,7 +955,7 @@ static void getCopyToParts(SelectionDAG &DAG, // Expand the value into multiple parts. if (NumParts & (NumParts - 1)) { // The number of parts is not a power of 2. Split off and copy the tail. 
- assert(MVT::isInteger(PartVT) && MVT::isInteger(ValueVT) && + assert(PartVT.isInteger() && ValueVT.isInteger() && "Do not know what to expand to!"); unsigned RoundParts = 1 << Log2_32(NumParts); unsigned RoundBits = RoundParts * PartBits; @@ -968,19 +968,19 @@ static void getCopyToParts(SelectionDAG &DAG, // The odd parts were reversed by getCopyToParts - unreverse them. std::reverse(Parts + RoundParts, Parts + NumParts); NumParts = RoundParts; - ValueVT = MVT::getIntegerType(NumParts * PartBits); + ValueVT = MVT::getIntegerVT(NumParts * PartBits); Val = DAG.getNode(ISD::TRUNCATE, ValueVT, Val); } // The number of parts is a power of 2. Repeatedly bisect the value using // EXTRACT_ELEMENT. Parts[0] = DAG.getNode(ISD::BIT_CONVERT, - MVT::getIntegerType(MVT::getSizeInBits(ValueVT)), + MVT::getIntegerVT(ValueVT.getSizeInBits()), Val); for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) { for (unsigned i = 0; i < NumParts; i += StepSize) { unsigned ThisBits = StepSize * PartBits / 2; - MVT::ValueType ThisVT = MVT::getIntegerType (ThisBits); + MVT ThisVT = MVT::getIntegerVT (ThisBits); SDOperand &Part0 = Parts[i]; SDOperand &Part1 = Parts[i+StepSize/2]; @@ -1005,11 +1005,11 @@ static void getCopyToParts(SelectionDAG &DAG, // Vector ValueVT. if (NumParts == 1) { if (PartVT != ValueVT) { - if (MVT::isVector(PartVT)) { + if (PartVT.isVector()) { Val = DAG.getNode(ISD::BIT_CONVERT, PartVT, Val); } else { - assert(MVT::getVectorElementType(ValueVT) == PartVT && - MVT::getVectorNumElements(ValueVT) == 1 && + assert(ValueVT.getVectorElementType() == PartVT && + ValueVT.getVectorNumElements() == 1 && "Only trivial vector-to-scalar conversions should get here!"); Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, PartVT, Val, DAG.getConstant(0, PtrVT)); @@ -1021,13 +1021,13 @@ static void getCopyToParts(SelectionDAG &DAG, } // Handle a multi-element vector. - MVT::ValueType IntermediateVT, RegisterVT; + MVT IntermediateVT, RegisterVT; unsigned NumIntermediates; unsigned NumRegs = DAG.getTargetLoweringInfo() .getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates, RegisterVT); - unsigned NumElements = MVT::getVectorNumElements(ValueVT); + unsigned NumElements = ValueVT.getVectorNumElements(); assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!"); NumParts = NumRegs; // Silence a compiler warning. @@ -1036,7 +1036,7 @@ static void getCopyToParts(SelectionDAG &DAG, // Split the vector into intermediate operands. 
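For vector values, getCopyToParts above asks getVectorTypeBreakdown how many intermediate pieces to form and then splits the vector into that many equal chunks. A toy split over a plain std::vector, assuming the element count divides evenly:

    #include <cstdio>
    #include <vector>

    // Split Elements into NumIntermediates equal chunks, each taken at a
    // stride of Elements.size() / NumIntermediates, the way the DAG code
    // extracts its sub-vectors.
    std::vector<std::vector<int> >
    splitIntoIntermediates(const std::vector<int> &Elements,
                           unsigned NumIntermediates) {
      unsigned Stride = (unsigned)Elements.size() / NumIntermediates;
      std::vector<std::vector<int> > Chunks(NumIntermediates);
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Chunks[i].assign(Elements.begin() + i * Stride,
                         Elements.begin() + (i + 1) * Stride);
      return Chunks;
    }

    int main() {
      std::vector<int> V8;                       // a "v8i32"-like value
      for (int i = 0; i != 8; ++i) V8.push_back(i);
      std::vector<std::vector<int> > Halves = splitIntoIntermediates(V8, 2);
      for (unsigned c = 0; c != Halves.size(); ++c) {
        for (unsigned i = 0; i != Halves[c].size(); ++i)
          std::printf("%d ", Halves[c][i]);
        std::printf("\n");                        // prints 0..3, then 4..7
      }
      return 0;
    }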
SmallVector<SDOperand, 8> Ops(NumIntermediates); for (unsigned i = 0; i != NumIntermediates; ++i) - if (MVT::isVector(IntermediateVT)) + if (IntermediateVT.isVector()) Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, IntermediateVT, Val, DAG.getConstant(i * (NumElements / NumIntermediates), @@ -1069,7 +1069,7 @@ SDOperand SelectionDAGLowering::getValue(const Value *V) { if (N.Val) return N; if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) { - MVT::ValueType VT = TLI.getValueType(V->getType(), true); + MVT VT = TLI.getValueType(V->getType(), true); if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) return N = DAG.getConstant(CI->getValue(), VT); @@ -1105,12 +1105,12 @@ SDOperand SelectionDAGLowering::getValue(const Value *V) { } else { assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) && "Unknown vector constant!"); - MVT::ValueType EltVT = TLI.getValueType(VecTy->getElementType()); + MVT EltVT = TLI.getValueType(VecTy->getElementType()); SDOperand Op; if (isa<UndefValue>(C)) Op = DAG.getNode(ISD::UNDEF, EltVT); - else if (MVT::isFloatingPoint(EltVT)) + else if (EltVT.isFloatingPoint()) Op = DAG.getConstantFP(0, EltVT); else Op = DAG.getConstant(0, EltVT); @@ -1149,18 +1149,18 @@ void SelectionDAGLowering::visitRet(ReturnInst &I) { NewValues.push_back(getControlRoot()); for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) { SDOperand RetOp = getValue(I.getOperand(i)); - MVT::ValueType VT = RetOp.getValueType(); + MVT VT = RetOp.getValueType(); // FIXME: C calling convention requires the return type to be promoted to // at least 32-bit. But this is not necessary for non-C calling conventions. - if (MVT::isInteger(VT)) { - MVT::ValueType MinVT = TLI.getRegisterType(MVT::i32); - if (MVT::getSizeInBits(VT) < MVT::getSizeInBits(MinVT)) + if (VT.isInteger()) { + MVT MinVT = TLI.getRegisterType(MVT::i32); + if (VT.getSizeInBits() < MinVT.getSizeInBits()) VT = MinVT; } unsigned NumParts = TLI.getNumRegisters(VT); - MVT::ValueType PartVT = TLI.getRegisterType(VT); + MVT PartVT = TLI.getRegisterType(VT); SmallVector<SDOperand, 4> Parts(NumParts); ISD::NodeType ExtendKind = ISD::ANY_EXTEND; @@ -1475,7 +1475,7 @@ void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) { uint64_t High = cast<ConstantInt>(CB.CmpRHS)->getSExtValue(); SDOperand CmpOp = getValue(CB.CmpMHS); - MVT::ValueType VT = CmpOp.getValueType(); + MVT VT = CmpOp.getValueType(); if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) { Cond = DAG.getSetCC(MVT::i1, CmpOp, DAG.getConstant(High, VT), ISD::SETLE); @@ -1517,7 +1517,7 @@ void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) { void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) { // Emit the code for the jump table assert(JT.Reg != -1U && "Should lower JT Header first!"); - MVT::ValueType PTy = TLI.getPointerTy(); + MVT PTy = TLI.getPointerTy(); SDOperand Index = DAG.getCopyFromReg(getControlRoot(), JT.Reg, PTy); SDOperand Table = DAG.getJumpTable(JT.JTI, PTy); DAG.setRoot(DAG.getNode(ISD::BR_JT, MVT::Other, Index.getValue(1), @@ -1533,7 +1533,7 @@ void SelectionDAGLowering::visitJumpTableHeader(SelectionDAGISel::JumpTable &JT, // and conditional branch to default mbb if the result is greater than the // difference between smallest and largest cases. 
SDOperand SwitchOp = getValue(JTH.SValue); - MVT::ValueType VT = SwitchOp.getValueType(); + MVT VT = SwitchOp.getValueType(); SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp, DAG.getConstant(JTH.First, VT)); @@ -1542,7 +1542,7 @@ void SelectionDAGLowering::visitJumpTableHeader(SelectionDAGISel::JumpTable &JT, // register so it can be used as an index into the jump table in a // subsequent basic block. This value may be smaller or larger than the // target's pointer type, and therefore require extension or truncating. - if (MVT::getSizeInBits(VT) > MVT::getSizeInBits(TLI.getPointerTy())) + if (VT.getSizeInBits() > TLI.getPointerTy().getSizeInBits()) SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB); else SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB); @@ -1582,7 +1582,7 @@ void SelectionDAGLowering::visitJumpTableHeader(SelectionDAGISel::JumpTable &JT, void SelectionDAGLowering::visitBitTestHeader(SelectionDAGISel::BitTestBlock &B) { // Subtract the minimum value SDOperand SwitchOp = getValue(B.SValue); - MVT::ValueType VT = SwitchOp.getValueType(); + MVT VT = SwitchOp.getValueType(); SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp, DAG.getConstant(B.First, VT)); @@ -1592,7 +1592,7 @@ void SelectionDAGLowering::visitBitTestHeader(SelectionDAGISel::BitTestBlock &B) ISD::SETUGT); SDOperand ShiftOp; - if (MVT::getSizeInBits(VT) > MVT::getSizeInBits(TLI.getShiftAmountTy())) + if (VT.getSizeInBits() > TLI.getShiftAmountTy().getSizeInBits()) ShiftOp = DAG.getNode(ISD::TRUNCATE, TLI.getShiftAmountTy(), SUB); else ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getShiftAmountTy(), SUB); @@ -2005,7 +2005,7 @@ bool SelectionDAGLowering::handleBitTestsSwitchCase(CaseRec& CR, CaseRecVector& WorkList, Value* SV, MachineBasicBlock* Default){ - unsigned IntPtrBits = MVT::getSizeInBits(TLI.getPointerTy()); + unsigned IntPtrBits = TLI.getPointerTy().getSizeInBits(); Case& FrontCase = *CR.Range.first; Case& BackCase = *(CR.Range.second-1); @@ -2271,8 +2271,8 @@ void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) { SDOperand Op1 = getValue(I.getOperand(0)); SDOperand Op2 = getValue(I.getOperand(1)); - if (MVT::getSizeInBits(TLI.getShiftAmountTy()) < - MVT::getSizeInBits(Op2.getValueType())) + if (TLI.getShiftAmountTy().getSizeInBits() < + Op2.getValueType().getSizeInBits()) Op2 = DAG.getNode(ISD::TRUNCATE, TLI.getShiftAmountTy(), Op2); else if (TLI.getShiftAmountTy() > Op2.getValueType()) Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2); @@ -2410,7 +2410,7 @@ void SelectionDAGLowering::visitVFCmp(User &I) { else Condition = FPC; - MVT::ValueType DestVT = TLI.getValueType(I.getType()); + MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getVSetCC(DestVT, Op1, Op2, Condition)); } @@ -2427,7 +2427,7 @@ void SelectionDAGLowering::visitSelect(User &I) { void SelectionDAGLowering::visitTrunc(User &I) { // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest). SDOperand N = getValue(I.getOperand(0)); - MVT::ValueType DestVT = TLI.getValueType(I.getType()); + MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N)); } @@ -2435,7 +2435,7 @@ void SelectionDAGLowering::visitZExt(User &I) { // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest). // ZExt also can't be a cast to bool for same reason. 
So, nothing much to do SDOperand N = getValue(I.getOperand(0)); - MVT::ValueType DestVT = TLI.getValueType(I.getType()); + MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N)); } @@ -2443,49 +2443,49 @@ void SelectionDAGLowering::visitSExt(User &I) { // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest). // SExt also can't be a cast to bool for same reason. So, nothing much to do SDOperand N = getValue(I.getOperand(0)); - MVT::ValueType DestVT = TLI.getValueType(I.getType()); + MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N)); } void SelectionDAGLowering::visitFPTrunc(User &I) { // FPTrunc is never a no-op cast, no need to check SDOperand N = getValue(I.getOperand(0)); - MVT::ValueType DestVT = TLI.getValueType(I.getType()); + MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N, DAG.getIntPtrConstant(0))); } void SelectionDAGLowering::visitFPExt(User &I){ // FPTrunc is never a no-op cast, no need to check SDOperand N = getValue(I.getOperand(0)); - MVT::ValueType DestVT = TLI.getValueType(I.getType()); + MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N)); } void SelectionDAGLowering::visitFPToUI(User &I) { // FPToUI is never a no-op cast, no need to check SDOperand N = getValue(I.getOperand(0)); - MVT::ValueType DestVT = TLI.getValueType(I.getType()); + MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N)); } void SelectionDAGLowering::visitFPToSI(User &I) { // FPToSI is never a no-op cast, no need to check SDOperand N = getValue(I.getOperand(0)); - MVT::ValueType DestVT = TLI.getValueType(I.getType()); + MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N)); } void SelectionDAGLowering::visitUIToFP(User &I) { // UIToFP is never a no-op cast, no need to check SDOperand N = getValue(I.getOperand(0)); - MVT::ValueType DestVT = TLI.getValueType(I.getType()); + MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N)); } void SelectionDAGLowering::visitSIToFP(User &I){ // UIToFP is never a no-op cast, no need to check SDOperand N = getValue(I.getOperand(0)); - MVT::ValueType DestVT = TLI.getValueType(I.getType()); + MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N)); } @@ -2493,10 +2493,10 @@ void SelectionDAGLowering::visitPtrToInt(User &I) { // What to do depends on the size of the integer and the size of the pointer. // We can either truncate, zero extend, or no-op, accordingly. SDOperand N = getValue(I.getOperand(0)); - MVT::ValueType SrcVT = N.getValueType(); - MVT::ValueType DestVT = TLI.getValueType(I.getType()); + MVT SrcVT = N.getValueType(); + MVT DestVT = TLI.getValueType(I.getType()); SDOperand Result; - if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(SrcVT)) + if (DestVT.getSizeInBits() < SrcVT.getSizeInBits()) Result = DAG.getNode(ISD::TRUNCATE, DestVT, N); else // Note: ZERO_EXTEND can handle cases where the sizes are equal too @@ -2508,9 +2508,9 @@ void SelectionDAGLowering::visitIntToPtr(User &I) { // What to do depends on the size of the integer and the size of the pointer. // We can either truncate, zero extend, or no-op, accordingly. 
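visitPtrToInt above (and visitIntToPtr just below it) pick the conversion by comparing bit widths: TRUNCATE when the destination is narrower, otherwise ZERO_EXTEND, with the comment noting that the extend also covers the equal-width case. A toy of the same decision for values carried in a 64-bit container:

    #include <cstdint>
    #include <cstdio>

    // Model of the visitPtrToInt decision: compare widths and either truncate
    // or zero-extend the 64-bit container holding the source value.
    uint64_t convertWidth(uint64_t Src, unsigned SrcBits, unsigned DstBits) {
      if (DstBits < SrcBits) {
        // TRUNCATE: keep only the low DstBits.
        return DstBits == 64 ? Src : (Src & ((UINT64_C(1) << DstBits) - 1));
      }
      // ZERO_EXTEND (or a no-op when the sizes match): the high bits become
      // zero, which they already are in this unsigned container.
      return Src;
    }

    int main() {
      uint64_t P = 0x123456789abcdef0ULL;
      std::printf("ptr->i32: 0x%llx\n",
                  (unsigned long long)convertWidth(P, 64, 32)); // 0x9abcdef0
      std::printf("i32->ptr: 0x%llx\n",
                  (unsigned long long)convertWidth(0x9abcdef0ULL, 32, 64));
      return 0;
    }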
SDOperand N = getValue(I.getOperand(0)); - MVT::ValueType SrcVT = N.getValueType(); - MVT::ValueType DestVT = TLI.getValueType(I.getType()); - if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(SrcVT)) + MVT SrcVT = N.getValueType(); + MVT DestVT = TLI.getValueType(I.getType()); + if (DestVT.getSizeInBits() < SrcVT.getSizeInBits()) setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N)); else // Note: ZERO_EXTEND can handle cases where the sizes are equal too @@ -2519,7 +2519,7 @@ void SelectionDAGLowering::visitIntToPtr(User &I) { void SelectionDAGLowering::visitBitCast(User &I) { SDOperand N = getValue(I.getOperand(0)); - MVT::ValueType DestVT = TLI.getValueType(I.getType()); + MVT DestVT = TLI.getValueType(I.getType()); // BitCast assures us that source and destination are the same size so this // is either a BIT_CONVERT or a no-op. @@ -2640,7 +2640,7 @@ void SelectionDAGLowering::visitAlloca(AllocaInst &I) { I.getAlignment()); SDOperand AllocSize = getValue(I.getArraySize()); - MVT::ValueType IntPtr = TLI.getPointerTy(); + MVT IntPtr = TLI.getPointerTy(); if (IntPtr < AllocSize.getValueType()) AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize); else if (IntPtr > AllocSize.getValueType()) @@ -2666,7 +2666,7 @@ void SelectionDAGLowering::visitAlloca(AllocaInst &I) { DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1))); SDOperand Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) }; - const MVT::ValueType *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(), + const MVT *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(), MVT::Other); SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, 2, Ops, 3); setValue(&I, DSA); @@ -2746,14 +2746,14 @@ void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I, Ops.push_back(Op); } - std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; if (I.getType() != Type::VoidTy) { - MVT::ValueType VT = TLI.getValueType(I.getType()); - if (MVT::isVector(VT)) { + MVT VT = TLI.getValueType(I.getType()); + if (VT.isVector()) { const VectorType *DestTy = cast<VectorType>(I.getType()); - MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType()); + MVT EltVT = TLI.getValueType(DestTy->getElementType()); - VT = MVT::getVectorType(EltVT, DestTy->getNumElements()); + VT = MVT::getVectorVT(EltVT, DestTy->getNumElements()); assert(VT != MVT::Other && "Intrinsic uses a non-legal type?"); } @@ -2763,7 +2763,7 @@ void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I, if (HasChain) VTs.push_back(MVT::Other); - const MVT::ValueType *VTList = DAG.getNodeValueTypes(VTs); + const MVT *VTList = DAG.getNodeValueTypes(VTs); // Create the node. SDOperand Result; @@ -2786,7 +2786,7 @@ void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I, } if (I.getType() != Type::VoidTy) { if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) { - MVT::ValueType VT = TLI.getValueType(PTy); + MVT VT = TLI.getValueType(PTy); Result = DAG.getNode(ISD::BIT_CONVERT, VT, Result); } setValue(&I, Result); @@ -3038,7 +3038,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { case Intrinsic::eh_selector_i32: case Intrinsic::eh_selector_i64: { MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); - MVT::ValueType VT = (Intrinsic == Intrinsic::eh_selector_i32 ? + MVT VT = (Intrinsic == Intrinsic::eh_selector_i32 ? 
MVT::i32 : MVT::i64); if (MMI) { @@ -3071,7 +3071,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { case Intrinsic::eh_typeid_for_i32: case Intrinsic::eh_typeid_for_i64: { MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); - MVT::ValueType VT = (Intrinsic == Intrinsic::eh_typeid_for_i32 ? + MVT VT = (Intrinsic == Intrinsic::eh_typeid_for_i32 ? MVT::i32 : MVT::i64); if (MMI) { @@ -3114,9 +3114,9 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { } case Intrinsic::eh_dwarf_cfa: { - MVT::ValueType VT = getValue(I.getOperand(1)).getValueType(); + MVT VT = getValue(I.getOperand(1)).getValueType(); SDOperand CfaArg; - if (MVT::getSizeInBits(VT) > MVT::getSizeInBits(TLI.getPointerTy())) + if (VT.getSizeInBits() > TLI.getPointerTy().getSizeInBits()) CfaArg = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), getValue(I.getOperand(1))); else @@ -3196,21 +3196,21 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { return 0; case Intrinsic::cttz: { SDOperand Arg = getValue(I.getOperand(1)); - MVT::ValueType Ty = Arg.getValueType(); + MVT Ty = Arg.getValueType(); SDOperand result = DAG.getNode(ISD::CTTZ, Ty, Arg); setValue(&I, result); return 0; } case Intrinsic::ctlz: { SDOperand Arg = getValue(I.getOperand(1)); - MVT::ValueType Ty = Arg.getValueType(); + MVT Ty = Arg.getValueType(); SDOperand result = DAG.getNode(ISD::CTLZ, Ty, Arg); setValue(&I, result); return 0; } case Intrinsic::ctpop: { SDOperand Arg = getValue(I.getOperand(1)); - MVT::ValueType Ty = Arg.getValueType(); + MVT Ty = Arg.getValueType(); SDOperand result = DAG.getNode(ISD::CTPOP, Ty, Arg); setValue(&I, result); return 0; @@ -3503,9 +3503,9 @@ SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG, SmallVector<SDOperand, 8> Parts; for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) { // Copy the legal parts from the registers. - MVT::ValueType ValueVT = ValueVTs[Value]; + MVT ValueVT = ValueVTs[Value]; unsigned NumRegs = TLI->getNumRegisters(ValueVT); - MVT::ValueType RegisterVT = RegVTs[Value]; + MVT RegisterVT = RegVTs[Value]; Parts.resize(NumRegs); for (unsigned i = 0; i != NumRegs; ++i) { @@ -3543,9 +3543,9 @@ void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG, unsigned NumRegs = Regs.size(); SmallVector<SDOperand, 8> Parts(NumRegs); for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) { - MVT::ValueType ValueVT = ValueVTs[Value]; + MVT ValueVT = ValueVTs[Value]; unsigned NumParts = TLI->getNumRegisters(ValueVT); - MVT::ValueType RegisterVT = RegVTs[Value]; + MVT RegisterVT = RegVTs[Value]; getCopyToParts(DAG, Val.getValue(Val.ResNo + Value), &Parts[Part], NumParts, RegisterVT); @@ -3586,11 +3586,11 @@ void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG, /// values added into it. 
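getCopyFromRegs and getCopyToRegs above walk the parallel ValueVTs/RegVTs arrays and, for each value, consume getNumRegisters(ValueVT) consecutive entries of one flat register list through a running index. A toy of that bookkeeping with made-up register numbers:

    #include <cstdio>
    #include <vector>

    // Each "value" needs NumRegs[i] registers; registers are handed out from
    // one flat pool in order, mirroring the running Reg/Part index used by
    // RegsForValue when copying values to or from their registers.
    // FlatRegs is assumed to hold at least the sum of NumRegs entries.
    void assignRegisters(const std::vector<unsigned> &NumRegs,
                         const std::vector<unsigned> &FlatRegs) {
      unsigned Reg = 0;
      for (unsigned Value = 0, e = (unsigned)NumRegs.size(); Value != e; ++Value) {
        std::printf("value %u uses:", Value);
        for (unsigned i = 0; i != NumRegs[Value]; ++i)
          std::printf(" r%u", FlatRegs[Reg++]);
        std::printf("\n");
      }
    }

    int main() {
      // Two values: the first needs one register, the second needs two
      // (e.g. an i64 expanded to two i32 registers on a 32-bit target).
      std::vector<unsigned> NumRegs;  NumRegs.push_back(1); NumRegs.push_back(2);
      std::vector<unsigned> FlatRegs; FlatRegs.push_back(3); FlatRegs.push_back(4);
      FlatRegs.push_back(5);
      assignRegisters(NumRegs, FlatRegs);
      return 0;
    }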
void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG, std::vector<SDOperand> &Ops) const { - MVT::ValueType IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy(); + MVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy(); Ops.push_back(DAG.getTargetConstant(Code | (Regs.size() << 3), IntPtrTy)); for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) { unsigned NumRegs = TLI->getNumRegisters(ValueVTs[Value]); - MVT::ValueType RegisterVT = RegVTs[Value]; + MVT RegisterVT = RegVTs[Value]; for (unsigned i = 0; i != NumRegs; ++i) Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT)); } @@ -3603,11 +3603,11 @@ static const TargetRegisterClass * isAllocatableRegister(unsigned Reg, MachineFunction &MF, const TargetLowering &TLI, const TargetRegisterInfo *TRI) { - MVT::ValueType FoundVT = MVT::Other; + MVT FoundVT = MVT::Other; const TargetRegisterClass *FoundRC = 0; for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(), E = TRI->regclass_end(); RCI != E; ++RCI) { - MVT::ValueType ThisVT = MVT::Other; + MVT ThisVT = MVT::Other; const TargetRegisterClass *RC = *RCI; // If none of the the value types for this register class are valid, we @@ -3619,7 +3619,7 @@ isAllocatableRegister(unsigned Reg, MachineFunction &MF, // choose the one with the largest VT specified. For example, on // PowerPC, we favor f64 register classes over f32. if (FoundVT == MVT::Other || - MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) { + FoundVT.getSizeInBits() < (*I).getSizeInBits()) { ThisVT = *I; break; } @@ -3745,8 +3745,8 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo, bool HasEarlyClobber, unsigned NumRegs = 1; if (OpInfo.ConstraintVT != MVT::Other) NumRegs = TLI.getNumRegisters(OpInfo.ConstraintVT); - MVT::ValueType RegVT; - MVT::ValueType ValueVT = OpInfo.ConstraintVT; + MVT RegVT; + MVT ValueVT = OpInfo.ConstraintVT; // If this is a constraint for a specific physical register, like {r17}, @@ -3895,7 +3895,7 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) { ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i])); SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back(); - MVT::ValueType OpVT = MVT::Other; + MVT OpVT = MVT::Other; // Compute the value type for each operand. switch (OpInfo.Type) { @@ -4227,12 +4227,12 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) { // bit_convert. 
if (const StructType *ResSTy = dyn_cast<StructType>(CS.getType())) { for (unsigned i = 0, e = ResSTy->getNumElements(); i != e; ++i) { - if (MVT::isVector(Val.Val->getValueType(i))) + if (Val.Val->getValueType(i).isVector()) Val = DAG.getNode(ISD::BIT_CONVERT, TLI.getValueType(ResSTy->getElementType(i)), Val); } } else { - if (MVT::isVector(Val.getValueType())) + if (Val.getValueType().isVector()) Val = DAG.getNode(ISD::BIT_CONVERT, TLI.getValueType(CS.getType()), Val); } @@ -4267,7 +4267,7 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) { void SelectionDAGLowering::visitMalloc(MallocInst &I) { SDOperand Src = getValue(I.getOperand(0)); - MVT::ValueType IntPtr = TLI.getPointerTy(); + MVT IntPtr = TLI.getPointerTy(); if (IntPtr < Src.getValueType()) Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src); @@ -4298,7 +4298,7 @@ void SelectionDAGLowering::visitFree(FreeInst &I) { Entry.Node = getValue(I.getOperand(0)); Entry.Ty = TLI.getTargetData()->getIntPtrType(); Args.push_back(Entry); - MVT::ValueType IntPtr = TLI.getPointerTy(); + MVT IntPtr = TLI.getPointerTy(); std::pair<SDOperand,SDOperand> Result = TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, false, CallingConv::C, true, @@ -4361,11 +4361,11 @@ TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy())); // Add one result value for each formal argument. - std::vector<MVT::ValueType> RetVals; + std::vector<MVT> RetVals; unsigned j = 1; for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I, ++j) { - MVT::ValueType VT = getValueType(I->getType()); + MVT VT = getValueType(I->getType()); ISD::ArgFlagsTy Flags; unsigned OriginalAlignment = getTargetData()->getABITypeAlignment(I->getType()); @@ -4395,7 +4395,7 @@ TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { Flags.setNest(); Flags.setOrigAlign(OriginalAlignment); - MVT::ValueType RegisterVT = getRegisterType(VT); + MVT RegisterVT = getRegisterType(VT); unsigned NumRegs = getNumRegisters(VT); for (unsigned i = 0; i != NumRegs; ++i) { RetVals.push_back(RegisterVT); @@ -4438,8 +4438,8 @@ TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { unsigned Idx = 1; for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I, ++Idx) { - MVT::ValueType VT = getValueType(I->getType()); - MVT::ValueType PartVT = getRegisterType(VT); + MVT VT = getValueType(I->getType()); + MVT PartVT = getRegisterType(VT); unsigned NumParts = getNumRegisters(VT); SmallVector<SDOperand, 4> Parts(NumParts); @@ -4479,7 +4479,7 @@ TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, // Handle all of the outgoing arguments. for (unsigned i = 0, e = Args.size(); i != e; ++i) { - MVT::ValueType VT = getValueType(Args[i].Ty); + MVT VT = getValueType(Args[i].Ty); SDOperand Op = Args[i].Node; ISD::ArgFlagsTy Flags; unsigned OriginalAlignment = @@ -4510,7 +4510,7 @@ TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, Flags.setNest(); Flags.setOrigAlign(OriginalAlignment); - MVT::ValueType PartVT = getRegisterType(VT); + MVT PartVT = getRegisterType(VT); unsigned NumParts = getNumRegisters(VT); SmallVector<SDOperand, 4> Parts(NumParts); ISD::NodeType ExtendKind = ISD::ANY_EXTEND; @@ -4537,14 +4537,14 @@ TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, // Figure out the result value types. We start by making a list of // the potentially illegal return value types. 
- SmallVector<MVT::ValueType, 4> LoweredRetTys; - SmallVector<MVT::ValueType, 4> RetTys; + SmallVector<MVT, 4> LoweredRetTys; + SmallVector<MVT, 4> RetTys; ComputeValueVTs(*this, RetTy, RetTys); // Then we translate that to a list of legal types. for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { - MVT::ValueType VT = RetTys[I]; - MVT::ValueType RegisterVT = getRegisterType(VT); + MVT VT = RetTys[I]; + MVT RegisterVT = getRegisterType(VT); unsigned NumRegs = getNumRegisters(VT); for (unsigned i = 0; i != NumRegs; ++i) LoweredRetTys.push_back(RegisterVT); @@ -4571,8 +4571,8 @@ TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, SmallVector<SDOperand, 4> ReturnValues; unsigned RegNo = 0; for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { - MVT::ValueType VT = RetTys[I]; - MVT::ValueType RegisterVT = getRegisterType(VT); + MVT VT = RetTys[I]; + MVT RegisterVT = getRegisterType(VT); unsigned NumRegs = getNumRegisters(VT); unsigned RegNoEnd = NumRegs + RegNo; SmallVector<SDOperand, 4> Results; @@ -4609,7 +4609,7 @@ SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op, // SelectionDAGISel code //===----------------------------------------------------------------------===// -unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) { +unsigned SelectionDAGISel::MakeReg(MVT VT) { return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT)); } @@ -4798,7 +4798,7 @@ static void CheckDAGForTailCallsAndFixThem(SelectionDAG &DAG, MachineFrameInfo *MFI = MF.getFrameInfo(); if (!isByVal && IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)) { - MVT::ValueType VT = Arg.getValueType(); + MVT VT = Arg.getValueType(); unsigned VReg = MF.getRegInfo(). createVirtualRegister(TLI.getRegClassFor(VT)); Chain = DAG.getCopyToReg(Chain, VReg, Arg, InFlag); @@ -4951,7 +4951,7 @@ void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB, // Remember that this register needs to added to the machine PHI node as // the input for this MBB. - MVT::ValueType VT = TLI.getValueType(PN->getType()); + MVT VT = TLI.getValueType(PN->getType()); unsigned NumRegisters = TLI.getNumRegisters(VT); for (unsigned i = 0, e = NumRegisters; i != e; ++i) PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i)); @@ -5356,7 +5356,7 @@ SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) { } // Add this to the output node. 
- MVT::ValueType IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy(); + MVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy(); Ops.push_back(DAG.getTargetConstant(4/*MEM*/ | (SelOps.size() << 3), IntPtrTy)); Ops.insert(Ops.end(), SelOps.begin(), SelOps.end()); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp index 904638d..f922168 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp @@ -48,7 +48,7 @@ namespace llvm { template<typename EdgeIter> static std::string getEdgeAttributes(const void *Node, EdgeIter EI) { SDOperand Op = EI.getNode()->getOperand(EI.getOperand()); - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); if (VT == MVT::Flag) return "color=red,style=bold"; else if (VT == MVT::Other) @@ -90,7 +90,7 @@ std::string DOTGraphTraits<SelectionDAG*>::getNodeLabel(const SDNode *Node, if (Node->getValueType(i) == MVT::Other) Op += ":ch"; else - Op = Op + ":" + MVT::getValueTypeString(Node->getValueType(i)); + Op = Op + ":" + Node->getValueType(i).getMVTString(); if (const ConstantSDNode *CSDN = dyn_cast<ConstantSDNode>(Node)) { Op += ": " + utostr(CSDN->getValue()); @@ -154,7 +154,7 @@ std::string DOTGraphTraits<SelectionDAG*>::getNodeLabel(const SDNode *Node, } else if (const ARG_FLAGSSDNode *N = dyn_cast<ARG_FLAGSSDNode>(Node)) { Op = Op + " AF=" + N->getArgFlags().getArgFlagsString(); } else if (const VTSDNode *N = dyn_cast<VTSDNode>(Node)) { - Op = Op + " VT=" + MVT::getValueTypeString(N->getVT()); + Op = Op + " VT=" + N->getVT().getMVTString(); } else if (const StringSDNode *N = dyn_cast<StringSDNode>(Node)) { Op = Op + "\"" + N->getValue() + "\""; } else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(Node)) { @@ -172,7 +172,7 @@ std::string DOTGraphTraits<SelectionDAG*>::getNodeLabel(const SDNode *Node, break; } if (doExt) - Op += MVT::getValueTypeString(LD->getMemoryVT()) + ">"; + Op += LD->getMemoryVT().getMVTString() + ">"; if (LD->isVolatile()) Op += "<V>"; Op += LD->getIndexedModeName(LD->getAddressingMode()); @@ -180,7 +180,7 @@ std::string DOTGraphTraits<SelectionDAG*>::getNodeLabel(const SDNode *Node, Op += " A=" + utostr(LD->getAlignment()); } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(Node)) { if (ST->isTruncatingStore()) - Op += "<trunc " + MVT::getValueTypeString(ST->getMemoryVT()) + ">"; + Op += "<trunc " + ST->getMemoryVT().getMVTString() + ">"; if (ST->isVolatile()) Op += "<V>"; Op += ST->getIndexedModeName(ST->getAddressingMode()); diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 408a5b2..774988a 100644 --- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -180,12 +180,12 @@ TargetLowering::TargetLowering(TargetMachine &tm) // Default all indexed load / store to expand. for (unsigned IM = (unsigned)ISD::PRE_INC; IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) { - setIndexedLoadAction(IM, (MVT::ValueType)VT, Expand); - setIndexedStoreAction(IM, (MVT::ValueType)VT, Expand); + setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand); + setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand); } // These operations default to expand. - setOperationAction(ISD::FGETSIGN, (MVT::ValueType)VT, Expand); + setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand); } // Most targets ignore the @llvm.prefetch intrinsic. 
@@ -244,7 +244,7 @@ void TargetLowering::computeRegisterProperties() { // Everything defaults to needing one register. for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) { NumRegistersForVT[i] = 1; - RegisterTypeForVT[i] = TransformToType[i] = i; + RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i; } // ...except isVoid, which doesn't need any registers. NumRegistersForVT[MVT::isVoid] = 0; @@ -256,24 +256,28 @@ void TargetLowering::computeRegisterProperties() { // Every integer value type larger than this largest register takes twice as // many registers to represent as the previous ValueType. - for (MVT::ValueType ExpandedReg = LargestIntReg + 1; - MVT::isInteger(ExpandedReg); ++ExpandedReg) { + for (unsigned ExpandedReg = LargestIntReg + 1; ; ++ExpandedReg) { + MVT EVT = (MVT::SimpleValueType)ExpandedReg; + if (!EVT.isInteger()) + break; NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1]; - RegisterTypeForVT[ExpandedReg] = LargestIntReg; - TransformToType[ExpandedReg] = ExpandedReg - 1; - ValueTypeActions.setTypeAction(ExpandedReg, Expand); + RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg; + TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1); + ValueTypeActions.setTypeAction(EVT, Expand); } // Inspect all of the ValueType's smaller than the largest integer // register to see which ones need promotion. - MVT::ValueType LegalIntReg = LargestIntReg; - for (MVT::ValueType IntReg = LargestIntReg - 1; - IntReg >= MVT::i1; --IntReg) { - if (isTypeLegal(IntReg)) { + unsigned LegalIntReg = LargestIntReg; + for (unsigned IntReg = LargestIntReg - 1; + IntReg >= (unsigned)MVT::i1; --IntReg) { + MVT IVT = (MVT::SimpleValueType)IntReg; + if (isTypeLegal(IVT)) { LegalIntReg = IntReg; } else { - RegisterTypeForVT[IntReg] = TransformToType[IntReg] = LegalIntReg; - ValueTypeActions.setTypeAction(IntReg, Promote); + RegisterTypeForVT[IntReg] = TransformToType[IntReg] = + (MVT::SimpleValueType)LegalIntReg; + ValueTypeActions.setTypeAction(IVT, Promote); } } @@ -311,18 +315,19 @@ void TargetLowering::computeRegisterProperties() { } // Loop over all of the vector value types to see which need transformations. - for (MVT::ValueType i = MVT::FIRST_VECTOR_VALUETYPE; - i <= MVT::LAST_VECTOR_VALUETYPE; ++i) { - if (!isTypeLegal(i)) { - MVT::ValueType IntermediateVT, RegisterVT; + for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE; + i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { + MVT VT = (MVT::SimpleValueType)i; + if (!isTypeLegal(VT)) { + MVT IntermediateVT, RegisterVT; unsigned NumIntermediates; NumRegistersForVT[i] = - getVectorTypeBreakdown(i, + getVectorTypeBreakdown(VT, IntermediateVT, NumIntermediates, RegisterVT); RegisterTypeForVT[i] = RegisterVT; TransformToType[i] = MVT::Other; // this isn't actually used - ValueTypeActions.setTypeAction(i, Expand); + ValueTypeActions.setTypeAction(VT, Expand); } } } @@ -332,8 +337,7 @@ const char *TargetLowering::getTargetNodeName(unsigned Opcode) const { } -MVT::ValueType -TargetLowering::getSetCCResultType(const SDOperand &) const { +MVT TargetLowering::getSetCCResultType(const SDOperand &) const { return getValueType(TD->getIntPtrType()); } @@ -347,13 +351,13 @@ TargetLowering::getSetCCResultType(const SDOperand &) const { /// register. It also returns the VT and quantity of the intermediate values /// before they are promoted/expanded. 
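computeRegisterProperties above marks integer types wider than the largest legal register as Expand, doubling the register count at each step and recording the largest legal type as their register type, while narrower illegal types are promoted to the next legal width. A toy classification over plain bit widths, under the assumption that all legal widths are powers of two:

    #include <cstdio>
    #include <vector>

    // Given the sorted set of legal integer widths, report how an arbitrary
    // width is handled: legal as-is, promoted to the next legal width, or
    // expanded into several registers of the widest legal width.
    void classify(unsigned Bits, const std::vector<unsigned> &LegalWidths) {
      unsigned Largest = LegalWidths.back();
      if (Bits > Largest) {
        unsigned NumRegs = (Bits + Largest - 1) / Largest;
        std::printf("i%u: expand into %u x i%u registers\n",
                    Bits, NumRegs, Largest);
        return;
      }
      for (unsigned i = 0, e = (unsigned)LegalWidths.size(); i != e; ++i)
        if (LegalWidths[i] >= Bits) {
          if (LegalWidths[i] == Bits)
            std::printf("i%u: legal\n", Bits);
          else
            std::printf("i%u: promote to i%u\n", Bits, LegalWidths[i]);
          return;
        }
    }

    int main() {
      std::vector<unsigned> Legal;                 // a 32-bit-style target
      Legal.push_back(8); Legal.push_back(16); Legal.push_back(32);
      classify(64, Legal);   // expand into 2 x i32
      classify(16, Legal);   // legal
      classify(1,  Legal);   // promote to i8
      return 0;
    }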
/// -unsigned TargetLowering::getVectorTypeBreakdown(MVT::ValueType VT, - MVT::ValueType &IntermediateVT, +unsigned TargetLowering::getVectorTypeBreakdown(MVT VT, + MVT &IntermediateVT, unsigned &NumIntermediates, - MVT::ValueType &RegisterVT) const { + MVT &RegisterVT) const { // Figure out the right, legal destination reg to copy into. - unsigned NumElts = MVT::getVectorNumElements(VT); - MVT::ValueType EltTy = MVT::getVectorElementType(VT); + unsigned NumElts = VT.getVectorNumElements(); + MVT EltTy = VT.getVectorElementType(); unsigned NumVectorRegs = 1; @@ -366,24 +370,23 @@ unsigned TargetLowering::getVectorTypeBreakdown(MVT::ValueType VT, // Divide the input until we get to a supported size. This will always // end with a scalar if the target doesn't support vectors. - while (NumElts > 1 && - !isTypeLegal(MVT::getVectorType(EltTy, NumElts))) { + while (NumElts > 1 && !isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) { NumElts >>= 1; NumVectorRegs <<= 1; } NumIntermediates = NumVectorRegs; - MVT::ValueType NewVT = MVT::getVectorType(EltTy, NumElts); + MVT NewVT = MVT::getVectorVT(EltTy, NumElts); if (!isTypeLegal(NewVT)) NewVT = EltTy; IntermediateVT = NewVT; - MVT::ValueType DestVT = getTypeToTransformTo(NewVT); + MVT DestVT = getTypeToTransformTo(NewVT); RegisterVT = DestVT; if (DestVT < NewVT) { // Value is expanded, e.g. i64 -> i16. - return NumVectorRegs*(MVT::getSizeInBits(NewVT)/MVT::getSizeInBits(DestVT)); + return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits()); } else { // Otherwise, promotion or legal types use the same number of registers as // the vector decimated to the appropriate level. @@ -425,7 +428,7 @@ bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDOperand Op, case ISD::XOR: if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) if (C->getAPIntValue().intersects(~Demanded)) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); SDOperand New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0), DAG.getConstant(Demanded & C->getAPIntValue(), @@ -597,7 +600,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, // e.g. 
(X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2 if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known if ((KnownOne & KnownOne2) == KnownOne) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); SDOperand ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT); return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0), ANDC)); @@ -612,7 +615,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, // if we can expand it to have all bits set, do it if (Expanded.isAllOnesValue()) { if (Expanded != C->getAPIntValue()) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); SDOperand New = TLO.DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0), TLO.DAG.getConstant(Expanded, VT)); return TLO.CombineTo(Op, New); @@ -688,7 +691,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, SDOperand NewSA = TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT, InOp.getOperand(0), NewSA)); } @@ -705,9 +708,9 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, break; case ISD::SRL: if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); unsigned ShAmt = SA->getValue(); - unsigned VTSize = MVT::getSizeInBits(VT); + unsigned VTSize = VT.getSizeInBits(); SDOperand InOp = Op.getOperand(0); // If the shift count is an invalid immediate, don't do anything. @@ -749,7 +752,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, break; case ISD::SRA: if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); unsigned ShAmt = SA->getValue(); // If the shift count is an invalid immediate, don't do anything. @@ -762,7 +765,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, // demand the input sign bit. APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); if (HighBits.intersects(NewMask)) - InDemandedMask |= APInt::getSignBit(MVT::getSizeInBits(VT)); + InDemandedMask |= APInt::getSignBit(VT.getSizeInBits()); if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask, KnownZero, KnownOne, TLO, Depth+1)) @@ -785,22 +788,22 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, } break; case ISD::SIGN_EXTEND_INREG: { - MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); + MVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); // Sign extension. Compute the demanded bits in the result that are not // present in the input. APInt NewBits = APInt::getHighBitsSet(BitWidth, - BitWidth - MVT::getSizeInBits(EVT)) & + BitWidth - EVT.getSizeInBits()) & NewMask; // If none of the extended bits are demanded, eliminate the sextinreg. 
if (NewBits == 0) return TLO.CombineTo(Op, Op.getOperand(0)); - APInt InSignBit = APInt::getSignBit(MVT::getSizeInBits(EVT)); + APInt InSignBit = APInt::getSignBit(EVT.getSizeInBits()); InSignBit.zext(BitWidth); APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, - MVT::getSizeInBits(EVT)) & + EVT.getSizeInBits()) & NewMask; // Since the sign extended bits are demanded, we know that the sign @@ -852,8 +855,8 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, break; } case ISD::SIGN_EXTEND: { - MVT::ValueType InVT = Op.getOperand(0).getValueType(); - unsigned InBits = MVT::getSizeInBits(InVT); + MVT InVT = Op.getOperand(0).getValueType(); + unsigned InBits = InVT.getSizeInBits(); APInt InMask = APInt::getLowBitsSet(BitWidth, InBits); APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits); APInt NewBits = ~InMask & NewMask; @@ -948,9 +951,9 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, break; } case ISD::AssertZext: { - MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); + MVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); APInt InMask = APInt::getLowBitsSet(BitWidth, - MVT::getSizeInBits(VT)); + VT.getSizeInBits()); if (SimplifyDemandedBits(Op.getOperand(0), InMask & NewMask, KnownZero, KnownOne, TLO, Depth+1)) return true; @@ -962,7 +965,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, #if 0 // If this is an FP->Int bitcast and if the sign bit is the only thing that // is demanded, turn this into a FGETSIGN. - if (NewMask == MVT::getIntVTSignBit(Op.getValueType()) && + if (NewMask == MVT::getIntegerVTSignBit(Op.getValueType()) && MVT::isFloatingPoint(Op.getOperand(0).getValueType()) && !MVT::isVector(Op.getOperand(0).getValueType())) { // Only do this xform if FGETSIGN is valid or if before legalize. @@ -972,7 +975,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, // place. We expect the SHL to be eliminated by other optimizations. SDOperand Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(), Op.getOperand(0)); - unsigned ShVal = MVT::getSizeInBits(Op.getValueType())-1; + unsigned ShVal = Op.getValueType().getSizeInBits()-1; SDOperand ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy()); return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, Op.getValueType(), Sign, ShAmt)); @@ -1030,7 +1033,7 @@ unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDOperand Op, /// SimplifySetCC - Try to simplify a setcc built with the specified operands /// and cc. If it is unable to simplify it, return a null SDOperand. SDOperand -TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1, +TargetLowering::SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; @@ -1057,7 +1060,7 @@ TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1, N0.getOperand(1).getOpcode() == ISD::Constant) { unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getValue(); if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && - ShAmt == Log2_32(MVT::getSizeInBits(N0.getValueType()))) { + ShAmt == Log2_32(N0.getValueType().getSizeInBits())) { if ((C1 == 0) == (Cond == ISD::SETEQ)) { // (srl (ctlz x), 5) == 0 -> X != 0 // (srl (ctlz x), 5) != 1 -> X != 0 @@ -1075,7 +1078,7 @@ TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1, // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 
if (N0.getOpcode() == ISD::ZERO_EXTEND) { - unsigned InSize = MVT::getSizeInBits(N0.getOperand(0).getValueType()); + unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits(); // If the comparison constant has bits in the upper part, the // zero-extended value could never match. @@ -1118,10 +1121,10 @@ TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1, } } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { - MVT::ValueType ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); - unsigned ExtSrcTyBits = MVT::getSizeInBits(ExtSrcTy); - MVT::ValueType ExtDstTy = N0.getValueType(); - unsigned ExtDstTyBits = MVT::getSizeInBits(ExtDstTy); + MVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); + unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); + MVT ExtDstTy = N0.getValueType(); + unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); // If the extended part has any inconsistent bits, it cannot ever // compare equal. In other words, they have to be all ones or all @@ -1132,7 +1135,7 @@ TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1, return DAG.getConstant(Cond == ISD::SETNE, VT); SDOperand ZextOp; - MVT::ValueType Op0Ty = N0.getOperand(0).getValueType(); + MVT Op0Ty = N0.getOperand(0).getValueType(); if (Op0Ty == ExtSrcTy) { ZextOp = N0.getOperand(0); } else { @@ -1161,7 +1164,7 @@ TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1, // Invert the condition. ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); CC = ISD::getSetCCInverse(CC, - MVT::isInteger(N0.getOperand(0).getValueType())); + N0.getOperand(0).getValueType().isInteger()); return DAG.getSetCC(VT, N0.getOperand(0), N0.getOperand(1), CC); } @@ -1196,7 +1199,7 @@ TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1, } APInt MinVal, MaxVal; - unsigned OperandBitSize = MVT::getSizeInBits(N1C->getValueType(0)); + unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits(); if (ISD::isSignedIntSetCC(Cond)) { MinVal = APInt::getSignedMinValue(OperandBitSize); MaxVal = APInt::getSignedMaxValue(OperandBitSize); @@ -1313,7 +1316,7 @@ TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1, if (N0 == N1) { // We can always fold X == X for integer setcc's. - if (MVT::isInteger(N0.getValueType())) + if (N0.getValueType().isInteger()) return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT); unsigned UOF = ISD::getUnorderedFlavor(Cond); if (UOF == 2) // FP operators that are undefined on NaNs. 
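// --- Illustrative sketch, not from the patch itself: the recurring pattern these
// hunks apply.  The old static helpers taking an MVT::ValueType become member
// functions on the lightweight MVT class, loops over the value-type enum use a
// plain unsigned with an explicit MVT::SimpleValueType cast, and switches
// dispatch through getSimpleVT().  Only API names that appear in the surrounding
// hunks are used; the helper functions below are hypothetical.
//
//   Old:  unsigned Bits = MVT::getSizeInBits(VT);  if (MVT::isInteger(VT)) ...
//   New:  unsigned Bits = VT.getSizeInBits();      if (VT.isInteger()) ...
#include "llvm/CodeGen/ValueTypes.h"   // defines MVT in this era of the tree
using namespace llvm;

static bool isSmallScalar(MVT VT) {                   // hypothetical helper
  // Scalar queries are now methods on the MVT value itself.
  return (VT.isInteger() || VT.isFloatingPoint()) && VT.getSizeInBits() <= 32;
}

static unsigned countInterestingVectorVTs() {         // hypothetical helper
  unsigned N = 0;
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;                 // cast, as in the loops above
    switch (VT.getSimpleVT()) {                       // switch via getSimpleVT()
    case MVT::v4i32:
    case MVT::v4f32: ++N; break;
    default: break;
    }
  }
  return N;
}
// --- end of sketch ---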
@@ -1328,7 +1331,7 @@ TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1, } if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && - MVT::isInteger(N0.getValueType())) { + N0.getValueType().isInteger()) { if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB || N0.getOpcode() == ISD::XOR) { // Simplify (X+Y) == (X+Z) --> Y == Z @@ -1517,8 +1520,8 @@ bool TargetLowering::isConsecutiveLoad(SDNode *LD, SDNode *Base, const MachineFrameInfo *MFI) const { if (LD->getOperand(0).Val != Base->getOperand(0).Val) return false; - MVT::ValueType VT = LD->getValueType(0); - if (MVT::getSizeInBits(VT) / 8 != Bytes) + MVT VT = LD->getValueType(0); + if (VT.getSizeInBits() / 8 != Bytes) return false; SDOperand Loc = LD->getOperand(1); @@ -1593,10 +1596,10 @@ TargetLowering::getConstraintType(const std::string &Constraint) const { /// LowerXConstraint - try to replace an X constraint, which matches anything, /// with another that has more specific requirements based on the type of the /// corresponding operand. -const char *TargetLowering::LowerXConstraint(MVT::ValueType ConstraintVT) const{ - if (MVT::isInteger(ConstraintVT)) +const char *TargetLowering::LowerXConstraint(MVT ConstraintVT) const{ + if (ConstraintVT.isInteger()) return "r"; - if (MVT::isFloatingPoint(ConstraintVT)) + if (ConstraintVT.isFloatingPoint()) return "f"; // works for many targets return 0; } @@ -1661,14 +1664,14 @@ void TargetLowering::LowerAsmOperandForConstraint(SDOperand Op, std::vector<unsigned> TargetLowering:: getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const { + MVT VT) const { return std::vector<unsigned>(); } std::pair<unsigned, const TargetRegisterClass*> TargetLowering:: getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const { + MVT VT) const { if (Constraint[0] != '{') return std::pair<unsigned, const TargetRegisterClass*>(0, 0); assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?"); @@ -2039,7 +2042,7 @@ static mu magicu64(uint64_t d) /// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, std::vector<SDNode*>* Created) const { - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // Check to see if we can do this. if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64)) @@ -2080,7 +2083,7 @@ SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, } // Extract the sign bit and add it to the quotient SDOperand T = - DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(MVT::getSizeInBits(VT)-1, + DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(VT.getSizeInBits()-1, getShiftAmountTy())); if (Created) Created->push_back(T.Val); @@ -2093,7 +2096,7 @@ SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, /// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> SDOperand TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, std::vector<SDNode*>* Created) const { - MVT::ValueType VT = N->getValueType(0); + MVT VT = N->getValueType(0); // Check to see if we can do this. 
if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64)) diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index 5029a69..eaa16fc 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -660,7 +660,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { case ISD::LOAD: { LoadSDNode *LD = cast<LoadSDNode>(Op); ISD::MemIndexedMode AM = LD->getAddressingMode(); - MVT::ValueType LoadedVT = LD->getMemoryVT(); + MVT LoadedVT = LD->getMemoryVT(); if (AM != ISD::UNINDEXED) { SDOperand Offset, AMOpc; bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC); @@ -741,7 +741,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { } case ARMISD::CMOV: { bool isThumb = Subtarget->isThumb(); - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); SDOperand N0 = Op.getOperand(0); SDOperand N1 = Op.getOperand(1); SDOperand N2 = Op.getOperand(2); @@ -805,7 +805,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { cast<ConstantSDNode>(N2)->getValue()), MVT::i32); SDOperand Ops[] = { N0, N1, Tmp2, N3, InFlag }; unsigned Opc = 0; - switch (VT) { + switch (VT.getSimpleVT()) { default: assert(false && "Illegal conditional move type!"); break; case MVT::i32: @@ -821,7 +821,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { return CurDAG->SelectNodeTo(Op.Val, Opc, VT, Ops, 5); } case ARMISD::CNEG: { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); SDOperand N0 = Op.getOperand(0); SDOperand N1 = Op.getOperand(1); SDOperand N2 = Op.getOperand(2); @@ -837,7 +837,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { cast<ConstantSDNode>(N2)->getValue()), MVT::i32); SDOperand Ops[] = { N0, N1, Tmp2, N3, InFlag }; unsigned Opc = 0; - switch (VT) { + switch (VT.getSimpleVT()) { default: assert(false && "Illegal conditional move type!"); break; case MVT::f32: diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index dc76b7b..cfb98cb 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -363,7 +363,7 @@ static bool FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, } static void -HowToPassArgument(MVT::ValueType ObjectVT, unsigned NumGPRs, +HowToPassArgument(MVT ObjectVT, unsigned NumGPRs, unsigned StackOffset, unsigned &NeededGPRs, unsigned &NeededStackSize, unsigned &GPRPad, unsigned &StackPad, ISD::ArgFlagsTy Flags) { @@ -375,7 +375,7 @@ HowToPassArgument(MVT::ValueType ObjectVT, unsigned NumGPRs, GPRPad = NumGPRs % ((align + 3)/4); StackPad = StackOffset % align; unsigned firstGPR = NumGPRs + GPRPad; - switch (ObjectVT) { + switch (ObjectVT.getSimpleVT()) { default: assert(0 && "Unhandled argument type!"); case MVT::i32: case MVT::f32: @@ -400,7 +400,7 @@ HowToPassArgument(MVT::ValueType ObjectVT, unsigned NumGPRs, /// ARMISD:CALL <- callseq_end chain. Also add input and output parameter /// nodes. 
SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType RetVT= Op.Val->getValueType(0); + MVT RetVT= Op.Val->getValueType(0); SDOperand Chain = Op.getOperand(0); unsigned CallConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); assert((CallConv == CallingConv::C || @@ -419,7 +419,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { unsigned ObjGPRs; unsigned StackPad; unsigned GPRPad; - MVT::ValueType ObjectVT = Op.getOperand(5+2*i).getValueType(); + MVT ObjectVT = Op.getOperand(5+2*i).getValueType(); ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags(); HowToPassArgument(ObjectVT, NumGPRs, NumBytes, ObjGPRs, ObjSize, @@ -446,7 +446,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { SDOperand Arg = Op.getOperand(5+2*i); ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags(); - MVT::ValueType ArgVT = Arg.getValueType(); + MVT ArgVT = Arg.getValueType(); unsigned ObjSize; unsigned ObjGPRs; @@ -457,7 +457,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { NumGPRs += GPRPad; ArgOffset += StackPad; if (ObjGPRs > 0) { - switch (ArgVT) { + switch (ArgVT.getSimpleVT()) { default: assert(0 && "Unexpected ValueType for argument!"); case MVT::i32: RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Arg)); @@ -587,7 +587,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { InFlag = Chain.getValue(1); } - std::vector<MVT::ValueType> NodeTys; + std::vector<MVT> NodeTys; NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. @@ -617,7 +617,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { NodeTys.clear(); // If the call has results, copy the values out of the ret val registers. - switch (RetVT) { + switch (RetVT.getSimpleVT()) { default: assert(0 && "Unexpected ret value!"); case MVT::Other: break; @@ -708,7 +708,7 @@ static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { // be used to form addressing mode. These wrapped nodes will be selected // into MOVi. static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); SDOperand Res; if (CP->isMachineConstantPoolEntry()) @@ -724,7 +724,7 @@ static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { SDOperand ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) { - MVT::ValueType PtrVT = getPointerTy(); + MVT PtrVT = getPointerTy(); unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, ARMCP::CPValue, @@ -758,7 +758,7 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, GlobalValue *GV = GA->getGlobal(); SDOperand Offset; SDOperand Chain = DAG.getEntryNode(); - MVT::ValueType PtrVT = getPointerTy(); + MVT PtrVT = getPointerTy(); // Get the Thread Pointer SDOperand ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, PtrVT); @@ -807,7 +807,7 @@ ARMTargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { SDOperand ARMTargetLowering::LowerGlobalAddressELF(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = getPointerTy(); + MVT PtrVT = getPointerTy(); GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); Reloc::Model RelocM = getTargetMachine().getRelocationModel(); if (RelocM == Reloc::PIC_) { @@ -840,7 +840,7 @@ static bool GVIsIndirectSymbol(GlobalValue *GV, Reloc::Model RelocM) { SDOperand ARMTargetLowering::LowerGlobalAddressDarwin(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = getPointerTy(); + MVT PtrVT = getPointerTy(); GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); Reloc::Model RelocM = getTargetMachine().getRelocationModel(); bool IsIndirect = GVIsIndirectSymbol(GV, RelocM); @@ -875,7 +875,7 @@ SDOperand ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDOperand Op, SelectionDAG &DAG){ assert(Subtarget->isTargetELF() && "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); - MVT::ValueType PtrVT = getPointerTy(); + MVT PtrVT = getPointerTy(); unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; ARMConstantPoolValue *CPV = new ARMConstantPoolValue("_GLOBAL_OFFSET_TABLE_", ARMPCLabelIndex, @@ -888,7 +888,7 @@ SDOperand ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDOperand Op, } static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); switch (IntNo) { default: return SDOperand(); // Don't custom lower most intrinsics. @@ -901,7 +901,7 @@ static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG, unsigned VarArgsFrameIndex) { // vastart just stores the address of the VarArgsFrameIndex slot into the // memory location argument. - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0); @@ -911,7 +911,7 @@ static SDOperand LowerFORMAL_ARGUMENT(SDOperand Op, SelectionDAG &DAG, unsigned ArgNo, unsigned &NumGPRs, unsigned &ArgOffset) { MachineFunction &MF = DAG.getMachineFunction(); - MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); + MVT ObjectVT = Op.getValue(ArgNo).getValueType(); SDOperand Root = Op.getOperand(0); std::vector<SDOperand> ArgValues; MachineRegisterInfo &RegInfo = MF.getRegInfo(); @@ -1025,7 +1025,7 @@ ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { ArgValues.push_back(Root); // Return the new list of results. 
- std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(), + std::vector<MVT> RetVT(Op.Val->value_begin(), Op.Val->value_end()); return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size()); } @@ -1123,7 +1123,7 @@ static SDOperand getVFPCmp(SDOperand LHS, SDOperand RHS, SelectionDAG &DAG) { static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG, const ARMSubtarget *ST) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); SDOperand LHS = Op.getOperand(0); SDOperand RHS = Op.getOperand(1); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); @@ -1195,7 +1195,7 @@ SDOperand ARMTargetLowering::LowerBR_JT(SDOperand Op, SelectionDAG &DAG) { SDOperand Table = Op.getOperand(1); SDOperand Index = Op.getOperand(2); - MVT::ValueType PTy = getPointerTy(); + MVT PTy = getPointerTy(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); SDOperand UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); @@ -1204,7 +1204,7 @@ SDOperand ARMTargetLowering::LowerBR_JT(SDOperand Op, SelectionDAG &DAG) { Index = DAG.getNode(ISD::MUL, PTy, Index, DAG.getConstant(4, PTy)); SDOperand Addr = DAG.getNode(ISD::ADD, PTy, Index, Table); bool isPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_; - Addr = DAG.getLoad(isPIC ? (MVT::ValueType)MVT::i32 : PTy, + Addr = DAG.getLoad(isPIC ? (MVT)MVT::i32 : PTy, Chain, Addr, NULL, 0); Chain = Addr.getValue(1); if (isPIC) @@ -1220,7 +1220,7 @@ static SDOperand LowerFP_TO_INT(SDOperand Op, SelectionDAG &DAG) { } static SDOperand LowerINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); unsigned Opc = Op.getOpcode() == ISD::SINT_TO_FP ? ARMISD::SITOF : ARMISD::UITOF; @@ -1232,8 +1232,8 @@ static SDOperand LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { // Implement fcopysign with a fabs and a conditional fneg. SDOperand Tmp0 = Op.getOperand(0); SDOperand Tmp1 = Op.getOperand(1); - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType SrcVT = Tmp1.getValueType(); + MVT VT = Op.getValueType(); + MVT SrcVT = Tmp1.getValueType(); SDOperand AbsVal = DAG.getNode(ISD::FABS, VT, Tmp0); SDOperand Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG); SDOperand ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32); @@ -1265,7 +1265,7 @@ ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, unsigned BytesLeft = SizeVal & 3; unsigned NumMemOps = SizeVal >> 2; unsigned EmittedNumMemOps = 0; - MVT::ValueType VT = MVT::i32; + MVT VT = MVT::i32; unsigned VTSize = 4; unsigned i = 0; const unsigned MAX_LOADS_IN_LDM = 6; @@ -1536,7 +1536,7 @@ SDOperand ARMTargetLowering::PerformDAGCombine(SDNode *N, /// isLegalAddressImmediate - Return true if the integer value can be used /// as the offset of the target addressing mode for load / store of the /// given type. 
-static bool isLegalAddressImmediate(int64_t V, MVT::ValueType VT, +static bool isLegalAddressImmediate(int64_t V, MVT VT, const ARMSubtarget *Subtarget) { if (V == 0) return true; @@ -1546,7 +1546,7 @@ static bool isLegalAddressImmediate(int64_t V, MVT::ValueType VT, return false; unsigned Scale = 1; - switch (VT) { + switch (VT.getSimpleVT()) { default: return false; case MVT::i1: case MVT::i8: @@ -1570,7 +1570,7 @@ static bool isLegalAddressImmediate(int64_t V, MVT::ValueType VT, if (V < 0) V = - V; - switch (VT) { + switch (VT.getSimpleVT()) { default: return false; case MVT::i1: case MVT::i8: @@ -1615,7 +1615,7 @@ bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, return false; int Scale = AM.Scale; - switch (getValueType(Ty)) { + switch (getValueType(Ty).getSimpleVT()) { default: return false; case MVT::i1: case MVT::i8: @@ -1650,7 +1650,7 @@ bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, } -static bool getIndexedAddressParts(SDNode *Ptr, MVT::ValueType VT, +static bool getIndexedAddressParts(SDNode *Ptr, MVT VT, bool isSEXTLoad, SDOperand &Base, SDOperand &Offset, bool &isInc, SelectionDAG &DAG) { @@ -1717,7 +1717,7 @@ ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, if (Subtarget->isThumb()) return false; - MVT::ValueType VT; + MVT VT; SDOperand Ptr; bool isSEXTLoad = false; if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { @@ -1751,7 +1751,7 @@ bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, if (Subtarget->isThumb()) return false; - MVT::ValueType VT; + MVT VT; SDOperand Ptr; bool isSEXTLoad = false; if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { @@ -1816,7 +1816,7 @@ ARMTargetLowering::getConstraintType(const std::string &Constraint) const { std::pair<unsigned, const TargetRegisterClass*> ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const { + MVT VT) const { if (Constraint.size() == 1) { // GCC RS6000 Constraint Letters switch (Constraint[0]) { @@ -1838,7 +1838,7 @@ ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, std::vector<unsigned> ARMTargetLowering:: getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const { + MVT VT) const { if (Constraint.size() != 1) return std::vector<unsigned>(); diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h index ce6d5fe..8e5a8b3 100644 --- a/lib/Target/ARM/ARMISelLowering.h +++ b/lib/Target/ARM/ARMISelLowering.h @@ -114,10 +114,10 @@ namespace llvm { ConstraintType getConstraintType(const std::string &Constraint) const; std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; std::vector<unsigned> getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; virtual const ARMSubtarget* getSubtarget() { return Subtarget; diff --git a/lib/Target/Alpha/AlphaISelDAGToDAG.cpp b/lib/Target/Alpha/AlphaISelDAGToDAG.cpp index 303c5aa..c7eefcc 100644 --- a/lib/Target/Alpha/AlphaISelDAGToDAG.cpp +++ b/lib/Target/Alpha/AlphaISelDAGToDAG.cpp @@ -334,7 +334,7 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { case ISD::TargetConstantFP: { ConstantFPSDNode *CN = cast<ConstantFPSDNode>(N); bool isDouble = N->getValueType(0) == MVT::f64; - MVT::ValueType T = isDouble ? MVT::f64 : MVT::f32; + MVT T = isDouble ? 
MVT::f64 : MVT::f32; if (CN->getValueAPF().isPosZero()) { return CurDAG->SelectNodeTo(N, isDouble ? Alpha::CPYST : Alpha::CPYSS, T, CurDAG->getRegister(Alpha::F31, T), @@ -350,7 +350,7 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { } case ISD::SETCC: - if (MVT::isFloatingPoint(N->getOperand(0).Val->getValueType(0))) { + if (N->getOperand(0).Val->getValueType(0).isFloatingPoint()) { ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); unsigned Opc = Alpha::WTF; @@ -404,9 +404,9 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { break; case ISD::SELECT: - if (MVT::isFloatingPoint(N->getValueType(0)) && + if (N->getValueType(0).isFloatingPoint() && (N->getOperand(0).getOpcode() != ISD::SETCC || - !MVT::isFloatingPoint(N->getOperand(0).getOperand(1).getValueType()))) { + !N->getOperand(0).getOperand(1).getValueType().isFloatingPoint())) { //This should be the condition not covered by the Patterns //FIXME: Don't have SelectCode die, but rather return something testable // so that things like this can be caught in fall though code @@ -472,7 +472,7 @@ void AlphaDAGToDAGISel::SelectCALL(SDOperand Op) { AddToISelQueue(Chain); std::vector<SDOperand> CallOperands; - std::vector<MVT::ValueType> TypeOperands; + std::vector<MVT> TypeOperands; //grab the arguments for(int i = 2, e = N->getNumOperands(); i < e; ++i) { @@ -489,7 +489,7 @@ void AlphaDAGToDAGISel::SelectCALL(SDOperand Op) { for (int i = 6; i < count; ++i) { unsigned Opc = Alpha::WTF; - if (MVT::isInteger(TypeOperands[i])) { + if (TypeOperands[i].isInteger()) { Opc = Alpha::STQ; } else if (TypeOperands[i] == MVT::f32) { Opc = Alpha::STS; @@ -504,7 +504,7 @@ void AlphaDAGToDAGISel::SelectCALL(SDOperand Op) { Chain = SDOperand(CurDAG->getTargetNode(Opc, MVT::Other, Ops, 4), 0); } for (int i = 0; i < std::min(6, count); ++i) { - if (MVT::isInteger(TypeOperands[i])) { + if (TypeOperands[i].isInteger()) { Chain = CurDAG->getCopyToReg(Chain, args_int[i], CallOperands[i], InFlag); InFlag = Chain.getValue(1); } else if (TypeOperands[i] == MVT::f32 || TypeOperands[i] == MVT::f64) { @@ -533,7 +533,7 @@ void AlphaDAGToDAGISel::SelectCALL(SDOperand Op) { std::vector<SDOperand> CallResults; - switch (N->getValueType(0)) { + switch (N->getValueType(0).getSimpleVT()) { default: assert(0 && "Unexpected ret value!"); case MVT::Other: break; case MVT::i64: diff --git a/lib/Target/Alpha/AlphaISelLowering.cpp b/lib/Target/Alpha/AlphaISelLowering.cpp index 91b1180..494edda 100644 --- a/lib/Target/Alpha/AlphaISelLowering.cpp +++ b/lib/Target/Alpha/AlphaISelLowering.cpp @@ -145,8 +145,7 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM) : TargetLowering(TM) computeRegisterProperties(); } -MVT::ValueType -AlphaTargetLowering::getSetCCResultType(const SDOperand &) const { +MVT AlphaTargetLowering::getSetCCResultType(const SDOperand &) const { return MVT::i64; } @@ -169,7 +168,7 @@ const char *AlphaTargetLowering::getTargetNodeName(unsigned Opcode) const { } static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); SDOperand Zero = DAG.getConstant(0, PtrVT); @@ -217,14 +216,13 @@ static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { SDOperand argt; - MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); + MVT ObjectVT = 
Op.getValue(ArgNo).getValueType(); SDOperand ArgVal; if (ArgNo < 6) { - switch (ObjectVT) { + switch (ObjectVT.getSimpleVT()) { default: - cerr << "Unknown Type " << ObjectVT << "\n"; - abort(); + assert(false && "Invalid value type!"); case MVT::f64: args_float[ArgNo] = AddLiveIn(MF, args_float[ArgNo], &Alpha::F8RCRegClass); @@ -282,7 +280,7 @@ static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, ArgValues.push_back(Root); // Return the new list of results. - std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(), + std::vector<MVT> RetVT(Op.Val->value_begin(), Op.Val->value_end()); return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size()); } @@ -300,12 +298,12 @@ static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { break; //return SDOperand(); // ret void is legal case 3: { - MVT::ValueType ArgVT = Op.getOperand(1).getValueType(); + MVT ArgVT = Op.getOperand(1).getValueType(); unsigned ArgReg; - if (MVT::isInteger(ArgVT)) + if (ArgVT.isInteger()) ArgReg = Alpha::R0; else { - assert(MVT::isFloatingPoint(ArgVT)); + assert(ArgVT.isFloatingPoint()); ArgReg = Alpha::F0; } Copy = DAG.getCopyToReg(Copy, ArgReg, Op.getOperand(1), Copy.getValue(1)); @@ -332,7 +330,7 @@ AlphaTargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, std::vector<SDOperand> args_to_use; for (unsigned i = 0, e = Args.size(); i != e; ++i) { - switch (getValueType(Args[i].Ty)) { + switch (getValueType(Args[i].Ty).getSimpleVT()) { default: assert(0 && "Unexpected ValueType for argument!"); case MVT::i1: case MVT::i8: @@ -355,9 +353,9 @@ AlphaTargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, args_to_use.push_back(Args[i].Node); } - std::vector<MVT::ValueType> RetVals; - MVT::ValueType RetTyVT = getValueType(RetTy); - MVT::ValueType ActualRetTyVT = RetTyVT; + std::vector<MVT> RetVals; + MVT RetTyVT = getValueType(RetTy); + MVT ActualRetTyVT = RetTyVT; if (RetTyVT >= MVT::i1 && RetTyVT <= MVT::i32) ActualRetTyVT = MVT::i64; @@ -407,17 +405,17 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::JumpTable: return LowerJumpTable(Op, DAG); case ISD::SINT_TO_FP: { - assert(MVT::i64 == Op.getOperand(0).getValueType() && + assert(Op.getOperand(0).getValueType() == MVT::i64 && "Unhandled SINT_TO_FP type in custom expander!"); SDOperand LD; - bool isDouble = MVT::f64 == Op.getValueType(); + bool isDouble = Op.getValueType() == MVT::f64; LD = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0)); SDOperand FP = DAG.getNode(isDouble?AlphaISD::CVTQT_:AlphaISD::CVTQS_, isDouble?MVT::f64:MVT::f32, LD); return FP; } case ISD::FP_TO_SINT: { - bool isDouble = MVT::f64 == Op.getOperand(0).getValueType(); + bool isDouble = Op.getOperand(0).getValueType() == MVT::f64; SDOperand src = Op.getOperand(0); if (!isDouble) //Promote @@ -465,7 +463,7 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::SREM: //Expand only on constant case if (Op.getOperand(1).getOpcode() == ISD::Constant) { - MVT::ValueType VT = Op.Val->getValueType(0); + MVT VT = Op.Val->getValueType(0); SDOperand Tmp1 = Op.Val->getOpcode() == ISD::UREM ? BuildUDIV(Op.Val, DAG, NULL) : BuildSDIV(Op.Val, DAG, NULL); @@ -476,7 +474,7 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { //fall through case ISD::SDIV: case ISD::UDIV: - if (MVT::isInteger(Op.getValueType())) { + if (Op.getValueType().isInteger()) { if (Op.getOperand(1).getOpcode() == ISD::Constant) return Op.getOpcode() == ISD::SDIV ? 
BuildSDIV(Op.Val, DAG, NULL) : BuildUDIV(Op.Val, DAG, NULL); @@ -505,7 +503,7 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { SDOperand Offset = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, Base.getValue(1), Tmp, NULL, 0, MVT::i32); SDOperand DataPtr = DAG.getNode(ISD::ADD, MVT::i64, Base, Offset); - if (MVT::isFloatingPoint(Op.getValueType())) + if (Op.getValueType().isFloatingPoint()) { //if fp && Offset < 6*8, then subtract 6*8 from DataPtr SDOperand FPDataPtr = DAG.getNode(ISD::SUB, MVT::i64, DataPtr, @@ -596,7 +594,7 @@ AlphaTargetLowering::getConstraintType(const std::string &Constraint) const { std::vector<unsigned> AlphaTargetLowering:: getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const { + MVT VT) const { if (Constraint.size() == 1) { switch (Constraint[0]) { default: break; // Unknown constriant letter diff --git a/lib/Target/Alpha/AlphaISelLowering.h b/lib/Target/Alpha/AlphaISelLowering.h index 8738d02..f88437e 100644 --- a/lib/Target/Alpha/AlphaISelLowering.h +++ b/lib/Target/Alpha/AlphaISelLowering.h @@ -67,7 +67,7 @@ namespace llvm { explicit AlphaTargetLowering(TargetMachine &TM); /// getSetCCResultType - Get the SETCC result ValueType - virtual MVT::ValueType getSetCCResultType(const SDOperand &) const; + virtual MVT getSetCCResultType(const SDOperand &) const; /// LowerOperation - Provide custom lowering hooks for some operations. /// @@ -88,7 +88,7 @@ namespace llvm { std::vector<unsigned> getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; bool hasITOF() { return useITOF; } diff --git a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp index b491b13..c181f3c 100644 --- a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp +++ b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp @@ -110,7 +110,7 @@ namespace { bool isIntS16Immediate(ConstantSDNode *CN, short &Imm) { - MVT::ValueType vt = CN->getValueType(0); + MVT vt = CN->getValueType(0); Imm = (short) CN->getValue(); if (vt >= MVT::i1 && vt <= MVT::i16) { return true; @@ -139,7 +139,7 @@ namespace { static bool isFPS16Immediate(ConstantFPSDNode *FPN, short &Imm) { - MVT::ValueType vt = FPN->getValueType(0); + MVT vt = FPN->getValueType(0); if (vt == MVT::f32) { int val = FloatToBits(FPN->getValueAPF().convertToFloat()); int sval = (int) ((val << 16) >> 16); @@ -161,10 +161,10 @@ namespace { } //===------------------------------------------------------------------===// - //! MVT::ValueType to "useful stuff" mapping structure: + //! MVT to "useful stuff" mapping structure: struct valtype_map_s { - MVT::ValueType VT; + MVT VT; unsigned ldresult_ins; /// LDRESULT instruction (0 = undefined) bool ldresult_imm; /// LDRESULT instruction requires immediate? 
int prefslot_byte; /// Byte offset of the "preferred" slot @@ -189,7 +189,7 @@ namespace { const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]); - const valtype_map_s *getValueTypeMapEntry(MVT::ValueType VT) + const valtype_map_s *getValueTypeMapEntry(MVT VT) { const valtype_map_s *retval = 0; for (size_t i = 0; i < n_valtype_map; ++i) { @@ -203,7 +203,7 @@ namespace { #ifndef NDEBUG if (retval == 0) { cerr << "SPUISelDAGToDAG.cpp: getValueTypeMapEntry returns NULL for " - << MVT::getValueTypeString(VT) + << VT.getMVTString() << "\n"; abort(); } @@ -364,7 +364,7 @@ bool SPUDAGToDAGISel::SelectAFormAddr(SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Index) { // These match the addr256k operand type: - MVT::ValueType OffsVT = MVT::i16; + MVT OffsVT = MVT::i16; SDOperand Zero = CurDAG->getTargetConstant(0, OffsVT); switch (N.getOpcode()) { @@ -446,7 +446,7 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDOperand Op, SDOperand N, SDOperand &Bas SDOperand &Index, int minOffset, int maxOffset) { unsigned Opc = N.getOpcode(); - unsigned PtrTy = SPUtli.getPointerTy(); + MVT PtrTy = SPUtli.getPointerTy(); if (Opc == ISD::FrameIndex) { // Stack frame index must be less than 512 (divided by 16): @@ -587,7 +587,7 @@ SPUDAGToDAGISel::Select(SDOperand Op) { unsigned Opc = N->getOpcode(); int n_ops = -1; unsigned NewOpc; - MVT::ValueType OpVT = Op.getValueType(); + MVT OpVT = Op.getValueType(); SDOperand Ops[8]; if (Opc >= ISD::BUILTIN_OP_END && Opc < SPUISD::FIRST_NUMBER) { @@ -596,7 +596,7 @@ SPUDAGToDAGISel::Select(SDOperand Op) { // Selects to (add $sp, FI * stackSlotSize) int FI = SPUFrameInfo::FItoStackOffset(cast<FrameIndexSDNode>(N)->getIndex()); - MVT::ValueType PtrVT = SPUtli.getPointerTy(); + MVT PtrVT = SPUtli.getPointerTy(); // Adjust stack slot to actual offset in frame: if (isS10Constant(FI)) { @@ -636,7 +636,7 @@ SPUDAGToDAGISel::Select(SDOperand Op) { } } else if (Opc == SPUISD::LDRESULT) { // Custom select instructions for LDRESULT - unsigned VT = N->getValueType(0); + MVT VT = N->getValueType(0); SDOperand Arg = N->getOperand(0); SDOperand Chain = N->getOperand(1); SDNode *Result; @@ -644,7 +644,7 @@ SPUDAGToDAGISel::Select(SDOperand Op) { if (vtm->ldresult_ins == 0) { cerr << "LDRESULT for unsupported type: " - << MVT::getValueTypeString(VT) + << VT.getMVTString() << "\n"; abort(); } @@ -670,7 +670,7 @@ SPUDAGToDAGISel::Select(SDOperand Op) { /* || Op0.getOpcode() == SPUISD::AFormAddr) */ // (IndirectAddr (LDRESULT, imm)) SDOperand Op1 = Op.getOperand(1); - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); DEBUG(cerr << "CellSPU: IndirectAddr(LDRESULT, imm):\nOp0 = "); DEBUG(Op.getOperand(0).Val->dump(CurDAG)); diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp index 0a736d7..36c73b8 100644 --- a/lib/Target/CellSPU/SPUISelLowering.cpp +++ b/lib/Target/CellSPU/SPUISelLowering.cpp @@ -38,9 +38,9 @@ using namespace llvm; namespace { std::map<unsigned, const char *> node_names; - //! MVT::ValueType mapping to useful data for Cell SPU + //! 
MVT mapping to useful data for Cell SPU struct valtype_map_s { - const MVT::ValueType valtype; + const MVT valtype; const int prefslot_byte; }; @@ -57,7 +57,7 @@ namespace { const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]); - const valtype_map_s *getValueTypeMapEntry(MVT::ValueType VT) { + const valtype_map_s *getValueTypeMapEntry(MVT VT) { const valtype_map_s *retval = 0; for (size_t i = 0; i < n_valtype_map; ++i) { @@ -70,7 +70,7 @@ namespace { #ifndef NDEBUG if (retval == 0) { cerr << "getValueTypeMapEntry returns NULL for " - << MVT::getValueTypeString(VT) + << VT.getMVTString() << "\n"; abort(); } @@ -162,8 +162,10 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM) // SPU's loads and stores have to be custom lowered: for (unsigned sctype = (unsigned) MVT::i1; sctype < (unsigned) MVT::f128; ++sctype) { - setOperationAction(ISD::LOAD, sctype, Custom); - setOperationAction(ISD::STORE, sctype, Custom); + MVT VT = (MVT::SimpleValueType)sctype; + + setOperationAction(ISD::LOAD, VT, Custom); + setOperationAction(ISD::STORE, VT, Custom); } // Custom lower BRCOND for i1, i8 to "promote" the result to @@ -296,9 +298,11 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM) // appropriate instructions to materialize the address. for (unsigned sctype = (unsigned) MVT::i1; sctype < (unsigned) MVT::f128; ++sctype) { - setOperationAction(ISD::GlobalAddress, sctype, Custom); - setOperationAction(ISD::ConstantPool, sctype, Custom); - setOperationAction(ISD::JumpTable, sctype, Custom); + MVT VT = (MVT::SimpleValueType)sctype; + + setOperationAction(ISD::GlobalAddress, VT, Custom); + setOperationAction(ISD::ConstantPool, VT, Custom); + setOperationAction(ISD::JumpTable, VT, Custom); } // RET must be custom lowered, to meet ABI requirements @@ -335,36 +339,38 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM) addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass); addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass); - for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; - VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) { + for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; + i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { + MVT VT = (MVT::SimpleValueType)i; + // add/sub are legal for all supported vector VT's. - setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal); - setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal); + setOperationAction(ISD::ADD , VT, Legal); + setOperationAction(ISD::SUB , VT, Legal); // mul has to be custom lowered. 
- setOperationAction(ISD::MUL , (MVT::ValueType)VT, Custom); - - setOperationAction(ISD::AND , (MVT::ValueType)VT, Legal); - setOperationAction(ISD::OR , (MVT::ValueType)VT, Legal); - setOperationAction(ISD::XOR , (MVT::ValueType)VT, Legal); - setOperationAction(ISD::LOAD , (MVT::ValueType)VT, Legal); - setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Legal); - setOperationAction(ISD::STORE, (MVT::ValueType)VT, Legal); + setOperationAction(ISD::MUL , VT, Custom); + + setOperationAction(ISD::AND , VT, Legal); + setOperationAction(ISD::OR , VT, Legal); + setOperationAction(ISD::XOR , VT, Legal); + setOperationAction(ISD::LOAD , VT, Legal); + setOperationAction(ISD::SELECT, VT, Legal); + setOperationAction(ISD::STORE, VT, Legal); // These operations need to be expanded: - setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Custom); + setOperationAction(ISD::SDIV, VT, Expand); + setOperationAction(ISD::SREM, VT, Expand); + setOperationAction(ISD::UDIV, VT, Expand); + setOperationAction(ISD::UREM, VT, Expand); + setOperationAction(ISD::FDIV, VT, Custom); // Custom lower build_vector, constant pool spills, insert and // extract vector elements: - setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom); - setOperationAction(ISD::ConstantPool, (MVT::ValueType)VT, Custom); - setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom); + setOperationAction(ISD::BUILD_VECTOR, VT, Custom); + setOperationAction(ISD::ConstantPool, VT, Custom); + setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); + setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); + setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); } setOperationAction(ISD::MUL, MVT::v16i8, Custom); @@ -447,10 +453,9 @@ SPUTargetLowering::getTargetNodeName(unsigned Opcode) const return ((i != node_names.end()) ? 
i->second : 0); } -MVT::ValueType -SPUTargetLowering::getSetCCResultType(const SDOperand &Op) const { - MVT::ValueType VT = Op.getValueType(); - if (MVT::isInteger(VT)) +MVT SPUTargetLowering::getSetCCResultType(const SDOperand &Op) const { + MVT VT = Op.getValueType(); + if (VT.isInteger()) return VT; else return MVT::i32; @@ -490,9 +495,9 @@ static SDOperand AlignedLoad(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST, LSBaseSDNode *LSN, unsigned &alignment, int &alignOffs, int &prefSlotOffs, - MVT::ValueType &VT, bool &was16aligned) + MVT &VT, bool &was16aligned) { - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); const valtype_map_s *vtm = getValueTypeMapEntry(VT); SDOperand basePtr = LSN->getBasePtr(); SDOperand chain = LSN->getChain(); @@ -573,8 +578,8 @@ static SDOperand LowerLOAD(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { LoadSDNode *LN = cast<LoadSDNode>(Op); SDOperand the_chain = LN->getChain(); - MVT::ValueType VT = LN->getMemoryVT(); - MVT::ValueType OpVT = Op.Val->getValueType(0); + MVT VT = LN->getMemoryVT(); + MVT OpVT = Op.Val->getValueType(0); ISD::LoadExtType ExtType = LN->getExtensionType(); unsigned alignment = LN->getAlignment(); SDOperand Ops[8]; @@ -601,7 +606,7 @@ LowerLOAD(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { if (was16aligned) { Ops[2] = DAG.getConstant(rotamt, MVT::i16); } else { - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); LoadSDNode *LN1 = cast<LoadSDNode>(result); Ops[2] = DAG.getNode(ISD::ADD, PtrVT, LN1->getBasePtr(), DAG.getConstant(rotamt, PtrVT)); @@ -613,15 +618,15 @@ LowerLOAD(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { if (VT == OpVT || ExtType == ISD::EXTLOAD) { SDVTList scalarvts; - MVT::ValueType vecVT = MVT::v16i8; + MVT vecVT = MVT::v16i8; // Convert the loaded v16i8 vector to the appropriate vector type // specified by the operand: if (OpVT == VT) { if (VT != MVT::i1) - vecVT = MVT::getVectorType(VT, (128 / MVT::getSizeInBits(VT))); + vecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits())); } else - vecVT = MVT::getVectorType(OpVT, (128 / MVT::getSizeInBits(OpVT))); + vecVT = MVT::getVectorVT(OpVT, (128 / OpVT.getSizeInBits())); Ops[0] = the_chain; Ops[1] = DAG.getNode(ISD::BIT_CONVERT, vecVT, result); @@ -681,9 +686,9 @@ static SDOperand LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { StoreSDNode *SN = cast<StoreSDNode>(Op); SDOperand Value = SN->getValue(); - MVT::ValueType VT = Value.getValueType(); - MVT::ValueType StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT()); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT VT = Value.getValueType(); + MVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT()); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); unsigned alignment = SN->getAlignment(); switch (SN->getAddressingMode()) { @@ -693,11 +698,11 @@ LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { // The vector type we really want to load from the 16-byte chunk, except // in the case of MVT::i1, which has to be v16i8. 
- unsigned vecVT, stVecVT = MVT::v16i8; + MVT vecVT, stVecVT = MVT::v16i8; if (StVT != MVT::i1) - stVecVT = MVT::getVectorType(StVT, (128 / MVT::getSizeInBits(StVT))); - vecVT = MVT::getVectorType(VT, (128 / MVT::getSizeInBits(VT))); + stVecVT = MVT::getVectorVT(StVT, (128 / StVT.getSizeInBits())); + vecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits())); SDOperand alignLoadVec = AlignedLoad(Op, DAG, ST, SN, alignment, @@ -773,7 +778,7 @@ LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { /// Generate the address of a constant pool entry. static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); Constant *C = CP->getConstVal(); SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment()); @@ -798,7 +803,7 @@ LowerConstantPool(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); SDOperand Zero = DAG.getConstant(0, PtrVT); @@ -821,7 +826,7 @@ LowerJumpTable(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); GlobalValue *GV = GSDN->getGlobal(); SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset()); @@ -853,7 +858,7 @@ LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { */ static SDOperand LowerConstant(SDOperand Op, SelectionDAG &DAG) { - unsigned VT = Op.getValueType(); + MVT VT = Op.getValueType(); ConstantSDNode *CN = cast<ConstantSDNode>(Op.Val); if (VT == MVT::i64) { @@ -862,7 +867,7 @@ LowerConstant(SDOperand Op, SelectionDAG &DAG) { DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T)); } else { cerr << "LowerConstant: unhandled constant type " - << MVT::getValueTypeString(VT) + << VT.getMVTString() << "\n"; abort(); /*NOTREACHED*/ @@ -874,7 +879,7 @@ LowerConstant(SDOperand Op, SelectionDAG &DAG) { //! Custom lower double precision floating point constants static SDOperand LowerConstantFP(SDOperand Op, SelectionDAG &DAG) { - unsigned VT = Op.getValueType(); + MVT VT = Op.getValueType(); ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.Val); assert((FP != 0) && @@ -894,8 +899,8 @@ static SDOperand LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { SDOperand Cond = Op.getOperand(1); - MVT::ValueType CondVT = Cond.getValueType(); - MVT::ValueType CondNVT; + MVT CondVT = Cond.getValueType(); + MVT CondNVT; if (CondVT == MVT::i1 || CondVT == MVT::i8) { CondNVT = (CondVT == MVT::i1 ? MVT::i32 : MVT::i16); @@ -924,19 +929,19 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex) unsigned ArgRegIdx = 0; unsigned StackSlotSize = SPUFrameInfo::stackSlotSize(); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Add DAG nodes to load the arguments or copy them out of registers. 
for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { SDOperand ArgVal; bool needsLoad = false; - MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); - unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8; + MVT ObjectVT = Op.getValue(ArgNo).getValueType(); + unsigned ObjSize = ObjectVT.getSizeInBits()/8; - switch (ObjectVT) { + switch (ObjectVT.getSimpleVT()) { default: { cerr << "LowerFORMAL_ARGUMENTS Unhandled argument type: " - << MVT::getValueTypeString(ObjectVT) + << ObjectVT.getMVTString() << "\n"; abort(); } @@ -1032,7 +1037,7 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex) // If the function takes variable number of arguments, make a frame index for // the start of the first vararg value... for expansion of llvm.va_start. if (isVarArg) { - VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8, + VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, ArgOffset); SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); // If this function is vararg, store any remaining integer argument regs to @@ -1046,7 +1051,7 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex) SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by four for the next argument to store - SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT); + SDOperand PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } if (!MemOps.empty()) @@ -1056,7 +1061,7 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex) ArgValues.push_back(Root); // Return the new list of results. - std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(), + std::vector<MVT> RetVT(Op.Val->value_begin(), Op.Val->value_end()); return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size()); } @@ -1090,7 +1095,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs(); // Handy pointer type - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Accumulate how many bytes are to be pushed on the stack, including the // linkage area, and parameter passing area. According to the SPU ABI, @@ -1120,7 +1125,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff); - switch (Arg.getValueType()) { + switch (Arg.getValueType().getSimpleVT()) { default: assert(0 && "Unexpected ValueType for argument!"); case MVT::i32: case MVT::i64: @@ -1174,7 +1179,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { InFlag = Chain.getValue(1); } - std::vector<MVT::ValueType> NodeTys; + std::vector<MVT> NodeTys; NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. @@ -1186,7 +1191,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { // node so that legalize doesn't hack it. 
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { GlobalValue *GV = G->getGlobal(); - unsigned CalleeVT = Callee.getValueType(); + MVT CalleeVT = Callee.getValueType(); SDOperand Zero = DAG.getConstant(0, PtrVT); SDOperand GA = DAG.getTargetGlobalAddress(GV, CalleeVT); @@ -1243,7 +1248,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { NodeTys.clear(); // If the call has results, copy the values out of the ret val registers. - switch (Op.Val->getValueType(0)) { + switch (Op.Val->getValueType(0).getSimpleVT()) { default: assert(0 && "Unexpected ret value!"); case MVT::Other: break; case MVT::i32: @@ -1365,7 +1370,7 @@ getVecImm(SDNode *N) { /// and the value fits into an unsigned 18-bit constant, and if so, return the /// constant SDOperand SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType) { + MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { uint64_t Value = CN->getValue(); if (ValueType == MVT::i64) { @@ -1387,7 +1392,7 @@ SDOperand SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG, /// and the value fits into a signed 16-bit constant, and if so, return the /// constant SDOperand SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType) { + MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { int64_t Value = CN->getSignExtended(); if (ValueType == MVT::i64) { @@ -1410,7 +1415,7 @@ SDOperand SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG, /// and the value fits into a signed 10-bit constant, and if so, return the /// constant SDOperand SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType) { + MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { int64_t Value = CN->getSignExtended(); if (ValueType == MVT::i64) { @@ -1436,7 +1441,7 @@ SDOperand SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG, /// constant vectors. Thus, we test to see if the upper and lower bytes are the /// same value. SDOperand SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType) { + MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { int Value = (int) CN->getValue(); if (ValueType == MVT::i16 @@ -1455,7 +1460,7 @@ SDOperand SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG, /// and the value fits into a signed 16-bit constant, and if so, return the /// constant SDOperand SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType) { + MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { uint64_t Value = CN->getValue(); if ((ValueType == MVT::i32 @@ -1495,7 +1500,7 @@ static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2], // Start with zero'd results. VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0; - unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType()); + unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits(); for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { SDOperand OpVal = BV->getOperand(i); @@ -1597,7 +1602,7 @@ static bool isConstantSplat(const uint64_t Bits128[2], // this case more efficiently than a constant pool load, lower it to the // sequence of ops that should be used. static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); // If this is a vector of constants or undefs, get the bits. A bit in // UndefBits is set if the corresponding element of the vector is an // ISD::UNDEF value. 
For undefs, the corresponding VectorBits values are @@ -1608,11 +1613,11 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { int SplatSize; if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits) || !isConstantSplat(VectorBits, UndefBits, - MVT::getSizeInBits(MVT::getVectorElementType(VT)), + VT.getVectorElementType().getSizeInBits(), SplatBits, SplatUndef, SplatSize)) return SDOperand(); // Not a constant vector, not a splat. - switch (VT) { + switch (VT.getSimpleVT()) { default: case MVT::v4f32: { uint32_t Value32 = SplatBits; @@ -1649,14 +1654,14 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { Value16 = (unsigned short) (SplatBits & 0xffff); else Value16 = (unsigned short) (SplatBits | (SplatBits << 8)); - SDOperand T = DAG.getConstant(Value16, MVT::getVectorElementType(VT)); + SDOperand T = DAG.getConstant(Value16, VT.getVectorElementType()); SDOperand Ops[8]; for (int i = 0; i < 8; ++i) Ops[i] = T; return DAG.getNode(ISD::BUILD_VECTOR, VT, Ops, 8); } case MVT::v4i32: { unsigned int Value = SplatBits; - SDOperand T = DAG.getConstant(Value, MVT::getVectorElementType(VT)); + SDOperand T = DAG.getConstant(Value, VT.getVectorElementType()); return DAG.getNode(ISD::BUILD_VECTOR, VT, T, T, T, T); } case MVT::v2i64: { @@ -1772,7 +1777,7 @@ static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { // If we have a single element being moved from V1 to V2, this can be handled // using the C*[DX] compute mask instructions, but the vector elements have // to be monotonically increasing with one exception element. - MVT::ValueType EltVT = MVT::getVectorElementType(V1.getValueType()); + MVT EltVT = V1.getValueType().getVectorElementType(); unsigned EltsFromV2 = 0; unsigned V2Elt = 0; unsigned V2EltIdx0 = 0; @@ -1811,7 +1816,7 @@ static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { MachineFunction &MF = DAG.getMachineFunction(); MachineRegisterInfo &RegInfo = MF.getRegInfo(); unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Initialize temporary register to 0 SDOperand InitTempReg = DAG.getCopyToReg(DAG.getEntryNode(), VReg, DAG.getConstant(0, PtrVT)); @@ -1824,7 +1829,7 @@ static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { return DAG.getNode(SPUISD::SHUFB, V1.getValueType(), V2, V1, ShufMaskOp); } else { // Convert the SHUFFLE_VECTOR mask's input element units to the actual bytes. 
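To make the "element units to bytes" conversion mentioned just above concrete: for a v8i16 shuffle the element type is i16, so EltVT.getSizeInBits()/8 yields 2 bytes per element. The loop body is elided in this hunk, so the sketch below assumes the usual expansion in which mask element e turns into byte indices e*BytesPerElement through e*BytesPerElement + BytesPerElement - 1; a runnable standalone illustration, not the target code itself:

    #include <cstdio>
    #include <vector>

    // Expand an element-unit shuffle mask into a byte-unit mask.
    // BytesPerElement corresponds to EltVT.getSizeInBits()/8 in the lowering code;
    // here it is just a plain parameter.
    std::vector<unsigned> expandMaskToBytes(const std::vector<unsigned> &EltMask,
                                            unsigned BytesPerElement) {
      std::vector<unsigned> ByteMask;
      for (unsigned i = 0, e = EltMask.size(); i != e; ++i)
        for (unsigned j = 0; j != BytesPerElement; ++j)
          ByteMask.push_back(EltMask[i] * BytesPerElement + j);
      return ByteMask;
    }

    int main() {
      // v8i16: i16 elements, so 16/8 == 2 bytes per element.
      unsigned Elts[] = {1, 0, 3, 2};
      std::vector<unsigned> ByteMask =
          expandMaskToBytes(std::vector<unsigned>(Elts, Elts + 4), 2);
      for (unsigned i = 0; i != ByteMask.size(); ++i)
        std::printf("%u ", ByteMask[i]);   // prints: 2 3 0 1 6 7 4 5
      std::printf("\n");
      return 0;
    }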
- unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8; + unsigned BytesPerElement = EltVT.getSizeInBits()/8; SmallVector<SDOperand, 16> ResultMask; for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) { @@ -1855,11 +1860,11 @@ static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { ConstantSDNode *CN = cast<ConstantSDNode>(Op0.Val); SmallVector<SDOperand, 16> ConstVecValues; - MVT::ValueType VT; + MVT VT; size_t n_copies; // Create a constant vector: - switch (Op.getValueType()) { + switch (Op.getValueType().getSimpleVT()) { default: assert(0 && "Unexpected constant value type in " "LowerSCALAR_TO_VECTOR"); case MVT::v16i8: n_copies = 16; VT = MVT::i8; break; @@ -1878,7 +1883,7 @@ static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { &ConstVecValues[0], ConstVecValues.size()); } else { // Otherwise, copy the value from one register to another: - switch (Op0.getValueType()) { + switch (Op0.getValueType().getSimpleVT()) { default: assert(0 && "Unexpected value type in LowerSCALAR_TO_VECTOR"); case MVT::i8: case MVT::i16: @@ -1894,7 +1899,14 @@ static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { } static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) { - switch (Op.getValueType()) { + switch (Op.getValueType().getSimpleVT()) { + default: + cerr << "CellSPU: Unknown vector multiplication, got " + << Op.getValueType().getMVTString() + << "\n"; + abort(); + /*NOTREACHED*/ + case MVT::v4i32: { SDOperand rA = Op.getOperand(0); SDOperand rB = Op.getOperand(1); @@ -2020,13 +2032,6 @@ static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) { DAG.getNode(ISD::OR, MVT::v4i32, LoProd, HiProd)); } - - default: - cerr << "CellSPU: Unknown vector multiplication, got " - << MVT::getValueTypeString(Op.getValueType()) - << "\n"; - abort(); - /*NOTREACHED*/ } return SDOperand(); @@ -2038,7 +2043,7 @@ static SDOperand LowerFDIVf32(SDOperand Op, SelectionDAG &DAG) { SDOperand A = Op.getOperand(0); SDOperand B = Op.getOperand(1); - unsigned VT = Op.getValueType(); + MVT VT = Op.getValueType(); unsigned VRegBR, VRegC; @@ -2077,7 +2082,7 @@ static SDOperand LowerFDIVf32(SDOperand Op, SelectionDAG &DAG) { } static SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { - unsigned VT = Op.getValueType(); + MVT VT = Op.getValueType(); SDOperand N = Op.getOperand(0); SDOperand Elt = Op.getOperand(1); SDOperand ShufMask[16]; @@ -2104,9 +2109,11 @@ static SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { // Need to generate shuffle mask and extract: int prefslot_begin = -1, prefslot_end = -1; - int elt_byte = EltNo * MVT::getSizeInBits(VT) / 8; + int elt_byte = EltNo * VT.getSizeInBits() / 8; - switch (VT) { + switch (VT.getSimpleVT()) { + default: + assert(false && "Invalid value type!"); case MVT::i8: { prefslot_begin = prefslot_end = 3; break; @@ -2159,12 +2166,12 @@ static SDOperand LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { SDOperand VecOp = Op.getOperand(0); SDOperand ValOp = Op.getOperand(1); SDOperand IdxOp = Op.getOperand(2); - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp); assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!"); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Use $2 because it's always 16-byte aligned and it's available: SDOperand PtrBase = DAG.getRegister(SPU::R2, PtrVT); @@ -2270,9 +2277,8 
@@ static SDOperand LowerI8Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) { - MVT::ValueType VT = Op.getValueType(); - unsigned VecVT = - MVT::getVectorType(VT, (128 / MVT::getSizeInBits(VT))); + MVT VT = Op.getValueType(); + MVT VecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits())); SDOperand Op0 = Op.getOperand(0); @@ -2280,9 +2286,8 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) case ISD::ZERO_EXTEND: case ISD::SIGN_EXTEND: case ISD::ANY_EXTEND: { - MVT::ValueType Op0VT = Op0.getValueType(); - unsigned Op0VecVT = - MVT::getVectorType(Op0VT, (128 / MVT::getSizeInBits(Op0VT))); + MVT Op0VT = Op0.getValueType(); + MVT Op0VecVT = MVT::getVectorVT(Op0VT, (128 / Op0VT.getSizeInBits())); assert(Op0VT == MVT::i32 && "CellSPU: Zero/sign extending something other than i32"); @@ -2361,7 +2366,7 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) case ISD::SHL: { SDOperand ShiftAmt = Op.getOperand(1); - unsigned ShiftAmtVT = unsigned(ShiftAmt.getValueType()); + MVT ShiftAmtVT = ShiftAmt.getValueType(); SDOperand Op0Vec = DAG.getNode(SPUISD::PROMOTE_SCALAR, VecVT, Op0); SDOperand MaskLower = DAG.getNode(SPUISD::SELB, VecVT, @@ -2386,9 +2391,9 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) } case ISD::SRL: { - unsigned VT = unsigned(Op.getValueType()); + MVT VT = Op.getValueType(); SDOperand ShiftAmt = Op.getOperand(1); - unsigned ShiftAmtVT = unsigned(ShiftAmt.getValueType()); + MVT ShiftAmtVT = ShiftAmt.getValueType(); SDOperand ShiftAmtBytes = DAG.getNode(ISD::SRL, ShiftAmtVT, ShiftAmt, @@ -2409,7 +2414,7 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) SDOperand Op0 = DAG.getNode(SPUISD::PROMOTE_SCALAR, MVT::v2i64, Op.getOperand(0)); SDOperand ShiftAmt = Op.getOperand(1); - unsigned ShiftVT = ShiftAmt.getValueType(); + MVT ShiftVT = ShiftAmt.getValueType(); // Negate variable shift amounts if (!isa<ConstantSDNode>(ShiftAmt)) { @@ -2450,7 +2455,7 @@ static SDOperand LowerByteImmed(SDOperand Op, SelectionDAG &DAG) { SDOperand ConstVec; SDOperand Arg; - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); ConstVec = Op.getOperand(0); Arg = Op.getOperand(1); @@ -2474,7 +2479,7 @@ LowerByteImmed(SDOperand Op, SelectionDAG &DAG) { if (!GetConstantBuildVectorBits(ConstVec.Val, VectorBits, UndefBits) && isConstantSplat(VectorBits, UndefBits, - MVT::getSizeInBits(MVT::getVectorElementType(VT)), + VT.getVectorElementType().getSizeInBits(), SplatBits, SplatUndef, SplatSize)) { SDOperand tcVec[16]; SDOperand tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8); @@ -2493,12 +2498,12 @@ LowerByteImmed(SDOperand Op, SelectionDAG &DAG) { } //! Lower i32 multiplication -static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG, unsigned VT, +static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG, MVT VT, unsigned Opc) { - switch (VT) { + switch (VT.getSimpleVT()) { default: cerr << "CellSPU: Unknown LowerMUL value type, got " - << MVT::getValueTypeString(Op.getValueType()) + << Op.getValueType().getMVTString() << "\n"; abort(); /*NOTREACHED*/ @@ -2525,10 +2530,12 @@ static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG, unsigned VT, ones per byte, which then have to be accumulated. 
*/ static SDOperand LowerCTPOP(SDOperand Op, SelectionDAG &DAG) { - unsigned VT = Op.getValueType(); - unsigned vecVT = MVT::getVectorType(VT, (128 / MVT::getSizeInBits(VT))); + MVT VT = Op.getValueType(); + MVT vecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits())); - switch (VT) { + switch (VT.getSimpleVT()) { + default: + assert(false && "Invalid value type!"); case MVT::i8: { SDOperand N = Op.getOperand(0); SDOperand Elt0 = DAG.getConstant(0, MVT::i32); @@ -2630,7 +2637,7 @@ SDOperand SPUTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { unsigned Opc = (unsigned) Op.getOpcode(); - unsigned VT = (unsigned) Op.getValueType(); + MVT VT = Op.getValueType(); switch (Opc) { default: { @@ -2704,7 +2711,7 @@ SPUTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) // Vector and i8 multiply: case ISD::MUL: - if (MVT::isVector(VT)) + if (VT.isVector()) return LowerVectorMUL(Op, DAG); else if (VT == MVT::i8) return LowerI8Math(Op, DAG, Opc); @@ -2911,7 +2918,7 @@ SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const std::pair<unsigned, const TargetRegisterClass*> SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const + MVT VT) const { if (Constraint.size() == 1) { // GCC RS6000 Constraint Letters @@ -2961,9 +2968,9 @@ SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, case SPUISD::PROMOTE_SCALAR: { SDOperand Op0 = Op.getOperand(0); - MVT::ValueType Op0VT = Op0.getValueType(); - unsigned Op0VTBits = MVT::getSizeInBits(Op0VT); - uint64_t InMask = MVT::getIntVTBitMask(Op0VT); + MVT Op0VT = Op0.getValueType(); + unsigned Op0VTBits = Op0VT.getSizeInBits(); + uint64_t InMask = Op0VT.getIntegerVTBitMask(); KnownZero |= APInt(Op0VTBits, ~InMask, false); KnownOne |= APInt(Op0VTBits, InMask, false); break; @@ -2972,9 +2979,9 @@ SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, case SPUISD::LDRESULT: case SPUISD::EXTRACT_ELT0: case SPUISD::EXTRACT_ELT0_CHAINED: { - MVT::ValueType OpVT = Op.getValueType(); - unsigned OpVTBits = MVT::getSizeInBits(OpVT); - uint64_t InMask = MVT::getIntVTBitMask(OpVT); + MVT OpVT = Op.getValueType(); + unsigned OpVTBits = OpVT.getSizeInBits(); + uint64_t InMask = OpVT.getIntegerVTBitMask(); KnownZero |= APInt(OpVTBits, ~InMask, false); KnownOne |= APInt(OpVTBits, InMask, false); break; diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h index 5632ee3..5c41c29 100644 --- a/lib/Target/CellSPU/SPUISelLowering.h +++ b/lib/Target/CellSPU/SPUISelLowering.h @@ -79,15 +79,15 @@ namespace llvm { /// Predicates that are used for node matching: namespace SPU { SDOperand get_vec_u18imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType); + MVT ValueType); SDOperand get_vec_i16imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType); + MVT ValueType); SDOperand get_vec_i10imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType); + MVT ValueType); SDOperand get_vec_i8imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType); + MVT ValueType); SDOperand get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG, - MVT::ValueType ValueType); + MVT ValueType); SDOperand get_v4i32_imm(SDNode *N, SelectionDAG &DAG); SDOperand get_v2i64_imm(SDNode *N, SelectionDAG &DAG); } @@ -109,7 +109,7 @@ namespace llvm { virtual const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType - Return the ValueType for ISD::SETCC - virtual MVT::ValueType getSetCCResultType(const SDOperand &) const; + virtual MVT 
getSetCCResultType(const SDOperand &) const; /// LowerOperation - Provide custom lowering hooks for some operations. /// @@ -128,7 +128,7 @@ namespace llvm { std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; void LowerAsmOperandForConstraint(SDOperand Op, char ConstraintLetter, std::vector<SDOperand> &Ops, diff --git a/lib/Target/IA64/IA64ISelDAGToDAG.cpp b/lib/Target/IA64/IA64ISelDAGToDAG.cpp index da968b9..0a80653 100644 --- a/lib/Target/IA64/IA64ISelDAGToDAG.cpp +++ b/lib/Target/IA64/IA64ISelDAGToDAG.cpp @@ -119,7 +119,7 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDOperand Op) { bool isFP=false; - if(MVT::isFloatingPoint(Tmp1.getValueType())) + if(Tmp1.getValueType().isFloatingPoint()) isFP=true; bool isModulus=false; // is it a division or a modulus? @@ -469,9 +469,9 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { AddToISelQueue(Chain); AddToISelQueue(Address); - MVT::ValueType TypeBeingLoaded = LD->getMemoryVT(); + MVT TypeBeingLoaded = LD->getMemoryVT(); unsigned Opc; - switch (TypeBeingLoaded) { + switch (TypeBeingLoaded.getSimpleVT()) { default: #ifndef NDEBUG N->dump(CurDAG); @@ -511,7 +511,7 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { unsigned Opc; if (ISD::isNON_TRUNCStore(N)) { - switch (N->getOperand(1).getValueType()) { + switch (N->getOperand(1).getValueType().getSimpleVT()) { default: assert(0 && "unknown type in store"); case MVT::i1: { // this is a bool Opc = IA64::ST1; // we store either 0 or 1 as a byte @@ -531,7 +531,7 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { case MVT::f64: Opc = IA64::STF8; break; } } else { // Truncating store - switch(ST->getMemoryVT()) { + switch(ST->getMemoryVT().getSimpleVT()) { default: assert(0 && "unknown type in truncstore"); case MVT::i8: Opc = IA64::ST1; break; case MVT::i16: Opc = IA64::ST2; break; diff --git a/lib/Target/IA64/IA64ISelLowering.cpp b/lib/Target/IA64/IA64ISelLowering.cpp index 5d29200..8b711d8 100644 --- a/lib/Target/IA64/IA64ISelLowering.cpp +++ b/lib/Target/IA64/IA64ISelLowering.cpp @@ -139,8 +139,7 @@ const char *IA64TargetLowering::getTargetNodeName(unsigned Opcode) const { } } -MVT::ValueType -IA64TargetLowering::getSetCCResultType(const SDOperand &) const { +MVT IA64TargetLowering::getSetCCResultType(const SDOperand &) const { return MVT::i1; } @@ -181,7 +180,7 @@ IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { SDOperand newroot, argt; if(count < 8) { // need to fix this logic? maybe. - switch (getValueType(I->getType())) { + switch (getValueType(I->getType()).getSimpleVT()) { default: assert(0 && "ERROR in LowerArgs: can't lower this type of arg.\n"); case MVT::f32: @@ -286,7 +285,7 @@ IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { // Finally, inform the code generator which regs we return values in. 
// (see the ISD::RET: case in the instruction selector) - switch (getValueType(F.getReturnType())) { + switch (getValueType(F.getReturnType()).getSimpleVT()) { default: assert(0 && "i have no idea where to return this type!"); case MVT::isVoid: break; case MVT::i1: @@ -347,10 +346,10 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, for (unsigned i = 0, e = Args.size(); i != e; ++i) { SDOperand Val = Args[i].Node; - MVT::ValueType ObjectVT = Val.getValueType(); + MVT ObjectVT = Val.getValueType(); SDOperand ValToStore(0, 0), ValToConvert(0, 0); unsigned ObjSize=8; - switch (ObjectVT) { + switch (ObjectVT.getSimpleVT()) { default: assert(0 && "unexpected argument type!"); case MVT::i1: case MVT::i8: @@ -442,7 +441,7 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, // flagged for now, but shouldn't have to be (TODO) unsigned seenConverts = 0; for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) { - if(MVT::isFloatingPoint(RegValuesToPass[i].getValueType())) { + if(RegValuesToPass[i].getValueType().isFloatingPoint()) { Chain = DAG.getCopyToReg(Chain, IntArgRegs[i], Converts[seenConverts++], InFlag); InFlag = Chain.getValue(1); @@ -453,7 +452,7 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, unsigned usedFPArgs = 0; for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) { Chain = DAG.getCopyToReg(Chain, - MVT::isInteger(RegValuesToPass[i].getValueType()) ? + RegValuesToPass[i].getValueType().isInteger() ? IntArgRegs[i] : FPArgRegs[usedFPArgs++], RegValuesToPass[i], InFlag); InFlag = Chain.getValue(1); } @@ -466,7 +465,7 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, } */ - std::vector<MVT::ValueType> NodeTys; + std::vector<MVT> NodeTys; std::vector<SDOperand> CallOperands; NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. @@ -492,14 +491,14 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, Chain = DAG.getCopyToReg(Chain, IA64::rp, RPBeforeCall, InFlag); InFlag = Chain.getValue(1); - std::vector<MVT::ValueType> RetVals; + std::vector<MVT> RetVals; RetVals.push_back(MVT::Other); RetVals.push_back(MVT::Flag); - MVT::ValueType RetTyVT = getValueType(RetTy); + MVT RetTyVT = getValueType(RetTy); SDOperand RetVal; if (RetTyVT != MVT::isVoid) { - switch (RetTyVT) { + switch (RetTyVT.getSimpleVT()) { default: assert(0 && "Unknown value type to return!"); case MVT::i1: { // bools are just like other integers (returned in r8) // we *could* fall through to the truncate below, but this saves a @@ -573,8 +572,8 @@ LowerOperation(SDOperand Op, SelectionDAG &DAG) { return DAG.getNode(IA64ISD::RET_FLAG, MVT::Other, AR_PFSVal); case 3: { // Copy the result into the output register & restore ar.pfs - MVT::ValueType ArgVT = Op.getOperand(1).getValueType(); - unsigned ArgReg = MVT::isInteger(ArgVT) ? IA64::r8 : IA64::F8; + MVT ArgVT = Op.getOperand(1).getValueType(); + unsigned ArgReg = ArgVT.isInteger() ? 
IA64::r8 : IA64::F8; AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), VirtGPR, MVT::i64); Copy = DAG.getCopyToReg(AR_PFSVal.getValue(1), ArgReg, Op.getOperand(1), @@ -588,13 +587,13 @@ LowerOperation(SDOperand Op, SelectionDAG &DAG) { return SDOperand(); } case ISD::VAARG: { - MVT::ValueType VT = getPointerTy(); + MVT VT = getPointerTy(); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); SDOperand VAList = DAG.getLoad(VT, Op.getOperand(0), Op.getOperand(1), SV, 0); // Increment the pointer, VAList, to the next vaarg SDOperand VAIncr = DAG.getNode(ISD::ADD, VT, VAList, - DAG.getConstant(MVT::getSizeInBits(VT)/8, + DAG.getConstant(VT.getSizeInBits()/8, VT)); // Store the incremented VAList to the legalized pointer VAIncr = DAG.getStore(VAList.getValue(1), VAIncr, diff --git a/lib/Target/IA64/IA64ISelLowering.h b/lib/Target/IA64/IA64ISelLowering.h index aef51f0..b26c822 100644 --- a/lib/Target/IA64/IA64ISelLowering.h +++ b/lib/Target/IA64/IA64ISelLowering.h @@ -49,7 +49,7 @@ namespace llvm { const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType: return ISD::SETCC's result type. - virtual MVT::ValueType getSetCCResultType(const SDOperand &) const; + virtual MVT getSetCCResultType(const SDOperand &) const; /// LowerArguments - This hook must be implemented to indicate how we should /// lower the arguments for the specified function, into the specified DAG. diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp index ceb4bed..283d693 100644 --- a/lib/Target/Mips/MipsISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp @@ -250,7 +250,7 @@ Select(SDOperand N) AddToISelQueue(LHS); AddToISelQueue(RHS); - MVT::ValueType VT = LHS.getValueType(); + MVT VT = LHS.getValueType(); SDNode *Carry = CurDAG->getTargetNode(Mips::SLTu, VT, Ops, 2); SDNode *AddCarry = CurDAG->getTargetNode(Mips::ADDu, VT, SDOperand(Carry,0), RHS); diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index 0e233c7..16464d1 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -108,8 +108,7 @@ MipsTargetLowering(MipsTargetMachine &TM): TargetLowering(TM) } -MVT::ValueType -MipsTargetLowering::getSetCCResultType(const SDOperand &) const { +MVT MipsTargetLowering::getSetCCResultType(const SDOperand &) const { return MVT::i32; } @@ -223,7 +222,7 @@ LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) SDOperand HiPart; if (!isPIC) { - const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::i32); + const MVT *VTs = DAG.getNodeValueTypes(MVT::i32); SDOperand Ops[] = { GA }; HiPart = DAG.getNode(MipsISD::Hi, VTs, 1, Ops, 1); } else // Emit Load from Global Pointer @@ -256,7 +255,7 @@ LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) SDOperand False = Op.getOperand(3); SDOperand CC = Op.getOperand(4); - const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::i32); + const MVT *VTs = DAG.getNodeValueTypes(MVT::i32); SDOperand Ops[] = { LHS, RHS, CC }; SDOperand SetCCRes = DAG.getNode(ISD::SETCC, VTs, 1, Ops, 3); @@ -270,12 +269,12 @@ LowerJumpTable(SDOperand Op, SelectionDAG &DAG) SDOperand ResNode; SDOperand HiPart; - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { - const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::i32); + const MVT *VTs = DAG.getNodeValueTypes(MVT::i32); SDOperand 
Ops[] = { JTI }; HiPart = DAG.getNode(MipsISD::Hi, VTs, 1, Ops, 1); } else // Emit Load from Global Pointer @@ -341,7 +340,7 @@ LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC) // To meet ABI, Mips must always allocate 16 bytes on // the stack (even if less than 4 are used as arguments) - int VTsize = MVT::getSizeInBits(MVT::i32)/8; + int VTsize = MVT(MVT::i32).getSizeInBits()/8; MFI->CreateFixedObject(VTsize, (VTsize*3)); CCInfo.AnalyzeCallOperands(Op.Val, CC_Mips); @@ -391,7 +390,7 @@ LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC) // This guarantees that when allocating Local Area the firsts // 16 bytes which are alwayes reserved won't be overwritten. LastStackLoc = (16 + VA.getLocMemOffset()); - int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8, + int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8, LastStackLoc); SDOperand PtrOff = DAG.getFrameIndex(FI,getPointerTy()); @@ -575,7 +574,7 @@ LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) // Arguments stored on registers if (VA.isRegLoc()) { - MVT::ValueType RegVT = VA.getLocVT(); + MVT RegVT = VA.getLocVT(); TargetRegisterClass *RC; if (RegVT == MVT::i32) @@ -738,8 +737,7 @@ getConstraintType(const std::string &Constraint) const } std::pair<unsigned, const TargetRegisterClass*> MipsTargetLowering:: -getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const +getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const { if (Constraint.size() == 1) { switch (Constraint[0]) { @@ -753,7 +751,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint, std::vector<unsigned> MipsTargetLowering:: getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const + MVT VT) const { if (Constraint.size() != 1) return std::vector<unsigned>(); diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h index cda36ac..6f621b4 100644 --- a/lib/Target/Mips/MipsISelLowering.h +++ b/lib/Target/Mips/MipsISelLowering.h @@ -66,7 +66,7 @@ namespace llvm { virtual const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType - get the ISD::SETCC result ValueType - MVT::ValueType getSetCCResultType(const SDOperand &) const; + MVT getSetCCResultType(const SDOperand &) const; private: // Lower Operand helpers @@ -93,11 +93,11 @@ namespace llvm { std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; std::vector<unsigned> getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; }; } diff --git a/lib/Target/PIC16/PIC16ISelLowering.cpp b/lib/Target/PIC16/PIC16ISelLowering.cpp index 210f216..deab5d5 100644 --- a/lib/Target/PIC16/PIC16ISelLowering.cpp +++ b/lib/Target/PIC16/PIC16ISelLowering.cpp @@ -204,7 +204,7 @@ SDOperand PIC16TargetLowering:: LowerOperation(SDOperand Op, SelectionDAG &DAG) SDOperand PIC16TargetLowering::LowerBR_CC(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); SDOperand Chain = Op.getOperand(0); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); SDOperand LHS = Op.getOperand(2); @@ -278,7 +278,7 @@ SDOperand PIC16TargetLowering::LowerBR_CC(SDOperand Op, SelectionDAG &DAG) SDOperand PIC16TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = getPointerTy(); + MVT PtrVT = getPointerTy(); GlobalAddressSDNode *GSDN = 
cast<GlobalAddressSDNode>(Op); GlobalValue *GV = GSDN->getGlobal(); @@ -626,7 +626,10 @@ SDOperand PIC16TargetLowering::PerformDAGCombine(SDNode *N, return Stores[0]; } - switch(Src.getValueType()) { + switch(Src.getValueType().getSimpleVT()) { + default: + assert(false && "Invalid value type!"); + case MVT::i8: break; diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index d73d8aa..45a0831 100644 --- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -921,7 +921,7 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { case ISD::LOAD: { // Handle preincrement loads. LoadSDNode *LD = cast<LoadSDNode>(Op); - MVT::ValueType LoadedVT = LD->getMemoryVT(); + MVT LoadedVT = LD->getMemoryVT(); // Normal loads are handled by code generated from the .td file. if (LD->getAddressingMode() != ISD::PRE_INC) @@ -936,7 +936,7 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { if (LD->getValueType(0) != MVT::i64) { // Handle PPC32 integer and normal FP loads. assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load"); - switch (LoadedVT) { + switch (LoadedVT.getSimpleVT()) { default: assert(0 && "Invalid PPC load type!"); case MVT::f64: Opcode = PPC::LFDU; break; case MVT::f32: Opcode = PPC::LFSU; break; @@ -948,7 +948,7 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { } else { assert(LD->getValueType(0) == MVT::i64 && "Unknown load result type!"); assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load"); - switch (LoadedVT) { + switch (LoadedVT.getSimpleVT()) { default: assert(0 && "Invalid PPC load type!"); case MVT::i64: Opcode = PPC::LDU; break; case MVT::i32: Opcode = PPC::LWZU8; break; diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 097e256..f528143 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -256,50 +256,52 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) { // First set operation action for all vector types to expand. Then we // will selectively turn on ones that can be effectively codegen'd. - for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; - VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) { + for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; + i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { + MVT VT = (MVT::SimpleValueType)i; + // add/sub are legal for all supported vector VT's. - setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal); - setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal); + setOperationAction(ISD::ADD , VT, Legal); + setOperationAction(ISD::SUB , VT, Legal); // We promote all shuffles to v16i8. - setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8); + setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote); + AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8); // We promote all non-typed operations to v4i32. 
- setOperationAction(ISD::AND , (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::AND , (MVT::ValueType)VT, MVT::v4i32); - setOperationAction(ISD::OR , (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::OR , (MVT::ValueType)VT, MVT::v4i32); - setOperationAction(ISD::XOR , (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::XOR , (MVT::ValueType)VT, MVT::v4i32); - setOperationAction(ISD::LOAD , (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::LOAD , (MVT::ValueType)VT, MVT::v4i32); - setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32); - setOperationAction(ISD::STORE, (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::STORE, (MVT::ValueType)VT, MVT::v4i32); + setOperationAction(ISD::AND , VT, Promote); + AddPromotedToType (ISD::AND , VT, MVT::v4i32); + setOperationAction(ISD::OR , VT, Promote); + AddPromotedToType (ISD::OR , VT, MVT::v4i32); + setOperationAction(ISD::XOR , VT, Promote); + AddPromotedToType (ISD::XOR , VT, MVT::v4i32); + setOperationAction(ISD::LOAD , VT, Promote); + AddPromotedToType (ISD::LOAD , VT, MVT::v4i32); + setOperationAction(ISD::SELECT, VT, Promote); + AddPromotedToType (ISD::SELECT, VT, MVT::v4i32); + setOperationAction(ISD::STORE, VT, Promote); + AddPromotedToType (ISD::STORE, VT, MVT::v4i32); // No other operations are legal. - setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand); + setOperationAction(ISD::MUL , VT, Expand); + setOperationAction(ISD::SDIV, VT, Expand); + setOperationAction(ISD::SREM, VT, Expand); + setOperationAction(ISD::UDIV, VT, Expand); + setOperationAction(ISD::UREM, VT, Expand); + setOperationAction(ISD::FDIV, VT, Expand); + setOperationAction(ISD::FNEG, VT, Expand); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand); + setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); + setOperationAction(ISD::BUILD_VECTOR, VT, Expand); + setOperationAction(ISD::UMUL_LOHI, VT, Expand); + setOperationAction(ISD::SMUL_LOHI, VT, Expand); + setOperationAction(ISD::UDIVREM, VT, Expand); + setOperationAction(ISD::SDIVREM, VT, Expand); + setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand); + setOperationAction(ISD::FPOW, VT, Expand); + setOperationAction(ISD::CTPOP, VT, Expand); + setOperationAction(ISD::CTLZ, VT, Expand); + setOperationAction(ISD::CTTZ, VT, Expand); } // We can custom expand all 
VECTOR_SHUFFLEs to VPERM, others we can handle @@ -420,8 +422,7 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { } -MVT::ValueType -PPCTargetLowering::getSetCCResultType(const SDOperand &) const { +MVT PPCTargetLowering::getSetCCResultType(const SDOperand &) const { return MVT::i32; } @@ -690,7 +691,7 @@ SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { uint64_t Value = 0; if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { Value = CN->getValue(); - ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8; + ValSizeInBytes = CN->getValueType(0).getSizeInBits()/8; } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); Value = FloatToBits(CN->getValueAPF().convertToFloat()); @@ -1007,7 +1008,7 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, if (!EnablePPCPreinc) return false; SDOperand Ptr; - MVT::ValueType VT; + MVT VT; if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { Ptr = LD->getBasePtr(); VT = LD->getMemoryVT(); @@ -1020,7 +1021,7 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, return false; // PowerPC doesn't have preinc load/store instructions for vectors. - if (MVT::isVector(VT)) + if (VT.isVector()) return false; // TODO: Check reg+reg first. @@ -1055,7 +1056,7 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, SDOperand PPCTargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); Constant *C = CP->getConstVal(); SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment()); @@ -1086,7 +1087,7 @@ SDOperand PPCTargetLowering::LowerConstantPool(SDOperand Op, } SDOperand PPCTargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); SDOperand Zero = DAG.getConstant(0, PtrVT); @@ -1123,7 +1124,7 @@ SDOperand PPCTargetLowering::LowerGlobalTLSAddress(SDOperand Op, SDOperand PPCTargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType PtrVT = Op.getValueType(); + MVT PtrVT = Op.getValueType(); GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); GlobalValue *GV = GSDN->getGlobal(); SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset()); @@ -1170,13 +1171,13 @@ SDOperand PPCTargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { // fold the new nodes. if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { if (C->isNullValue() && CC == ISD::SETEQ) { - MVT::ValueType VT = Op.getOperand(0).getValueType(); + MVT VT = Op.getOperand(0).getValueType(); SDOperand Zext = Op.getOperand(0); if (VT < MVT::i32) { VT = MVT::i32; Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0)); } - unsigned Log2b = Log2_32(MVT::getSizeInBits(VT)); + unsigned Log2b = Log2_32(VT.getSizeInBits()); SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext); SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz, DAG.getConstant(Log2b, MVT::i32)); @@ -1194,9 +1195,9 @@ SDOperand PPCTargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { // condition register, reading it back out, and masking the correct bit. 
The // normal approach here uses sub to do this instead of xor. Using xor exposes // the result to other bit-twiddling opportunities. - MVT::ValueType LHSVT = Op.getOperand(0).getValueType(); - if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) { - MVT::ValueType VT = Op.getValueType(); + MVT LHSVT = Op.getOperand(0).getValueType(); + if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { + MVT VT = Op.getValueType(); SDOperand Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0), Op.getOperand(1)); return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC); @@ -1225,7 +1226,7 @@ SDOperand PPCTargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG, if (Subtarget.isMachoABI()) { // vastart just stores the address of the VarArgsFrameIndex slot into the // memory location argument. - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0); @@ -1260,15 +1261,15 @@ SDOperand PPCTargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG, SDOperand ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); SDOperand StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT); SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); - uint64_t FrameOffset = MVT::getSizeInBits(PtrVT)/8; + uint64_t FrameOffset = PtrVT.getSizeInBits()/8; SDOperand ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT); - uint64_t StackOffset = MVT::getSizeInBits(PtrVT)/8 - 1; + uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; SDOperand ConstStackOffset = DAG.getConstant(StackOffset, PtrVT); uint64_t FPROffset = 1; @@ -1325,9 +1326,9 @@ static const unsigned *GetFPR(const PPCSubtarget &Subtarget) { /// the stack. 
static unsigned CalculateStackSlotSize(SDOperand Arg, SDOperand Flag, bool isVarArg, unsigned PtrByteSize) { - MVT::ValueType ArgVT = Arg.getValueType(); + MVT ArgVT = Arg.getValueType(); ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Flag)->getArgFlags(); - unsigned ArgSize =MVT::getSizeInBits(ArgVT)/8; + unsigned ArgSize =ArgVT.getSizeInBits()/8; if (Flags.isByVal()) ArgSize = Flags.getByValSize(); ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; @@ -1352,7 +1353,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SDOperand Root = Op.getOperand(0); bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); bool isPPC64 = PtrVT == MVT::i64; bool isMachoABI = Subtarget.isMachoABI(); bool isELF32_ABI = Subtarget.isELF32_ABI(); @@ -1402,8 +1403,8 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, if (!isVarArg && !isPPC64) { for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { - MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); - unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8; + MVT ObjectVT = Op.getValue(ArgNo).getValueType(); + unsigned ObjSize = ObjectVT.getSizeInBits()/8; ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags(); @@ -1416,7 +1417,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, continue; } - switch(ObjectVT) { + switch(ObjectVT.getSimpleVT()) { default: assert(0 && "Unhandled argument type!"); case MVT::i32: case MVT::f32: @@ -1453,8 +1454,8 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { SDOperand ArgVal; bool needsLoad = false; - MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); - unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8; + MVT ObjectVT = Op.getValue(ArgNo).getValueType(); + unsigned ObjSize = ObjectVT.getSizeInBits()/8; unsigned ArgSize = ObjSize; ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags(); @@ -1535,7 +1536,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, continue; } - switch (ObjectVT) { + switch (ObjectVT.getSimpleVT()) { default: assert(0 && "Unhandled argument type!"); case MVT::i32: if (!isPPC64) { @@ -1693,18 +1694,18 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, // Make room for Num_GPR_Regs, Num_FPR_Regs and for a possible frame // pointer. 
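CalculateStackSlotSize in the hunk above rounds the argument size up to a multiple of the pointer size with the usual (n + k - 1) / k * k trick, the ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize expression. A quick standalone illustration of that rounding with plain integers, no LLVM types:

    #include <cassert>

    // Round Size up to the next multiple of Align (Align > 0), mirroring the
    // ((ArgSize + PtrByteSize - 1) / PtrByteSize) * PtrByteSize expression.
    unsigned roundUp(unsigned Size, unsigned Align) {
      return ((Size + Align - 1) / Align) * Align;
    }

    int main() {
      assert(roundUp(13, 8) == 16);  // a 13-byte byval argument on an 8-byte-pointer ABI
      assert(roundUp(4, 8)  == 8);   // a 4-byte value still takes a full slot
      assert(roundUp(16, 8) == 16);  // already aligned sizes are unchanged
      assert(roundUp(6, 4)  == 8);   // same formula with a 4-byte pointer
      return 0;
    }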
- depth = -(Num_GPR_Regs * MVT::getSizeInBits(PtrVT)/8 + - Num_FPR_Regs * MVT::getSizeInBits(MVT::f64)/8 + - MVT::getSizeInBits(PtrVT)/8); + depth = -(Num_GPR_Regs * PtrVT.getSizeInBits()/8 + + Num_FPR_Regs * MVT(MVT::f64).getSizeInBits()/8 + + PtrVT.getSizeInBits()/8); - VarArgsStackOffset = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8, + VarArgsStackOffset = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, ArgOffset); } else depth = ArgOffset; - VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8, + VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, depth); SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); @@ -1716,7 +1717,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by four for the next argument to store - SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT); + SDOperand PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } } @@ -1736,7 +1737,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by four for the next argument to store - SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT); + SDOperand PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } @@ -1748,7 +1749,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by eight for the next argument to store - SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(MVT::f64)/8, + SDOperand PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } @@ -1762,7 +1763,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by eight for the next argument to store - SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(MVT::f64)/8, + SDOperand PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } @@ -1775,7 +1776,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, ArgValues.push_back(Root); // Return the new list of results. - std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(), + std::vector<MVT> RetVT(Op.Val->value_begin(), Op.Val->value_end()); return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size()); } @@ -1807,7 +1808,7 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, for (unsigned i = 0; i != NumOps; ++i) { SDOperand Arg = Call.getOperand(5+2*i); SDOperand Flag = Call.getOperand(5+2*i+1); - MVT::ValueType ArgVT = Arg.getValueType(); + MVT ArgVT = Arg.getValueType(); // Varargs Altivec parameters are padded to a 16 byte boundary. if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 || ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) { @@ -1970,7 +1971,7 @@ static SDOperand EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, isMachoABI); int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc); - MVT::ValueType VT = isPPC64 ? MVT::i64 : MVT::i32; + MVT VT = isPPC64 ? 
MVT::i64 : MVT::i32; SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); Chain = DAG.getStore(Chain, OldRetAddr, NewRetAddrFrIdx, PseudoSourceValue::getFixedStack(), NewRetAddr); @@ -1988,9 +1989,9 @@ CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, SDOperand Arg, int SPDiff, unsigned ArgOffset, SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) { int Offset = ArgOffset + SPDiff; - uint32_t OpSize = (MVT::getSizeInBits(Arg.getValueType())+7)/8; + uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset); - MVT::ValueType VT = isPPC64 ? MVT::i64 : MVT::i32; + MVT VT = isPPC64 ? MVT::i64 : MVT::i32; SDOperand FIN = DAG.getFrameIndex(FI, VT); TailCallArgumentInfo Info; Info.Arg = Arg; @@ -2009,7 +2010,7 @@ SDOperand PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, SDOperand &FPOpOut) { if (SPDiff) { // Load the LR and FP stack slot for later adjusting. - MVT::ValueType VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32; + MVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32; LROpOut = getReturnAddrFrameIndex(DAG); LROpOut = DAG.getLoad(VT, Chain, LROpOut, NULL, 0); Chain = SDOperand(LROpOut.Val, 1); @@ -2043,7 +2044,7 @@ LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDOperand Chain, unsigned ArgOffset, bool isPPC64, bool isTailCall, bool isVector, SmallVector<SDOperand, 8> &MemOpChains, SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) { - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); if (!isTailCall) { if (isVector) { SDOperand StackPtr; @@ -2074,7 +2075,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, bool isMachoABI = Subtarget.isMachoABI(); bool isELF32_ABI = Subtarget.isELF32_ABI(); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); bool isPPC64 = PtrVT == MVT::i64; unsigned PtrByteSize = isPPC64 ? 8 : 4; @@ -2192,7 +2193,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, if (Size==1 || Size==2) { // Very small objects are passed right-justified. // Everything else is passed left-justified. - MVT::ValueType VT = (Size==1) ? MVT::i8 : MVT::i16; + MVT VT = (Size==1) ? MVT::i8 : MVT::i16; if (GPR_idx != NumGPRs) { SDOperand Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, Chain, Arg, NULL, 0, VT); @@ -2244,7 +2245,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, continue; } - switch (Arg.getValueType()) { + switch (Arg.getValueType().getSimpleVT()) { default: assert(0 && "Unexpected ValueType for argument!"); case MVT::i32: case MVT::i64: @@ -2384,7 +2385,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, ArgOffset += 12*16; for (unsigned i = 0; i != NumOps; ++i) { SDOperand Arg = Op.getOperand(5+2*i); - MVT::ValueType ArgType = Arg.getValueType(); + MVT ArgType = Arg.getValueType(); if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { if (++j > NumVRs) { @@ -2450,7 +2451,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, InFlag = Chain.getValue(1); } - std::vector<MVT::ValueType> NodeTys; + std::vector<MVT> NodeTys; NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 
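Two smaller idioms visible in the hunks around this point: a bare constant such as MVT::f64 now has to be wrapped in a temporary MVT before the size query, while a value-typed variable calls the member directly; and value-type lists become std::vector<MVT>, with the plain constants still usable, which suggests the class converts implicitly from the simple enum values. A condensed sketch using the same calls as the diff (not standalone; it assumes the in-tree headers and a PtrVT variable in scope):

    // Size queries: member call on an MVT variable, temporary wrapper for a constant.
    unsigned PtrBytes = PtrVT.getSizeInBits() / 8;          // 4 on PPC32, 8 on PPC64
    unsigned FPRBytes = MVT(MVT::f64).getSizeInBits() / 8;  // always 8

    // Value-type lists now hold MVT objects; the simple constants convert implicitly.
    std::vector<MVT> NodeTys;
    NodeTys.push_back(MVT::Other);  // the chain result
    NodeTys.push_back(MVT::Flag);   // flag for the return-value copy to use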
@@ -2544,7 +2545,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, // Copy all of the result registers out of their specified physreg. for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { CCValAssign &VA = RVLocs[i]; - MVT::ValueType VT = VA.getValVT(); + MVT VT = VA.getValVT(); assert(VA.isRegLoc() && "Can only return in registers!"); Chain = DAG.getCopyFromReg(Chain, VA.getLocReg(), VT, InFlag).getValue(1); ResultVals.push_back(Chain.getValue(0)); @@ -2629,7 +2630,7 @@ SDOperand PPCTargetLowering::LowerSTACKRESTORE(SDOperand Op, SelectionDAG &DAG, // When we pop the dynamic allocation we need to restore the SP link. // Get the corect type for pointers. - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Construct the stack pointer operand. bool IsPPC64 = Subtarget.isPPC64(); @@ -2657,7 +2658,7 @@ PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const { MachineFunction &MF = DAG.getMachineFunction(); bool IsPPC64 = PPCSubTarget.isPPC64(); bool isMachoABI = PPCSubTarget.isMachoABI(); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Get current frame pointer save index. The users of this index will be // primarily DYNALLOC instructions. @@ -2681,7 +2682,7 @@ PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const { MachineFunction &MF = DAG.getMachineFunction(); bool IsPPC64 = PPCSubTarget.isPPC64(); bool isMachoABI = PPCSubTarget.isMachoABI(); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Get current frame pointer save index. The users of this index will be // primarily DYNALLOC instructions. @@ -2709,7 +2710,7 @@ SDOperand PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, SDOperand Size = Op.getOperand(1); // Get the corect type for pointers. - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Negate the size. SDOperand NegSize = DAG.getNode(ISD::SUB, PtrVT, DAG.getConstant(0, PtrVT), Size); @@ -2722,13 +2723,13 @@ SDOperand PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, } SDOperand PPCTargetLowering::LowerAtomicLAS(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.Val->getValueType(0); + MVT VT = Op.Val->getValueType(0); SDOperand Chain = Op.getOperand(0); SDOperand Ptr = Op.getOperand(1); SDOperand Incr = Op.getOperand(2); // Issue a "load and reserve". - std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; VTs.push_back(VT); VTs.push_back(MVT::Other); @@ -2758,14 +2759,14 @@ SDOperand PPCTargetLowering::LowerAtomicLAS(SDOperand Op, SelectionDAG &DAG) { } SDOperand PPCTargetLowering::LowerAtomicLCS(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.Val->getValueType(0); + MVT VT = Op.Val->getValueType(0); SDOperand Chain = Op.getOperand(0); SDOperand Ptr = Op.getOperand(1); SDOperand NewVal = Op.getOperand(2); SDOperand OldVal = Op.getOperand(3); // Issue a "load and reserve". 
- std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; VTs.push_back(VT); VTs.push_back(MVT::Other); @@ -2801,13 +2802,13 @@ SDOperand PPCTargetLowering::LowerAtomicLCS(SDOperand Op, SelectionDAG &DAG) { } SDOperand PPCTargetLowering::LowerAtomicSWAP(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.Val->getValueType(0); + MVT VT = Op.Val->getValueType(0); SDOperand Chain = Op.getOperand(0); SDOperand Ptr = Op.getOperand(1); SDOperand NewVal = Op.getOperand(2); // Issue a "load and reserve". - std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; VTs.push_back(VT); VTs.push_back(MVT::Other); @@ -2837,8 +2838,8 @@ SDOperand PPCTargetLowering::LowerAtomicSWAP(SDOperand Op, SelectionDAG &DAG) { /// possible. SDOperand PPCTargetLowering::LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { // Not FP? Not a fsel. - if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) || - !MVT::isFloatingPoint(Op.getOperand(2).getValueType())) + if (!Op.getOperand(0).getValueType().isFloatingPoint() || + !Op.getOperand(2).getValueType().isFloatingPoint()) return SDOperand(); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); @@ -2846,8 +2847,8 @@ SDOperand PPCTargetLowering::LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { // Cannot handle SETEQ/SETNE. if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand(); - MVT::ValueType ResVT = Op.getValueType(); - MVT::ValueType CmpVT = Op.getOperand(0).getValueType(); + MVT ResVT = Op.getValueType(); + MVT CmpVT = Op.getOperand(0).getValueType(); SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3); @@ -2916,13 +2917,13 @@ SDOperand PPCTargetLowering::LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { // FIXME: Split this code up when LegalizeDAGTypes lands. SDOperand PPCTargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { - assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType())); + assert(Op.getOperand(0).getValueType().isFloatingPoint()); SDOperand Src = Op.getOperand(0); if (Src.getValueType() == MVT::f32) Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src); SDOperand Tmp; - switch (Op.getValueType()) { + switch (Op.getValueType().getSimpleVT()) { default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!"); case MVT::i32: Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src); @@ -2958,7 +2959,7 @@ SDOperand PPCTargetLowering::LowerFP_ROUND_INREG(SDOperand Op, // This sequence changes FPSCR to do round-to-zero, adds the two halves // of the long double, and puts FPSCR back the way it was. We do not // actually model FPSCR. - std::vector<MVT::ValueType> NodeTys; + std::vector<MVT> NodeTys; SDOperand Ops[4], Result, MFFSreg, InFlag, FPreg; NodeTys.push_back(MVT::f64); // Return register @@ -3026,7 +3027,7 @@ SDOperand PPCTargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { // then lfd it and fcfid it. 
MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); int FrameIdx = FrameInfo->CreateStackObject(8, 8); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32, @@ -3069,9 +3070,9 @@ SDOperand PPCTargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { */ MachineFunction &MF = DAG.getMachineFunction(); - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); - std::vector<MVT::ValueType> NodeTys; + MVT VT = Op.getValueType(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + std::vector<MVT> NodeTys; SDOperand MFFSreg, InFlag; // Save FP Control Word to register @@ -3105,13 +3106,13 @@ SDOperand PPCTargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { SDOperand RetVal = DAG.getNode(ISD::XOR, MVT::i32, CWD1, CWD2); - return DAG.getNode((MVT::getSizeInBits(VT) < 16 ? + return DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); } SDOperand PPCTargetLowering::LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); - unsigned BitWidth = MVT::getSizeInBits(VT); + MVT VT = Op.getValueType(); + unsigned BitWidth = VT.getSizeInBits(); assert(Op.getNumOperands() == 3 && VT == Op.getOperand(1).getValueType() && "Unexpected SHL!"); @@ -3121,7 +3122,7 @@ SDOperand PPCTargetLowering::LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) { SDOperand Lo = Op.getOperand(0); SDOperand Hi = Op.getOperand(1); SDOperand Amt = Op.getOperand(2); - MVT::ValueType AmtVT = Amt.getValueType(); + MVT AmtVT = Amt.getValueType(); SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, DAG.getConstant(BitWidth, AmtVT), Amt); @@ -3139,8 +3140,8 @@ SDOperand PPCTargetLowering::LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) { } SDOperand PPCTargetLowering::LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); - unsigned BitWidth = MVT::getSizeInBits(VT); + MVT VT = Op.getValueType(); + unsigned BitWidth = VT.getSizeInBits(); assert(Op.getNumOperands() == 3 && VT == Op.getOperand(1).getValueType() && "Unexpected SRL!"); @@ -3150,7 +3151,7 @@ SDOperand PPCTargetLowering::LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) { SDOperand Lo = Op.getOperand(0); SDOperand Hi = Op.getOperand(1); SDOperand Amt = Op.getOperand(2); - MVT::ValueType AmtVT = Amt.getValueType(); + MVT AmtVT = Amt.getValueType(); SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, DAG.getConstant(BitWidth, AmtVT), Amt); @@ -3168,8 +3169,8 @@ SDOperand PPCTargetLowering::LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) { } SDOperand PPCTargetLowering::LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); - unsigned BitWidth = MVT::getSizeInBits(VT); + MVT VT = Op.getValueType(); + unsigned BitWidth = VT.getSizeInBits(); assert(Op.getNumOperands() == 3 && VT == Op.getOperand(1).getValueType() && "Unexpected SRA!"); @@ -3178,7 +3179,7 @@ SDOperand PPCTargetLowering::LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) { SDOperand Lo = Op.getOperand(0); SDOperand Hi = Op.getOperand(1); SDOperand Amt = Op.getOperand(2); - MVT::ValueType AmtVT = Amt.getValueType(); + MVT AmtVT = Amt.getValueType(); SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, DAG.getConstant(BitWidth, AmtVT), Amt); @@ -3210,7 +3211,7 @@ static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t 
VectorBits[2], // Start with zero'd results. VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0; - unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType()); + unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits(); for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { SDOperand OpVal = BV->getOperand(i); @@ -3296,26 +3297,26 @@ static bool isConstantSplat(const uint64_t Bits128[2], /// BuildSplatI - Build a canonical splati of Val with an element size of /// SplatSize. Cast the result to VT. -static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT, +static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT VT, SelectionDAG &DAG) { assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); - static const MVT::ValueType VTys[] = { // canonical VT to use for each size. + static const MVT VTys[] = { // canonical VT to use for each size. MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 }; - MVT::ValueType ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; + MVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. if (Val == -1) SplatSize = 1; - MVT::ValueType CanonicalVT = VTys[SplatSize-1]; + MVT CanonicalVT = VTys[SplatSize-1]; // Build a canonical splat for this value. - SDOperand Elt = DAG.getConstant(Val, MVT::getVectorElementType(CanonicalVT)); + SDOperand Elt = DAG.getConstant(Val, CanonicalVT.getVectorElementType()); SmallVector<SDOperand, 8> Ops; - Ops.assign(MVT::getVectorNumElements(CanonicalVT), Elt); + Ops.assign(CanonicalVT.getVectorNumElements(), Elt); SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, &Ops[0], Ops.size()); return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res); @@ -3325,7 +3326,7 @@ static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT, /// specified intrinsic ID. static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS, SelectionDAG &DAG, - MVT::ValueType DestVT = MVT::Other) { + MVT DestVT = MVT::Other) { if (DestVT == MVT::Other) DestVT = LHS.getValueType(); return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, DAG.getConstant(IID, MVT::i32), LHS, RHS); @@ -3335,7 +3336,7 @@ static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS, /// specified intrinsic ID. static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1, SDOperand Op2, SelectionDAG &DAG, - MVT::ValueType DestVT = MVT::Other) { + MVT DestVT = MVT::Other) { if (DestVT == MVT::Other) DestVT = Op0.getValueType(); return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); @@ -3345,7 +3346,7 @@ static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1, /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified /// amount. The result has the specified value type. static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt, - MVT::ValueType VT, SelectionDAG &DAG) { + MVT VT, SelectionDAG &DAG) { // Force LHS/RHS to be the right type. LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS); RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS); @@ -3705,8 +3706,8 @@ SDOperand PPCTargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except // that it is in input element units, not in bytes. Convert now. 
- MVT::ValueType EltVT = MVT::getVectorElementType(V1.getValueType()); - unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8; + MVT EltVT = V1.getValueType().getVectorElementType(); + unsigned BytesPerElement = EltVT.getSizeInBits()/8; SmallVector<SDOperand, 16> ResultMask; for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) { @@ -3794,7 +3795,7 @@ SDOperand PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, Op.getOperand(3), // RHS DAG.getConstant(CompareOpc, MVT::i32) }; - std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; VTs.push_back(Op.getOperand(2).getValueType()); VTs.push_back(MVT::Flag); SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3); @@ -3843,7 +3844,7 @@ SDOperand PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, // Create a stack slot that is 16-byte aligned. MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); int FrameIdx = FrameInfo->CreateStackObject(16, 16); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); // Store the input value into Value#0 of the stack slot. @@ -4154,7 +4155,7 @@ SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, SDOperand Load = N->getOperand(0); LoadSDNode *LD = cast<LoadSDNode>(Load); // Create the byte-swapping load. - std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; VTs.push_back(MVT::i32); VTs.push_back(MVT::Other); SDOperand MO = DAG.getMemOperand(LD->getMemOperand()); @@ -4264,7 +4265,7 @@ SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); // Create the PPCISD altivec 'dot' comparison node. - std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; SDOperand Ops[] = { LHS.getOperand(2), // LHS of compare LHS.getOperand(3), // RHS of compare @@ -4367,7 +4368,7 @@ PPCTargetLowering::getConstraintType(const std::string &Constraint) const { std::pair<unsigned, const TargetRegisterClass*> PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const { + MVT VT) const { if (Constraint.size() == 1) { // GCC RS6000 Constraint Letters switch (Constraint[0]) { @@ -4527,7 +4528,7 @@ SDOperand PPCTargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) return SDOperand(); - MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); bool isPPC64 = PtrVT == MVT::i64; MachineFunction &MF = DAG.getMachineFunction(); diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h index d28799a..34012ff 100644 --- a/lib/Target/PowerPC/PPCISelLowering.h +++ b/lib/Target/PowerPC/PPCISelLowering.h @@ -235,7 +235,7 @@ namespace llvm { virtual const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType - Return the ISD::SETCC ValueType - virtual MVT::ValueType getSetCCResultType(const SDOperand &) const; + virtual MVT getSetCCResultType(const SDOperand &) const; /// getPreIndexedAddressParts - returns true by value, base pointer and /// offset pointer and addressing mode by reference if the node's address @@ -290,7 +290,7 @@ namespace llvm { ConstraintType getConstraintType(const std::string &Constraint) const; std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; /// 
getByValTypeAlignment - Return the desired alignment for ByVal aggregate /// function arguments in the caller parameter area. This is the actual diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp index 3d5ad0b..89172fc 100644 --- a/lib/Target/Sparc/SparcISelLowering.cpp +++ b/lib/Target/Sparc/SparcISelLowering.cpp @@ -91,9 +91,9 @@ SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { std::vector<SDOperand> OutChains; for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) { - MVT::ValueType ObjectVT = getValueType(I->getType()); + MVT ObjectVT = getValueType(I->getType()); - switch (ObjectVT) { + switch (ObjectVT.getSimpleVT()) { default: assert(0 && "Unhandled argument type!"); case MVT::i1: case MVT::i8: @@ -123,7 +123,7 @@ SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { ISD::LoadExtType LoadOp = ISD::SEXTLOAD; // Sparc is big endian, so add an offset based on the ObjectVT. - unsigned Offset = 4-std::max(1U, MVT::getSizeInBits(ObjectVT)/8); + unsigned Offset = 4-std::max(1U, ObjectVT.getSizeInBits()/8); FIPtr = DAG.getNode(ISD::ADD, MVT::i32, FIPtr, DAG.getConstant(Offset, MVT::i32)); Load = DAG.getExtLoad(LoadOp, MVT::i32, Root, FIPtr, @@ -246,7 +246,7 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { // Count the size of the outgoing arguments. unsigned ArgsSize = 0; for (unsigned i = 5, e = Op.getNumOperands(); i != e; i += 2) { - switch (Op.getOperand(i).getValueType()) { + switch (Op.getOperand(i).getValueType().getSimpleVT()) { default: assert(0 && "Unknown value type!"); case MVT::i1: case MVT::i8: @@ -323,10 +323,10 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { for (unsigned i = 5, e = Op.getNumOperands(); i != e; i += 2) { SDOperand Val = Op.getOperand(i); - MVT::ValueType ObjectVT = Val.getValueType(); + MVT ObjectVT = Val.getValueType(); SDOperand ValToStore(0, 0); unsigned ObjSize; - switch (ObjectVT) { + switch (ObjectVT.getSimpleVT()) { default: assert(0 && "Unhandled argument type!"); case MVT::i32: ObjSize = 4; @@ -414,7 +414,7 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32); - std::vector<MVT::ValueType> NodeTys; + std::vector<MVT> NodeTys; NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. SDOperand Ops[] = { Chain, Callee, InFlag }; @@ -744,7 +744,7 @@ static SDOperand LowerBR_CC(SDOperand Op, SelectionDAG &DAG) { // Get the condition flag. 
SDOperand CompareFlag; if (LHS.getValueType() == MVT::i32) { - std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; VTs.push_back(MVT::i32); VTs.push_back(MVT::Flag); SDOperand Ops[2] = { LHS, RHS }; @@ -774,7 +774,7 @@ static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { SDOperand CompareFlag; if (LHS.getValueType() == MVT::i32) { - std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; VTs.push_back(LHS.getValueType()); // subcc returns a value VTs.push_back(MVT::Flag); SDOperand Ops[2] = { LHS, RHS }; @@ -804,14 +804,14 @@ static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG, static SDOperand LowerVAARG(SDOperand Op, SelectionDAG &DAG) { SDNode *Node = Op.Val; - MVT::ValueType VT = Node->getValueType(0); + MVT VT = Node->getValueType(0); SDOperand InChain = Node->getOperand(0); SDOperand VAListPtr = Node->getOperand(1); const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); SDOperand VAList = DAG.getLoad(MVT::i32, InChain, VAListPtr, SV, 0); // Increment the pointer, VAList, to the next vaarg SDOperand NextPtr = DAG.getNode(ISD::ADD, MVT::i32, VAList, - DAG.getConstant(MVT::getSizeInBits(VT)/8, + DAG.getConstant(VT.getSizeInBits()/8, MVT::i32)); // Store the incremented VAList to the legalized pointer InChain = DAG.getStore(VAList.getValue(1), NextPtr, @@ -846,7 +846,7 @@ static SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG) { // to provide a register spill area. SDOperand NewVal = DAG.getNode(ISD::ADD, MVT::i32, NewSP, DAG.getConstant(96, MVT::i32)); - std::vector<MVT::ValueType> Tys; + std::vector<MVT> Tys; Tys.push_back(MVT::i32); Tys.push_back(MVT::Other); SDOperand Ops[2] = { NewVal, Chain }; diff --git a/lib/Target/TargetRegisterInfo.cpp b/lib/Target/TargetRegisterInfo.cpp index 9c8de12..3f44a0c 100644 --- a/lib/Target/TargetRegisterInfo.cpp +++ b/lib/Target/TargetRegisterInfo.cpp @@ -48,8 +48,7 @@ namespace { /// register of the given type. If type is MVT::Other, then just return any /// register class the register belongs to. const TargetRegisterClass * -TargetRegisterInfo::getPhysicalRegisterRegClass(unsigned reg, - MVT::ValueType VT) const { +TargetRegisterInfo::getPhysicalRegisterRegClass(unsigned reg, MVT VT) const { assert(isPhysicalRegister(reg) && "reg must be a physical register"); // Pick the register class of the right type that contains this physreg. diff --git a/lib/Target/TargetSelectionDAG.td b/lib/Target/TargetSelectionDAG.td index 474f910..b7fadd1 100644 --- a/lib/Target/TargetSelectionDAG.td +++ b/lib/Target/TargetSelectionDAG.td @@ -53,8 +53,8 @@ class SDTCisOpSmallerThanOp<int SmallOp, int BigOp> : SDTypeConstraint<SmallOp>{ /// SDTCisIntVectorOfSameSize - This indicates that ThisOp and OtherOp are /// vector types, and that ThisOp is the result of -/// MVT::getIntVectorWithNumElements with the number of elements that ThisOp -/// has. +/// MVT::getIntVectorWithNumElements with the number of elements +/// that ThisOp has. class SDTCisIntVectorOfSameSize<int ThisOp, int OtherOp> : SDTypeConstraint<ThisOp> { int OtherOpNum = OtherOp; @@ -467,8 +467,8 @@ class PatLeaf<dag frag, code pred = [{}], SDNodeXForm xform = NOOP_SDNodeXForm> // Leaf fragments. 
-def vtInt : PatLeaf<(vt), [{ return MVT::isInteger(N->getVT()); }]>; -def vtFP : PatLeaf<(vt), [{ return MVT::isFloatingPoint(N->getVT()); }]>; +def vtInt : PatLeaf<(vt), [{ return N->getVT().isInteger(); }]>; +def vtFP : PatLeaf<(vt), [{ return N->getVT().isFloatingPoint(); }]>; def immAllOnes : PatLeaf<(imm), [{ return N->isAllOnesValue(); }]>; def immAllOnesV: PatLeaf<(build_vector), [{ diff --git a/lib/Target/X86/X86ATTAsmPrinter.cpp b/lib/Target/X86/X86ATTAsmPrinter.cpp index 5cae112..eacab47 100644 --- a/lib/Target/X86/X86ATTAsmPrinter.cpp +++ b/lib/Target/X86/X86ATTAsmPrinter.cpp @@ -215,7 +215,7 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo, O << '%'; unsigned Reg = MO.getReg(); if (Modifier && strncmp(Modifier, "subreg", strlen("subreg")) == 0) { - MVT::ValueType VT = (strcmp(Modifier+6,"64") == 0) ? + MVT VT = (strcmp(Modifier+6,"64") == 0) ? MVT::i64 : ((strcmp(Modifier+6, "32") == 0) ? MVT::i32 : ((strcmp(Modifier+6,"16") == 0) ? MVT::i16 : MVT::i8)); Reg = getX86SubSuperRegister(Reg, VT); diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index bb8c58a..5ee9122 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -215,7 +215,7 @@ namespace { /// getTruncate - return an SDNode that implements a subreg based truncate /// of the specified operand to the the specified value type. - SDNode *getTruncate(SDOperand N0, MVT::ValueType VT); + SDNode *getTruncate(SDOperand N0, MVT VT); #ifndef NDEBUG unsigned Indent; @@ -329,7 +329,7 @@ bool X86DAGToDAGISel::CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const { // NU), then TF is a predecessor of FU and a successor of NU. But since // NU and FU are flagged together, this effectively creates a cycle. bool HasFlagUse = false; - MVT::ValueType VT = Root->getValueType(Root->getNumValues()-1); + MVT VT = Root->getValueType(Root->getNumValues()-1); while ((VT == MVT::Flag && !Root->use_empty())) { SDNode *FU = findFlagUse(Root); if (FU == NULL) @@ -440,8 +440,8 @@ void X86DAGToDAGISel::PreprocessForRMW(SelectionDAG &DAG) { SDOperand N1 = I->getOperand(1); SDOperand N2 = I->getOperand(2); - if ((MVT::isFloatingPoint(N1.getValueType()) && - !MVT::isVector(N1.getValueType())) || + if ((N1.getValueType().isFloatingPoint() && + !N1.getValueType().isVector()) || !N1.hasOneUse()) continue; @@ -505,8 +505,8 @@ void X86DAGToDAGISel::PreprocessForFPConvert(SelectionDAG &DAG) { // If the source and destination are SSE registers, then this is a legal // conversion that should not be lowered. - MVT::ValueType SrcVT = N->getOperand(0).getValueType(); - MVT::ValueType DstVT = N->getValueType(0); + MVT SrcVT = N->getOperand(0).getValueType(); + MVT DstVT = N->getValueType(0); bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT); bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT); if (SrcIsSSE && DstIsSSE) @@ -524,7 +524,7 @@ void X86DAGToDAGISel::PreprocessForFPConvert(SelectionDAG &DAG) { // Here we could have an FP stack truncation or an FPStack <-> SSE convert. // FPStack has extload and truncstore. SSE can fold direct loads into other // operations. Based on this, decide what we want to do. - MVT::ValueType MemVT; + MVT MemVT; if (N->getOpcode() == ISD::FP_ROUND) MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'. 
else @@ -942,7 +942,7 @@ bool X86DAGToDAGISel::SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base, if (MatchAddress(N, AM)) return false; - MVT::ValueType VT = N.getValueType(); + MVT VT = N.getValueType(); if (AM.BaseType == X86ISelAddressMode::RegBase) { if (!AM.Base.Reg.Val) AM.Base.Reg = CurDAG->getRegister(0, VT); @@ -1016,7 +1016,7 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDOperand Op, SDOperand N, if (MatchAddress(N, AM)) return false; - MVT::ValueType VT = N.getValueType(); + MVT VT = N.getValueType(); unsigned Complexity = 0; if (AM.BaseType == X86ISelAddressMode::RegBase) if (AM.Base.Reg.Val) @@ -1110,16 +1110,17 @@ static SDNode *FindCallStartFromCall(SDNode *Node) { return FindCallStartFromCall(Node->getOperand(0).Val); } -SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT::ValueType VT) { +SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT VT) { SDOperand SRIdx; - switch (VT) { + switch (VT.getSimpleVT()) { + default: assert(0 && "Unknown truncate!"); case MVT::i8: SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1 // Ensure that the source register has an 8-bit subreg on 32-bit targets if (!Subtarget->is64Bit()) { unsigned Opc; - MVT::ValueType VT; - switch (N0.getValueType()) { + MVT VT; + switch (N0.getValueType().getSimpleVT()) { default: assert(0 && "Unknown truncate!"); case MVT::i16: Opc = X86::MOV16to16_; @@ -1141,7 +1142,6 @@ SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT::ValueType VT) { case MVT::i32: SRIdx = CurDAG->getTargetConstant(3, MVT::i32); // SubRegSet 3 break; - default: assert(0 && "Unknown truncate!"); break; } return CurDAG->getTargetNode(X86::EXTRACT_SUBREG, VT, N0, SRIdx); } @@ -1149,7 +1149,7 @@ SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT::ValueType VT) { SDNode *X86DAGToDAGISel::Select(SDOperand N) { SDNode *Node = N.Val; - MVT::ValueType NVT = Node->getValueType(0); + MVT NVT = Node->getValueType(0); unsigned Opc, MOpc; unsigned Opcode = Node->getOpcode(); @@ -1183,7 +1183,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { // RIP-relative addressing. 
if (TM.getCodeModel() != CodeModel::Small) break; - MVT::ValueType PtrVT = TLI.getPointerTy(); + MVT PtrVT = TLI.getPointerTy(); SDOperand N0 = N.getOperand(0); SDOperand N1 = N.getOperand(1); if (N.Val->getValueType(0) == PtrVT && @@ -1224,7 +1224,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { bool isSigned = Opcode == ISD::SMUL_LOHI; if (!isSigned) - switch (NVT) { + switch (NVT.getSimpleVT()) { default: assert(0 && "Unsupported VT!"); case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break; case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break; @@ -1232,7 +1232,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break; } else - switch (NVT) { + switch (NVT.getSimpleVT()) { default: assert(0 && "Unsupported VT!"); case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break; case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break; @@ -1241,7 +1241,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { } unsigned LoReg, HiReg; - switch (NVT) { + switch (NVT.getSimpleVT()) { default: assert(0 && "Unsupported VT!"); case MVT::i8: LoReg = X86::AL; HiReg = X86::AH; break; case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; break; @@ -1334,7 +1334,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { bool isSigned = Opcode == ISD::SDIVREM; if (!isSigned) - switch (NVT) { + switch (NVT.getSimpleVT()) { default: assert(0 && "Unsupported VT!"); case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break; case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break; @@ -1342,7 +1342,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break; } else - switch (NVT) { + switch (NVT.getSimpleVT()) { default: assert(0 && "Unsupported VT!"); case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break; case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break; @@ -1352,7 +1352,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { unsigned LoReg, HiReg; unsigned ClrOpcode, SExtOpcode; - switch (NVT) { + switch (NVT.getSimpleVT()) { default: assert(0 && "Unsupported VT!"); case MVT::i8: LoReg = X86::AL; HiReg = X86::AH; @@ -1493,7 +1493,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { SDOperand N0 = Node->getOperand(0); // Get the subregsiter index for the type to extend. - MVT::ValueType N0VT = N0.getValueType(); + MVT N0VT = N0.getValueType(); unsigned Idx = (N0VT == MVT::i32) ? X86::SUBREG_32BIT : (N0VT == MVT::i16) ? X86::SUBREG_16BIT : (Subtarget->is64Bit()) ? 
X86::SUBREG_8BIT : 0; @@ -1523,30 +1523,30 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { SDOperand N0 = Node->getOperand(0); AddToISelQueue(N0); - MVT::ValueType SVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); + MVT SVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); SDOperand TruncOp = SDOperand(getTruncate(N0, SVT), 0); unsigned Opc = 0; - switch (NVT) { + switch (NVT.getSimpleVT()) { + default: assert(0 && "Unknown sign_extend_inreg!"); case MVT::i16: if (SVT == MVT::i8) Opc = X86::MOVSX16rr8; else assert(0 && "Unknown sign_extend_inreg!"); break; case MVT::i32: - switch (SVT) { + switch (SVT.getSimpleVT()) { + default: assert(0 && "Unknown sign_extend_inreg!"); case MVT::i8: Opc = X86::MOVSX32rr8; break; case MVT::i16: Opc = X86::MOVSX32rr16; break; - default: assert(0 && "Unknown sign_extend_inreg!"); } break; case MVT::i64: - switch (SVT) { + switch (SVT.getSimpleVT()) { + default: assert(0 && "Unknown sign_extend_inreg!"); case MVT::i8: Opc = X86::MOVSX64rr8; break; case MVT::i16: Opc = X86::MOVSX64rr16; break; case MVT::i32: Opc = X86::MOVSX64rr32; break; - default: assert(0 && "Unknown sign_extend_inreg!"); } break; - default: assert(0 && "Unknown sign_extend_inreg!"); } SDNode *ResNode = CurDAG->getTargetNode(Opc, NVT, TruncOp); diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index c4307b8..71f0779 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -493,44 +493,44 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) // will selectively turn on ones that can be effectively codegen'd. for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) { - setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand); - 
setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SHL, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SRA, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::SRL, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::ROTL, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::ROTR, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand); - setOperationAction(ISD::VSETCC, (MVT::ValueType)VT, Expand); + setOperationAction(ISD::ADD , (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::SUB , (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::MUL , (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::VSETCC, (MVT::SimpleValueType)VT, Expand); } if (Subtarget->hasMMX()) { @@ -654,13 +654,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); // Custom lower build_vector, vector_shuffle, and extract_vector_elt. 
- for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) { + for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) { + MVT VT = (MVT::SimpleValueType)i; // Do not attempt to custom lower non-power-of-2 vectors - if (!isPowerOf2_32(MVT::getVectorNumElements(VT))) + if (!isPowerOf2_32(VT.getVectorNumElements())) continue; - setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom); + setOperationAction(ISD::BUILD_VECTOR, VT, Custom); + setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); } setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); @@ -675,16 +676,16 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64. for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) { - setOperationAction(ISD::AND, (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::AND, (MVT::ValueType)VT, MVT::v2i64); - setOperationAction(ISD::OR, (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::OR, (MVT::ValueType)VT, MVT::v2i64); - setOperationAction(ISD::XOR, (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::XOR, (MVT::ValueType)VT, MVT::v2i64); - setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::LOAD, (MVT::ValueType)VT, MVT::v2i64); - setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote); - AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64); + setOperationAction(ISD::AND, (MVT::SimpleValueType)VT, Promote); + AddPromotedToType (ISD::AND, (MVT::SimpleValueType)VT, MVT::v2i64); + setOperationAction(ISD::OR, (MVT::SimpleValueType)VT, Promote); + AddPromotedToType (ISD::OR, (MVT::SimpleValueType)VT, MVT::v2i64); + setOperationAction(ISD::XOR, (MVT::SimpleValueType)VT, Promote); + AddPromotedToType (ISD::XOR, (MVT::SimpleValueType)VT, MVT::v2i64); + setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Promote); + AddPromotedToType (ISD::LOAD, (MVT::SimpleValueType)VT, MVT::v2i64); + setOperationAction(ISD::SELECT, (MVT::SimpleValueType)VT, Promote); + AddPromotedToType (ISD::SELECT, (MVT::SimpleValueType)VT, MVT::v2i64); } setTruncStoreAction(MVT::f64, MVT::f32, Expand); @@ -743,8 +744,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) } -MVT::ValueType -X86TargetLowering::getSetCCResultType(const SDOperand &) const { +MVT X86TargetLowering::getSetCCResultType(const SDOperand &) const { return MVT::i8; } @@ -792,7 +792,7 @@ unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const { /// and store operations as a result of memset, memcpy, and memmove /// lowering. It returns MVT::iAny if SelectionDAG should be responsible for /// determining it. -MVT::ValueType +MVT X86TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align, bool isSrcConst, bool isSrcStr) const { if ((isSrcConst || isSrcStr) && Subtarget->hasSSE2() && Size >= 16) @@ -948,7 +948,7 @@ LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall, // Copy all of the result registers out of their specified physreg. 
for (unsigned i = 0; i != RVLocs.size(); ++i) { - MVT::ValueType CopyVT = RVLocs[i].getValVT(); + MVT CopyVT = RVLocs[i].getValVT(); // If this is a call to a function that returns an fp value on the floating // point stack, but where we prefer to use the value in xmm registers, copy @@ -1123,7 +1123,7 @@ SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG, // changed with more analysis. // In case of tail call optimization mark all arguments mutable. Since they // could be overwritten by lowering of arguments in case of a tail call. - int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8, + int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8, VA.getLocMemOffset(), isImmutable); SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy()); if (Flags.isByVal()) @@ -1172,7 +1172,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { LastVal = VA.getValNo(); if (VA.isRegLoc()) { - MVT::ValueType RegVT = VA.getLocVT(); + MVT RegVT = VA.getLocVT(); TargetRegisterClass *RC; if (RegVT == MVT::i32) RC = X86::GR32RegisterClass; @@ -1182,10 +1182,10 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { RC = X86::FR32RegisterClass; else if (RegVT == MVT::f64) RC = X86::FR64RegisterClass; - else if (MVT::isVector(RegVT) && MVT::getSizeInBits(RegVT) == 128) + else if (RegVT.isVector() && RegVT.getSizeInBits() == 128) RC = X86::VR128RegisterClass; - else if (MVT::isVector(RegVT)) { - assert(MVT::getSizeInBits(RegVT) == 64); + else if (RegVT.isVector()) { + assert(RegVT.getSizeInBits() == 64); if (!Is64Bit) RC = X86::VR64RegisterClass; // MMX values are passed in MMXs. else { @@ -1221,7 +1221,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { // Handle MMX values passed in GPRs. if (Is64Bit && RegVT != VA.getLocVT()) { - if (MVT::getSizeInBits(RegVT) == 64 && RC == X86::GR64RegisterClass) + if (RegVT.getSizeInBits() == 64 && RC == X86::GR64RegisterClass) ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue); else if (RC == X86::VR128RegisterClass) { ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i64, ArgValue, @@ -1408,7 +1408,7 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, if (!IsTailCall || FPDiff==0) return Chain; // Adjust the Return address stack slot. - MVT::ValueType VT = getPointerTy(); + MVT VT = getPointerTy(); OutRetAddr = getReturnAddressFrameIndex(DAG); // Load the "old" Return address. OutRetAddr = DAG.getLoad(VT, Chain,OutRetAddr, NULL, 0); @@ -1427,7 +1427,7 @@ EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, int SlotSize = Is64Bit ? 8 : 4; int NewReturnAddrFI = MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize); - MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32; + MVT VT = Is64Bit ? 
MVT::i64 : MVT::i32; SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx, PseudoSourceValue::getFixedStack(), NewReturnAddrFI); @@ -1514,8 +1514,8 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { if (VA.isRegLoc()) { if (Is64Bit) { - MVT::ValueType RegVT = VA.getLocVT(); - if (MVT::isVector(RegVT) && MVT::getSizeInBits(RegVT) == 64) + MVT RegVT = VA.getLocVT(); + if (RegVT.isVector() && RegVT.getSizeInBits() == 64) switch (VA.getLocReg()) { default: break; @@ -1630,7 +1630,7 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { cast<ARG_FLAGSSDNode>(FlagsOp)->getArgFlags(); // Create frame index. int32_t Offset = VA.getLocMemOffset()+FPDiff; - uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8; + uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8; FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset); FIN = DAG.getFrameIndex(FI, getPointerTy()); @@ -2567,9 +2567,9 @@ static bool isPSHUFHW_PSHUFLWMask(SDNode *N) { static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, SDOperand &V2, SDOperand &Mask, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType MaskVT = Mask.getValueType(); - MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); + MVT VT = Op.getValueType(); + MVT MaskVT = Mask.getValueType(); + MVT EltVT = MaskVT.getVectorElementType(); unsigned NumElems = Mask.getNumOperands(); SmallVector<SDOperand, 8> MaskVec; @@ -2596,8 +2596,8 @@ static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, /// the two vector operands have swapped position. static SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) { - MVT::ValueType MaskVT = Mask.getValueType(); - MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); + MVT MaskVT = Mask.getValueType(); + MVT EltVT = MaskVT.getVectorElementType(); unsigned NumElems = Mask.getNumOperands(); SmallVector<SDOperand, 8> MaskVec; for (unsigned i = 0; i != NumElems; ++i) { @@ -2756,14 +2756,13 @@ static bool isZeroShuffle(SDNode *N) { /// getZeroVector - Returns a vector of specified type with all zero elements. /// -static SDOperand getZeroVector(MVT::ValueType VT, bool HasSSE2, - SelectionDAG &DAG) { - assert(MVT::isVector(VT) && "Expected a vector type"); +static SDOperand getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG) { + assert(VT.isVector() && "Expected a vector type"); // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest // type. This ensures they get CSE'd. SDOperand Vec; - if (MVT::getSizeInBits(VT) == 64) { // MMX + if (VT.getSizeInBits() == 64) { // MMX SDOperand Cst = DAG.getTargetConstant(0, MVT::i32); Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); } else if (HasSSE2) { // SSE2 @@ -2778,14 +2777,14 @@ static SDOperand getZeroVector(MVT::ValueType VT, bool HasSSE2, /// getOnesVector - Returns a vector of specified type with all bits set. /// -static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) { - assert(MVT::isVector(VT) && "Expected a vector type"); +static SDOperand getOnesVector(MVT VT, SelectionDAG &DAG) { + assert(VT.isVector() && "Expected a vector type"); // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest // type. This ensures they get CSE'd. 
SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32); SDOperand Vec; - if (MVT::getSizeInBits(VT) == 64) // MMX + if (VT.getSizeInBits() == 64) // MMX Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); else // SSE Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); @@ -2822,8 +2821,8 @@ static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd /// operation of specified width. static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { - MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); - MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); + MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); + MVT BaseVT = MaskVT.getVectorElementType(); SmallVector<SDOperand, 8> MaskVec; MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); @@ -2835,8 +2834,8 @@ static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { /// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation /// of specified width. static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { - MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); - MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); + MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); + MVT BaseVT = MaskVT.getVectorElementType(); SmallVector<SDOperand, 8> MaskVec; for (unsigned i = 0, e = NumElems/2; i != e; ++i) { MaskVec.push_back(DAG.getConstant(i, BaseVT)); @@ -2848,8 +2847,8 @@ static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { /// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation /// of specified width. static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { - MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); - MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); + MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); + MVT BaseVT = MaskVT.getVectorElementType(); unsigned Half = NumElems/2; SmallVector<SDOperand, 8> MaskVec; for (unsigned i = 0; i != Half; ++i) { @@ -2864,8 +2863,8 @@ static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { /// elements in place. static SDOperand getSwapEltZeroMask(unsigned NumElems, unsigned DestElt, SelectionDAG &DAG) { - MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); - MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); + MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); + MVT BaseVT = MaskVT.getVectorElementType(); SmallVector<SDOperand, 8> MaskVec; // Element #0 of the result gets the elt we are replacing. MaskVec.push_back(DAG.getConstant(DestElt, BaseVT)); @@ -2876,8 +2875,8 @@ static SDOperand getSwapEltZeroMask(unsigned NumElems, unsigned DestElt, /// PromoteSplat - Promote a splat of v4f32, v8i16 or v16i8 to v4i32. static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG, bool HasSSE2) { - MVT::ValueType PVT = HasSSE2 ? MVT::v4i32 : MVT::v4f32; - MVT::ValueType VT = Op.getValueType(); + MVT PVT = HasSSE2 ? MVT::v4i32 : MVT::v4f32; + MVT VT = Op.getValueType(); if (PVT == VT) return Op; SDOperand V1 = Op.getOperand(0); @@ -2906,12 +2905,12 @@ static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG, bool HasSSE2) { static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, unsigned Idx, bool isZero, bool HasSSE2, SelectionDAG &DAG) { - MVT::ValueType VT = V2.getValueType(); + MVT VT = V2.getValueType(); SDOperand V1 = isZero ? 
getZeroVector(VT, HasSSE2, DAG) : DAG.getNode(ISD::UNDEF, VT); - unsigned NumElems = MVT::getVectorNumElements(V2.getValueType()); - MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); - MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); + unsigned NumElems = V2.getValueType().getVectorNumElements(); + MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); + MVT EVT = MaskVT.getVectorElementType(); SmallVector<SDOperand, 16> MaskVec; for (unsigned i = 0; i != NumElems; ++i) if (i == Idx) // If this is the insertion idx, put the low elt of V2 here. @@ -3061,11 +3060,11 @@ static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, /// getVShift - Return a vector logical shift node. /// -static SDOperand getVShift(bool isLeft, MVT::ValueType VT, SDOperand SrcOp, +static SDOperand getVShift(bool isLeft, MVT VT, SDOperand SrcOp, unsigned NumBits, SelectionDAG &DAG, const TargetLowering &TLI) { - bool isMMX = MVT::getSizeInBits(VT) == 64; - MVT::ValueType ShVT = isMMX ? MVT::v1i64 : MVT::v2i64; + bool isMMX = VT.getSizeInBits() == 64; + MVT ShVT = isMMX ? MVT::v1i64 : MVT::v2i64; unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL; SrcOp = DAG.getNode(ISD::BIT_CONVERT, ShVT, SrcOp); return DAG.getNode(ISD::BIT_CONVERT, VT, @@ -3088,9 +3087,9 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG); } - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType EVT = MVT::getVectorElementType(VT); - unsigned EVTBits = MVT::getSizeInBits(EVT); + MVT VT = Op.getValueType(); + MVT EVT = VT.getVectorElementType(); + unsigned EVTBits = EVT.getSizeInBits(); unsigned NumElems = Op.getNumOperands(); unsigned NumZero = 0; @@ -3133,8 +3132,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { (!IsAllConstants || Idx == 0)) { if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { // Handle MMX and SSE both. - MVT::ValueType VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32; - MVT::ValueType VecElts = VT == MVT::v2i64 ? 4 : 2; + MVT VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32; + unsigned VecElts = VT == MVT::v2i64 ? 4 : 2; // Truncate the value (which may itself be a constant) to i32, and // convert it to a vector with movd (S2V+shuffle to zero extend). @@ -3173,7 +3172,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { // Is it a vector logical left shift? if (NumElems == 2 && Idx == 1 && isZeroNode(Op.getOperand(0)) && !isZeroNode(Op.getOperand(1))) { - unsigned NumBits = MVT::getSizeInBits(VT); + unsigned NumBits = VT.getSizeInBits(); return getVShift(true, VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(1)), NumBits/2, DAG, *this); @@ -3193,8 +3192,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { // Turn it into a shuffle of zero and zero-extended scalar to vector. Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget->hasSSE2(), DAG); - MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); - MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); + MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); + MVT MaskEVT = MaskVT.getVectorElementType(); SmallVector<SDOperand, 8> MaskVec; for (unsigned i = 0; i < NumElems; i++) MaskVec.push_back(DAG.getConstant((i == Idx) ? 
0 : 1, MaskEVT)); @@ -3273,8 +3272,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { } } - MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); - MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); + MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); + MVT EVT = MaskVT.getVectorElementType(); SmallVector<SDOperand, 8> MaskVec; bool Reverse = (NonZeros & 0x3) == 2; for (unsigned i = 0; i < 2; ++i) @@ -3320,9 +3319,9 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, SDOperand PermMask, SelectionDAG &DAG, TargetLowering &TLI) { SDOperand NewV; - MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8); - MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); - MVT::ValueType PtrVT = TLI.getPointerTy(); + MVT MaskVT = MVT::getIntVectorWithNumElements(8); + MVT MaskEVT = MaskVT.getVectorElementType(); + MVT PtrVT = TLI.getPointerTy(); SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(), PermMask.Val->op_end()); @@ -3562,23 +3561,23 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, /// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15> static SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2, - MVT::ValueType VT, + MVT VT, SDOperand PermMask, SelectionDAG &DAG, TargetLowering &TLI) { unsigned NumElems = PermMask.getNumOperands(); unsigned NewWidth = (NumElems == 4) ? 2 : 4; - MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth); - MVT::ValueType NewVT = MaskVT; - switch (VT) { + MVT MaskVT = MVT::getIntVectorWithNumElements(NewWidth); + MVT NewVT = MaskVT; + switch (VT.getSimpleVT()) { + default: assert(false && "Unexpected!"); case MVT::v4f32: NewVT = MVT::v2f64; break; case MVT::v4i32: NewVT = MVT::v2i64; break; case MVT::v8i16: NewVT = MVT::v4i32; break; case MVT::v16i8: NewVT = MVT::v4i32; break; - default: assert(false && "Unexpected!"); } if (NewWidth == 2) { - if (MVT::isInteger(VT)) + if (VT.isInteger()) NewVT = MVT::v2i64; else NewVT = MVT::v2f64; @@ -3612,9 +3611,9 @@ SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2, /// getVZextMovL - Return a zero-extending vector move low node. /// -static SDOperand getVZextMovL(MVT::ValueType VT, MVT::ValueType OpVT, - SDOperand SrcOp, SelectionDAG &DAG, - const X86Subtarget *Subtarget) { +static SDOperand getVZextMovL(MVT VT, MVT OpVT, + SDOperand SrcOp, SelectionDAG &DAG, + const X86Subtarget *Subtarget) { if (VT == MVT::v2f64 || VT == MVT::v4f32) { LoadSDNode *LD = NULL; if (!isScalarLoadToVector(SrcOp.Val, &LD)) @@ -3622,7 +3621,7 @@ static SDOperand getVZextMovL(MVT::ValueType VT, MVT::ValueType OpVT, if (!LD) { // movssrr and movsdrr do not clear top bits. Try to use movd, movq // instead. - MVT::ValueType EVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32; + MVT EVT = (OpVT == MVT::v2f64) ? 
MVT::i64 : MVT::i32; if ((EVT != MVT::i64 || Subtarget->is64Bit()) && SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR && SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT && @@ -3647,9 +3646,9 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { SDOperand V1 = Op.getOperand(0); SDOperand V2 = Op.getOperand(1); SDOperand PermMask = Op.getOperand(2); - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); unsigned NumElems = PermMask.getNumOperands(); - bool isMMX = MVT::getSizeInBits(VT) == 64; + bool isMMX = VT.getSizeInBits() == 64; bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; bool V1IsSplat = false; @@ -3710,8 +3709,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { if (isShift && ShVal.hasOneUse()) { // If the shifted value has multiple uses, it may be cheaper to use // v_set0 + movlhps or movhlps, etc. - MVT::ValueType EVT = MVT::getVectorElementType(VT); - ShAmt *= MVT::getSizeInBits(EVT); + MVT EVT = VT.getVectorElementType(); + ShAmt *= EVT.getSizeInBits(); return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this); } @@ -3736,8 +3735,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { if (isShift) { // No better options. Use a vshl / vsrl. - MVT::ValueType EVT = MVT::getVectorElementType(VT); - ShAmt *= MVT::getSizeInBits(EVT); + MVT EVT = VT.getVectorElementType(); + ShAmt *= EVT.getSizeInBits(); return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this); } @@ -3821,7 +3820,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { (X86::isPSHUFDMask(PermMask.Val) || X86::isPSHUFHWMask(PermMask.Val) || X86::isPSHUFLWMask(PermMask.Val))) { - MVT::ValueType RVT = VT; + MVT RVT = VT; if (VT == MVT::v4f32) { RVT = MVT::v4i32; Op = DAG.getNode(ISD::VECTOR_SHUFFLE, RVT, @@ -3851,8 +3850,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { // Handle all 4 wide cases with a number of shuffles. if (NumElems == 4 && !isMMX) { // Don't do this for MMX. - MVT::ValueType MaskVT = PermMask.getValueType(); - MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); + MVT MaskVT = PermMask.getValueType(); + MVT MaskEVT = MaskVT.getVectorElementType(); SmallVector<std::pair<int, int>, 8> Locs; Locs.reserve(NumElems); SmallVector<SDOperand, 8> Mask1(NumElems, @@ -3959,14 +3958,14 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { SDOperand X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); - if (MVT::getSizeInBits(VT) == 8) { + MVT VT = Op.getValueType(); + if (VT.getSizeInBits() == 8) { SDOperand Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32, Op.getOperand(0), Op.getOperand(1)); SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, DAG.getValueType(VT)); return DAG.getNode(ISD::TRUNCATE, VT, Assert); - } else if (MVT::getSizeInBits(VT) == 16) { + } else if (VT.getSizeInBits() == 16) { SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32, Op.getOperand(0), Op.getOperand(1)); SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, @@ -4003,9 +4002,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { return Res; } - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); // TODO: handle v16i8. 
- if (MVT::getSizeInBits(VT) == 16) { + if (VT.getSizeInBits() == 16) { SDOperand Vec = Op.getOperand(0); unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); if (Idx == 0) @@ -4014,27 +4013,27 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec), Op.getOperand(1))); // Transform it so it match pextrw which produces a 32-bit result. - MVT::ValueType EVT = (MVT::ValueType)(VT+1); + MVT EVT = (MVT::SimpleValueType)(VT.getSimpleVT()+1); SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT, Op.getOperand(0), Op.getOperand(1)); SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, DAG.getValueType(VT)); return DAG.getNode(ISD::TRUNCATE, VT, Assert); - } else if (MVT::getSizeInBits(VT) == 32) { + } else if (VT.getSizeInBits() == 32) { unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); if (Idx == 0) return Op; // SHUFPS the element to the lowest double word, then movss. - MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); + MVT MaskVT = MVT::getIntVectorWithNumElements(4); SmallVector<SDOperand, 8> IdxVec; IdxVec. - push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT))); + push_back(DAG.getConstant(Idx, MaskVT.getVectorElementType())); IdxVec. - push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); + push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType())); IdxVec. - push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); + push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType())); IdxVec. - push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); + push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType())); SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &IdxVec[0], IdxVec.size()); SDOperand Vec = Op.getOperand(0); @@ -4042,7 +4041,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, DAG.getIntPtrConstant(0)); - } else if (MVT::getSizeInBits(VT) == 64) { + } else if (VT.getSizeInBits() == 64) { // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught // to match extract_elt for f64. @@ -4053,11 +4052,11 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { // UNPCKHPD the element to the lowest double word, then movsd. // Note if the lower 64 bits of the result of the UNPCKHPD is then stored // to a f64mem, the whole operation is folded into a single MOVHPDmr. - MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); + MVT MaskVT = MVT::getIntVectorWithNumElements(4); SmallVector<SDOperand, 8> IdxVec; - IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT))); + IdxVec.push_back(DAG.getConstant(1, MaskVT.getVectorElementType())); IdxVec. 
- push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); + push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType())); SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &IdxVec[0], IdxVec.size()); SDOperand Vec = Op.getOperand(0); @@ -4072,15 +4071,15 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { SDOperand X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG){ - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType EVT = MVT::getVectorElementType(VT); + MVT VT = Op.getValueType(); + MVT EVT = VT.getVectorElementType(); SDOperand N0 = Op.getOperand(0); SDOperand N1 = Op.getOperand(1); SDOperand N2 = Op.getOperand(2); - if ((MVT::getSizeInBits(EVT) == 8) || (MVT::getSizeInBits(EVT) == 16)) { - unsigned Opc = (MVT::getSizeInBits(EVT) == 8) ? X86ISD::PINSRB + if ((EVT.getSizeInBits() == 8) || (EVT.getSizeInBits() == 16)) { + unsigned Opc = (EVT.getSizeInBits() == 8) ? X86ISD::PINSRB : X86ISD::PINSRW; // Transform it so it match pinsr{b,w} which expects a GR32 as its second // argument. @@ -4106,8 +4105,8 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG){ SDOperand X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType EVT = MVT::getVectorElementType(VT); + MVT VT = Op.getValueType(); + MVT EVT = VT.getVectorElementType(); if (Subtarget->hasSSE41()) return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG); @@ -4119,7 +4118,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { SDOperand N1 = Op.getOperand(1); SDOperand N2 = Op.getOperand(2); - if (MVT::getSizeInBits(EVT) == 16) { + if (EVT.getSizeInBits() == 16) { // Transform it so it match pinsrw which expects a 16-bit value in a GR32 // as its second argument. if (N1.getValueType() != MVT::i32) @@ -4134,8 +4133,8 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { SDOperand X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); - MVT::ValueType VT = MVT::v2i32; - switch (Op.getValueType()) { + MVT VT = MVT::v2i32; + switch (Op.getValueType().getSimpleVT()) { default: break; case MVT::v16i8: case MVT::v8i16: @@ -4201,7 +4200,7 @@ X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit static SDOperand LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, - const MVT::ValueType PtrVT) { + const MVT PtrVT) { SDOperand InFlag; SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX, DAG.getNode(X86ISD::GlobalBaseReg, @@ -4239,7 +4238,7 @@ LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit static SDOperand LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, - const MVT::ValueType PtrVT) { + const MVT PtrVT) { SDOperand InFlag, Chain; // emit leaq symbol@TLSGD(%rip), %rdi @@ -4271,9 +4270,8 @@ LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, // Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or // "local exec" model. 
-static SDOperand -LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, - const MVT::ValueType PtrVT) { +static SDOperand LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, + const MVT PtrVT) { // Get the Thread Pointer SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT); // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial @@ -4346,8 +4344,8 @@ SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { /// take a 2 x i32 value to shift plus a shift amount. SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { assert(Op.getNumOperands() == 3 && "Not a double-shift!"); - MVT::ValueType VT = Op.getValueType(); - unsigned VTBits = MVT::getSizeInBits(VT); + MVT VT = Op.getValueType(); + unsigned VTBits = VT.getSizeInBits(); bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; SDOperand ShOpLo = Op.getOperand(0); SDOperand ShOpHi = Op.getOperand(1); @@ -4365,7 +4363,7 @@ SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, VT, ShOpHi, ShAmt); } - const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); + const MVT *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, DAG.getConstant(VTBits, MVT::i8)); SDOperand Cond = DAG.getNode(X86ISD::CMP, VT, @@ -4411,7 +4409,7 @@ SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { } SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); + MVT SrcVT = Op.getOperand(0).getValueType(); assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 && "Unknown SINT_TO_FP to lower!"); @@ -4422,7 +4420,7 @@ SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { Subtarget->is64Bit()) return SDOperand(); - unsigned Size = MVT::getSizeInBits(SrcVT)/8; + unsigned Size = SrcVT.getSizeInBits()/8; MachineFunction &MF = DAG.getMachineFunction(); int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); @@ -4487,11 +4485,11 @@ FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary // stack slot. 
MachineFunction &MF = DAG.getMachineFunction(); - unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; + unsigned MemSize = Op.getValueType().getSizeInBits()/8; int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); unsigned Opc; - switch (Op.getValueType()) { + switch (Op.getValueType().getSimpleVT()) { default: assert(0 && "Invalid FP_TO_SINT to lower!"); case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; @@ -4543,10 +4541,10 @@ SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) { } SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType EltVT = VT; - if (MVT::isVector(VT)) - EltVT = MVT::getVectorElementType(VT); + MVT VT = Op.getValueType(); + MVT EltVT = VT; + if (VT.isVector()) + EltVT = VT.getVectorElementType(); std::vector<Constant*> CV; if (EltVT == MVT::f64) { Constant *C = ConstantFP::get(APFloat(APInt(64, ~(1ULL << 63)))); @@ -4568,12 +4566,12 @@ SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { } SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType EltVT = VT; + MVT VT = Op.getValueType(); + MVT EltVT = VT; unsigned EltNum = 1; - if (MVT::isVector(VT)) { - EltVT = MVT::getVectorElementType(VT); - EltNum = MVT::getVectorNumElements(VT); + if (VT.isVector()) { + EltVT = VT.getVectorElementType(); + EltNum = VT.getVectorNumElements(); } std::vector<Constant*> CV; if (EltVT == MVT::f64) { @@ -4592,7 +4590,7 @@ SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, PseudoSourceValue::getConstantPool(), 0, false, 16); - if (MVT::isVector(VT)) { + if (VT.isVector()) { return DAG.getNode(ISD::BIT_CONVERT, VT, DAG.getNode(ISD::XOR, MVT::v2i64, DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)), @@ -4605,16 +4603,16 @@ SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { SDOperand Op0 = Op.getOperand(0); SDOperand Op1 = Op.getOperand(1); - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType SrcVT = Op1.getValueType(); + MVT VT = Op.getValueType(); + MVT SrcVT = Op1.getValueType(); // If second operand is smaller, extend it first. - if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { + if (SrcVT.getSizeInBits() < VT.getSizeInBits()) { Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); SrcVT = VT; } // And if it is bigger, shrink it first. - if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { + if (SrcVT.getSizeInBits() > VT.getSizeInBits()) { Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1)); SrcVT = VT; } @@ -4641,7 +4639,7 @@ SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); // Shift sign bit right or left if the two operands have different types. - if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { + if (SrcVT.getSizeInBits() > VT.getSizeInBits()) { // Op0 is MVT::f32, Op1 is MVT::f64. 
SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, @@ -4680,7 +4678,7 @@ SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { SDOperand Op1 = Op.getOperand(1); SDOperand CC = Op.getOperand(2); ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); - bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); + bool isFP = Op.getOperand(1).getValueType().isFloatingPoint(); unsigned X86CC; if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, @@ -4728,10 +4726,10 @@ SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { SDOperand Cmp = Cond.getOperand(1); unsigned Opc = Cmp.getOpcode(); - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); bool IllegalFPCMov = false; - if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) && + if (VT.isFloatingPoint() && !VT.isVector() && !isScalarFPTypeInSSEReg(VT)) // FPStack? IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); @@ -4748,7 +4746,7 @@ SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); } - const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(), + const MVT *VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag); SmallVector<SDOperand, 4> Ops; // X86ISD::CMOV means set the result (which is operand 1) to the RHS if @@ -4812,8 +4810,8 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, SDOperand Flag; - MVT::ValueType IntPtr = getPointerTy(); - MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32; + MVT IntPtr = getPointerTy(); + MVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32; Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag); Flag = Chain.getValue(1); @@ -4828,7 +4826,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1); - std::vector<MVT::ValueType> Tys; + std::vector<MVT> Tys; Tys.push_back(SPTy); Tys.push_back(MVT::Other); SDOperand Ops1[2] = { Chain.getValue(0), Chain }; @@ -4855,7 +4853,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src); if (const char *bzeroEntry = V && V->isNullValue() ? Subtarget->getBZeroEntry() : 0) { - MVT::ValueType IntPtr = getPointerTy(); + MVT IntPtr = getPointerTy(); const Type *IntPtrTy = getTargetData()->getIntPtrType(); TargetLowering::ArgListTy Args; TargetLowering::ArgListEntry Entry; @@ -4877,7 +4875,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, uint64_t SizeVal = ConstantSize->getValue(); SDOperand InFlag(0, 0); - MVT::ValueType AVT; + MVT AVT; SDOperand Count; ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Src); unsigned BytesLeft = 0; @@ -4912,7 +4910,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, } if (AVT > MVT::i8) { - unsigned UBytes = MVT::getSizeInBits(AVT) / 8; + unsigned UBytes = AVT.getSizeInBits() / 8; Count = DAG.getIntPtrConstant(SizeVal / UBytes); BytesLeft = SizeVal % UBytes; } @@ -4944,7 +4942,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, if (TwoRepStos) { InFlag = Chain.getValue(1); Count = Size; - MVT::ValueType CVT = Count.getValueType(); + MVT CVT = Count.getValueType(); SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? 
X86::RCX : X86::ECX, @@ -4959,8 +4957,8 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, } else if (BytesLeft) { // Handle the last 1 - 7 bytes. unsigned Offset = SizeVal - BytesLeft; - MVT::ValueType AddrVT = Dst.getValueType(); - MVT::ValueType SizeVT = Size.getValueType(); + MVT AddrVT = Dst.getValueType(); + MVT SizeVT = Size.getValueType(); Chain = DAG.getMemset(Chain, DAG.getNode(ISD::ADD, AddrVT, Dst, @@ -4992,7 +4990,7 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold()) return SDOperand(); - MVT::ValueType AVT; + MVT AVT; unsigned BytesLeft = 0; if (Align >= 8 && Subtarget->is64Bit()) AVT = MVT::i64; @@ -5003,7 +5001,7 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, else AVT = MVT::i8; - unsigned UBytes = MVT::getSizeInBits(AVT) / 8; + unsigned UBytes = AVT.getSizeInBits() / 8; unsigned CountVal = SizeVal / UBytes; SDOperand Count = DAG.getIntPtrConstant(CountVal); BytesLeft = SizeVal % UBytes; @@ -5031,9 +5029,9 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, if (BytesLeft) { // Handle the last 1 - 7 bytes. unsigned Offset = SizeVal - BytesLeft; - MVT::ValueType DstVT = Dst.getValueType(); - MVT::ValueType SrcVT = Src.getValueType(); - MVT::ValueType SizeVT = Size.getValueType(); + MVT DstVT = Dst.getValueType(); + MVT SrcVT = Src.getValueType(); + MVT SizeVT = Size.getValueType(); Results.push_back(DAG.getMemcpy(Chain, DAG.getNode(ISD::ADD, DstVT, Dst, DAG.getConstant(Offset, DstVT)), @@ -5280,7 +5278,7 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { return SDOperand(); unsigned NewIntNo = 0; - MVT::ValueType ShAmtVT = MVT::v4i32; + MVT ShAmtVT = MVT::v4i32; switch (IntNo) { case Intrinsic::x86_sse2_pslli_w: NewIntNo = Intrinsic::x86_sse2_psll_w; @@ -5338,7 +5336,7 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { break; } } - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); ShAmt = DAG.getNode(ISD::BIT_CONVERT, VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, ShAmtVT, ShAmt)); return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VT, @@ -5554,7 +5552,7 @@ SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { const TargetMachine &TM = MF.getTarget(); const TargetFrameInfo &TFI = *TM.getFrameInfo(); unsigned StackAlignment = TFI.getStackAlignment(); - MVT::ValueType VT = Op.getValueType(); + MVT VT = Op.getValueType(); // Save FP Control Word to stack slot int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment); @@ -5586,14 +5584,14 @@ SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { DAG.getConstant(3, MVT::i16)); - return DAG.getNode((MVT::getSizeInBits(VT) < 16 ? + return DAG.getNode((VT.getSizeInBits() < 16 ? 
ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); } SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType OpVT = VT; - unsigned NumBits = MVT::getSizeInBits(VT); + MVT VT = Op.getValueType(); + MVT OpVT = VT; + unsigned NumBits = VT.getSizeInBits(); Op = Op.getOperand(0); if (VT == MVT::i8) { @@ -5623,9 +5621,9 @@ SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) { } SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType VT = Op.getValueType(); - MVT::ValueType OpVT = VT; - unsigned NumBits = MVT::getSizeInBits(VT); + MVT VT = Op.getValueType(); + MVT OpVT = VT; + unsigned NumBits = VT.getSizeInBits(); Op = Op.getOperand(0); if (VT == MVT::i8) { @@ -5651,10 +5649,12 @@ SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) { } SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) { - MVT::ValueType T = cast<AtomicSDNode>(Op.Val)->getVT(); + MVT T = cast<AtomicSDNode>(Op.Val)->getVT(); unsigned Reg = 0; unsigned size = 0; - switch(T) { + switch(T.getSimpleVT()) { + default: + assert(false && "Invalid value type!"); case MVT::i8: Reg = X86::AL; size = 1; break; case MVT::i16: Reg = X86::AX; size = 2; break; case MVT::i32: Reg = X86::EAX; size = 4; break; @@ -5680,7 +5680,7 @@ SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) { } SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) { - MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT(); + MVT T = cast<AtomicSDNode>(Op)->getVT(); assert (T == MVT::i64 && "Only know how to expand i64 CAS"); SDOperand cpInL, cpInH; cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), @@ -5716,7 +5716,7 @@ SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) { } SDNode* X86TargetLowering::ExpandATOMIC_LSS(SDNode* Op, SelectionDAG &DAG) { - MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT(); + MVT T = cast<AtomicSDNode>(Op)->getVT(); assert (T == MVT::i32 && "Only know how to expand i32 LSS"); SDOperand negOp = DAG.getNode(ISD::SUB, T, DAG.getConstant(0, T), Op->getOperand(2)); @@ -5900,12 +5900,11 @@ bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const { return Subtarget->is64Bit() || NumBits1 < 64; } -bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1, - MVT::ValueType VT2) const { - if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2)) +bool X86TargetLowering::isTruncateFree(MVT VT1, MVT VT2) const { + if (!VT1.isInteger() || !VT2.isInteger()) return false; - unsigned NumBits1 = MVT::getSizeInBits(VT1); - unsigned NumBits2 = MVT::getSizeInBits(VT2); + unsigned NumBits1 = VT1.getSizeInBits(); + unsigned NumBits2 = VT2.getSizeInBits(); if (NumBits1 <= NumBits2) return false; return Subtarget->is64Bit() || NumBits1 < 64; @@ -5916,9 +5915,9 @@ bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1, /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values /// are assumed to be legal. bool -X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { +X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT VT) const { // Only do shuffles on 128-bit vector types for now. 
- if (MVT::getSizeInBits(VT) == 64) return false; + if (VT.getSizeInBits() == 64) return false; return (Mask.Val->getNumOperands() <= 4 || isIdentityMask(Mask.Val) || isIdentityMask(Mask.Val, true) || @@ -5932,11 +5931,10 @@ X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { bool X86TargetLowering::isVectorClearMaskLegal(const std::vector<SDOperand> &BVOps, - MVT::ValueType EVT, - SelectionDAG &DAG) const { + MVT EVT, SelectionDAG &DAG) const { unsigned NumElts = BVOps.size(); // Only do shuffles on 128-bit vector types for now. - if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; + if (EVT.getSizeInBits() * NumElts == 64) return false; if (NumElts == 2) return true; if (NumElts == 4) { return (isMOVLMask(&BVOps[0], 4) || @@ -6342,7 +6340,7 @@ static bool isBaseAlignmentOfN(unsigned N, SDNode *Base, } static bool EltsFromConsecutiveLoads(SDNode *N, SDOperand PermMask, - unsigned NumElems, MVT::ValueType EVT, + unsigned NumElems, MVT EVT, SDNode *&Base, SelectionDAG &DAG, MachineFrameInfo *MFI, const TargetLowering &TLI) { @@ -6370,7 +6368,7 @@ static bool EltsFromConsecutiveLoads(SDNode *N, SDOperand PermMask, continue; if (!TLI.isConsecutiveLoad(Elt.Val, Base, - MVT::getSizeInBits(EVT)/8, i, MFI)) + EVT.getSizeInBits()/8, i, MFI)) return false; } return true; @@ -6383,8 +6381,8 @@ static bool EltsFromConsecutiveLoads(SDNode *N, SDOperand PermMask, static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI) { MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); - MVT::ValueType VT = N->getValueType(0); - MVT::ValueType EVT = MVT::getVectorElementType(VT); + MVT VT = N->getValueType(0); + MVT EVT = VT.getVectorElementType(); SDOperand PermMask = N->getOperand(2); unsigned NumElems = PermMask.getNumOperands(); SDNode *Base = NULL; @@ -6411,8 +6409,8 @@ static SDOperand PerformBuildVectorCombine(SDNode *N, SelectionDAG &DAG, if (NumOps == 1) return SDOperand(); - MVT::ValueType VT = N->getValueType(0); - MVT::ValueType EVT = MVT::getVectorElementType(VT); + MVT VT = N->getValueType(0); + MVT EVT = VT.getVectorElementType(); if ((EVT != MVT::i64 && EVT != MVT::f64) || Subtarget->is64Bit()) // We are looking for load i64 and zero extend. We want to transform // it before legalizer has a chance to expand it. Also look for i64 @@ -6523,8 +6521,8 @@ static SDOperand PerformSTORECombine(SDNode *N, SelectionDAG &DAG, // A preferable solution to the general problem is to figure out the right // places to insert EMMS. This qualifies as a quick hack. StoreSDNode *St = cast<StoreSDNode>(N); - if (MVT::isVector(St->getValue().getValueType()) && - MVT::getSizeInBits(St->getValue().getValueType()) == 64 && + if (St->getValue().getValueType().isVector() && + St->getValue().getValueType().getSizeInBits() == 64 && isa<LoadSDNode>(St->getValue()) && !cast<LoadSDNode>(St->getValue())->isVolatile() && St->getChain().hasOneUse() && !St->isVolatile()) { @@ -6569,7 +6567,7 @@ static SDOperand PerformSTORECombine(SDNode *N, SelectionDAG &DAG, // Otherwise, lower to two 32-bit copies. 
SDOperand LoAddr = Ld->getBasePtr(); SDOperand HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr, - DAG.getConstant(MVT::i32, 4)); + DAG.getConstant(4, MVT::i32)); SDOperand LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr, Ld->getSrcValue(), Ld->getSrcValueOffset(), @@ -6589,7 +6587,7 @@ static SDOperand PerformSTORECombine(SDNode *N, SelectionDAG &DAG, LoAddr = St->getBasePtr(); HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr, - DAG.getConstant(MVT::i32, 4)); + DAG.getConstant(4, MVT::i32)); SDOperand LoSt = DAG.getStore(NewChain, LoLd, LoAddr, St->getSrcValue(), St->getSrcValueOffset(), @@ -6683,10 +6681,10 @@ X86TargetLowering::getConstraintType(const std::string &Constraint) const { /// with another that has more specific requirements based on the type of the /// corresponding operand. const char *X86TargetLowering:: -LowerXConstraint(MVT::ValueType ConstraintVT) const { +LowerXConstraint(MVT ConstraintVT) const { // FP X constraints get lowered to SSE1/2 registers if available, otherwise // 'f' like normal targets. - if (MVT::isFloatingPoint(ConstraintVT)) { + if (ConstraintVT.isFloatingPoint()) { if (Subtarget->hasSSE2()) return "Y"; if (Subtarget->hasSSE1()) @@ -6779,7 +6777,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op, std::vector<unsigned> X86TargetLowering:: getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const { + MVT VT) const { if (Constraint.size() == 1) { // FIXME: not handling fp-stack yet! switch (Constraint[0]) { // GCC X86 Constraint Letters @@ -6807,7 +6805,7 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint, std::pair<unsigned, const TargetRegisterClass*> X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const { + MVT VT) const { // First, see if this is a constraint that directly corresponds to an LLVM // register class. if (Constraint.size() == 1) { @@ -6843,8 +6841,8 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, // FALL THROUGH. case 'x': // SSE_REGS if SSE1 allowed if (!Subtarget->hasSSE1()) break; - - switch (VT) { + + switch (VT.getSimpleVT()) { default: break; // Scalar SSE types. case MVT::f32: diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index 0c67794..c7a43a4 100644 --- a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -345,8 +345,8 @@ namespace llvm { /// lowering. It returns MVT::iAny if SelectionDAG should be responsible for /// determining it. virtual - MVT::ValueType getOptimalMemOpType(uint64_t Size, unsigned Align, - bool isSrcConst, bool isSrcStr) const; + MVT getOptimalMemOpType(uint64_t Size, unsigned Align, + bool isSrcConst, bool isSrcStr) const; /// LowerOperation - Provide custom lowering hooks for some operations. 
/// @@ -369,7 +369,7 @@ namespace llvm { virtual const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType - Return the ISD::SETCC ValueType - virtual MVT::ValueType getSetCCResultType(const SDOperand &) const; + virtual MVT getSetCCResultType(const SDOperand &) const; /// computeMaskedBitsForTargetNode - Determine which of the bits specified /// in Mask are known to be either zero or one and return them in the @@ -390,9 +390,9 @@ namespace llvm { std::vector<unsigned> getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; - virtual const char *LowerXConstraint(MVT::ValueType ConstraintVT) const; + virtual const char *LowerXConstraint(MVT ConstraintVT) const; /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops /// vector. If it is invalid, don't add anything to Ops. @@ -407,7 +407,7 @@ namespace llvm { /// error, this returns a register number of 0. std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - MVT::ValueType VT) const; + MVT VT) const; /// isLegalAddressingMode - Return true if the addressing mode represented /// by AM is legal for this target, for a load/store of the specified type. @@ -417,26 +417,25 @@ namespace llvm { /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in /// register EAX to i16 by referencing its sub-register AX. virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const; - virtual bool isTruncateFree(MVT::ValueType VT1, MVT::ValueType VT2) const; + virtual bool isTruncateFree(MVT VT1, MVT VT2) const; /// isShuffleMaskLegal - Targets can use this to indicate that they only /// support *some* VECTOR_SHUFFLE operations, those with specific masks. /// By default, if a target supports the VECTOR_SHUFFLE node, all mask /// values are assumed to be legal. - virtual bool isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const; + virtual bool isShuffleMaskLegal(SDOperand Mask, MVT VT) const; /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. This is /// used by Targets can use this to indicate if there is a suitable /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant /// pool entry. virtual bool isVectorClearMaskLegal(const std::vector<SDOperand> &BVOps, - MVT::ValueType EVT, - SelectionDAG &DAG) const; + MVT EVT, SelectionDAG &DAG) const; /// ShouldShrinkFPConstant - If true, then instruction selection should /// seek to shrink the FP constant of the specified type to a smaller type /// in order to save space and / or reduce runtime. - virtual bool ShouldShrinkFPConstant(MVT::ValueType VT) const { + virtual bool ShouldShrinkFPConstant(MVT VT) const { // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more // expensive than a straight movsd. On the other hand, it's important to // shrink long double fp constant since fldt is very slow. @@ -456,7 +455,7 @@ namespace llvm { /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is /// computed in an SSE register, not on the X87 floating point stack. 
- bool isScalarFPTypeInSSEReg(MVT::ValueType VT) const { + bool isScalarFPTypeInSSEReg(MVT VT) const { return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2 (VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1 } diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index 1a5fd47..4384426 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -2193,14 +2193,14 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, // Emit the load instruction. SDNode *Load = 0; if (FoldedLoad) { - MVT::ValueType VT = *RC->vt_begin(); + MVT VT = *RC->vt_begin(); Load = DAG.getTargetNode(getLoadRegOpcode(RC, RI.getStackAlignment()), VT, MVT::Other, &AddrOps[0], AddrOps.size()); NewNodes.push_back(Load); } // Emit the data processing instruction. - std::vector<MVT::ValueType> VTs; + std::vector<MVT> VTs; const TargetRegisterClass *DstRC = 0; if (TID.getNumDefs() > 0) { const TargetOperandInfo &DstTOI = TID.OpInfo[0]; @@ -2209,7 +2209,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, VTs.push_back(*DstRC->vt_begin()); } for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) { - MVT::ValueType VT = N->getValueType(i); + MVT VT = N->getValueType(i); if (VT != MVT::Other && i >= (unsigned)TID.getNumDefs()) VTs.push_back(VT); } diff --git a/lib/Target/X86/X86IntelAsmPrinter.cpp b/lib/Target/X86/X86IntelAsmPrinter.cpp index a1031bd..09a40b8 100644 --- a/lib/Target/X86/X86IntelAsmPrinter.cpp +++ b/lib/Target/X86/X86IntelAsmPrinter.cpp @@ -120,7 +120,7 @@ void X86IntelAsmPrinter::printOp(const MachineOperand &MO, if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) { unsigned Reg = MO.getReg(); if (Modifier && strncmp(Modifier, "subreg", strlen("subreg")) == 0) { - MVT::ValueType VT = (strcmp(Modifier,"subreg64") == 0) ? + MVT VT = (strcmp(Modifier,"subreg64") == 0) ? MVT::i64 : ((strcmp(Modifier, "subreg32") == 0) ? MVT::i32 : ((strcmp(Modifier,"subreg16") == 0) ? MVT::i16 :MVT::i8)); Reg = getX86SubSuperRegister(Reg, VT); diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp index a75c1de..0bac850 100644 --- a/lib/Target/X86/X86RegisterInfo.cpp +++ b/lib/Target/X86/X86RegisterInfo.cpp @@ -946,8 +946,8 @@ unsigned X86RegisterInfo::getEHHandlerRegister() const { } namespace llvm { -unsigned getX86SubSuperRegister(unsigned Reg, MVT::ValueType VT, bool High) { - switch (VT) { +unsigned getX86SubSuperRegister(unsigned Reg, MVT VT, bool High) { + switch (VT.getSimpleVT()) { default: return Reg; case MVT::i8: if (High) { diff --git a/lib/Target/X86/X86RegisterInfo.h b/lib/Target/X86/X86RegisterInfo.h index 40fdfb0..93e8613 100644 --- a/lib/Target/X86/X86RegisterInfo.h +++ b/lib/Target/X86/X86RegisterInfo.h @@ -150,7 +150,7 @@ public: // getX86SubSuperRegister - X86 utility function. It returns the sub or super // register of a specific X86 register. // e.g. getX86SubSuperRegister(X86::EAX, MVT::i16) return X86:AX -unsigned getX86SubSuperRegister(unsigned, MVT::ValueType, bool High=false); +unsigned getX86SubSuperRegister(unsigned, MVT, bool High=false); } // End llvm namespace diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp index 732aa5b..4727c61 100644 --- a/lib/Transforms/Scalar/CodeGenPrepare.cpp +++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp @@ -333,11 +333,11 @@ static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum, Pass *P) { /// Return true if any changes are made. 
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){ // If this is a noop copy, - MVT::ValueType SrcVT = TLI.getValueType(CI->getOperand(0)->getType()); - MVT::ValueType DstVT = TLI.getValueType(CI->getType()); + MVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType()); + MVT DstVT = TLI.getValueType(CI->getType()); // This is an fp<->int conversion? - if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT)) + if (SrcVT.isInteger() != DstVT.isInteger()) return false; // If this is an extension, it will be a zero or sign extension, which diff --git a/lib/VMCore/Function.cpp b/lib/VMCore/Function.cpp index 2ab1194..7533185 100644 --- a/lib/VMCore/Function.cpp +++ b/lib/VMCore/Function.cpp @@ -336,7 +336,7 @@ std::string Intrinsic::getName(ID id, const Type **Tys, unsigned numTys) { std::string Result(Table[id]); for (unsigned i = 0; i < numTys; ++i) if (Tys[i]) - Result += "." + MVT::getValueTypeString(MVT::getValueType(Tys[i])); + Result += "." + MVT::getMVT(Tys[i]).getMVTString(); return Result; } diff --git a/lib/VMCore/ValueTypes.cpp b/lib/VMCore/ValueTypes.cpp index 88add7c..ca23284 100644 --- a/lib/VMCore/ValueTypes.cpp +++ b/lib/VMCore/ValueTypes.cpp @@ -1,4 +1,4 @@ -//===-- ValueTypes.cpp - Implementation of MVT::ValueType methods ---------===// +//===----------- ValueTypes.cpp - Implementation of MVT methods -----------===// // // The LLVM Compiler Infrastructure // @@ -17,17 +17,17 @@ #include "llvm/DerivedTypes.h" using namespace llvm; -/// MVT::getValueTypeString - This function returns value type as a string, -/// e.g. "i32". -std::string MVT::getValueTypeString(MVT::ValueType VT) { - switch (VT) { +/// getMVTString - This function returns value type as a string, e.g. "i32". +std::string MVT::getMVTString() const { + switch (V) { default: - if (isVector(VT)) - return "v" + utostr(getVectorNumElements(VT)) + - getValueTypeString(getVectorElementType(VT)); - if (isInteger(VT)) - return "i" + utostr(getSizeInBits(VT)); - assert(0 && "Invalid ValueType!"); + if (isVector()) + return "v" + utostr(getVectorNumElements()) + + getVectorElementType().getMVTString(); + if (isInteger()) + return "i" + utostr(getSizeInBits()); + assert(0 && "Invalid MVT!"); + return "?"; case MVT::i1: return "i1"; case MVT::i8: return "i8"; case MVT::i16: return "i16"; @@ -58,19 +58,20 @@ std::string MVT::getValueTypeString(MVT::ValueType VT) { } } -/// MVT::getTypeForValueType - This method returns an LLVM type corresponding -/// to the specified ValueType. Note that this will abort for types that cannot -/// be represented. -const Type *MVT::getTypeForValueType(MVT::ValueType VT) { - switch (VT) { +/// getTypeForMVT - This method returns an LLVM type corresponding to the +/// specified MVT. For integer types, this returns an unsigned type. Note +/// that this will abort for types that cannot be represented. 
+const Type *MVT::getTypeForMVT() const { + switch (V) { default: - if (isVector(VT)) - return VectorType::get(getTypeForValueType(getVectorElementType(VT)), - getVectorNumElements(VT)); - if (isInteger(VT)) - return IntegerType::get(getSizeInBits(VT)); - assert(0 && "ValueType does not correspond to LLVM type!"); - case MVT::isVoid:return Type::VoidTy; + if (isVector()) + return VectorType::get(getVectorElementType().getTypeForMVT(), + getVectorNumElements()); + if (isInteger()) + return IntegerType::get(getSizeInBits()); + assert(0 && "MVT does not correspond to LLVM type!"); + return Type::VoidTy; + case MVT::isVoid: return Type::VoidTy; case MVT::i1: return Type::Int1Ty; case MVT::i8: return Type::Int8Ty; case MVT::i16: return Type::Int16Ty; @@ -98,18 +99,19 @@ const Type *MVT::getTypeForValueType(MVT::ValueType VT) { } } -/// MVT::getValueType - Return the value type corresponding to the specified -/// type. This returns all pointers as MVT::iPTR. If HandleUnknown is true, -/// unknown types are returned as Other, otherwise they are invalid. -MVT::ValueType MVT::getValueType(const Type *Ty, bool HandleUnknown) { +/// getMVT - Return the value type corresponding to the specified type. This +/// returns all pointers as MVT::iPTR. If HandleUnknown is true, unknown types +/// are returned as Other, otherwise they are invalid. +MVT MVT::getMVT(const Type *Ty, bool HandleUnknown){ switch (Ty->getTypeID()) { default: if (HandleUnknown) return MVT::Other; assert(0 && "Unknown type!"); + return MVT::isVoid; case Type::VoidTyID: return MVT::isVoid; case Type::IntegerTyID: - return getIntegerType(cast<IntegerType>(Ty)->getBitWidth()); + return getIntegerVT(cast<IntegerType>(Ty)->getBitWidth()); case Type::FloatTyID: return MVT::f32; case Type::DoubleTyID: return MVT::f64; case Type::X86_FP80TyID: return MVT::f80; @@ -118,8 +120,8 @@ MVT::ValueType MVT::getValueType(const Type *Ty, bool HandleUnknown) { case Type::PointerTyID: return MVT::iPTR; case Type::VectorTyID: { const VectorType *VTy = cast<VectorType>(Ty); - return getVectorType(getValueType(VTy->getElementType(), false), - VTy->getNumElements()); + return getVectorVT(getMVT(VTy->getElementType(), false), + VTy->getNumElements()); } } } diff --git a/lib/VMCore/Verifier.cpp b/lib/VMCore/Verifier.cpp index bfa2e65..ac529e3 100644 --- a/lib/VMCore/Verifier.cpp +++ b/lib/VMCore/Verifier.cpp @@ -1331,7 +1331,7 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, // Note that "arg#0" is the return type. for (unsigned ArgNo = 0; ArgNo < Count; ++ArgNo) { - MVT::ValueType VT = va_arg(VA, MVT::ValueType); + int VT = va_arg(VA, int); // An MVT::SimpleValueType when non-negative. 
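// Editorial aside, not part of the patch: a minimal sketch of the Type <-> MVT
// round trip that Function.cpp and this verifier code now rely on, assuming
// the post-patch API implemented in lib/VMCore/ValueTypes.cpp. The helper
// names below are hypothetical.
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/DerivedTypes.h"
#include <string>
using namespace llvm;

// e.g. the LLVM type <4 x i32> becomes MVT::v4i32 and prints as "v4i32",
// which is the suffix used when mangling overloaded intrinsic names.
static std::string mvtSuffixFor(const Type *Ty) {
  return MVT::getMVT(Ty).getMVTString();
}

// Inverse direction; getTypeForMVT() aborts for MVTs with no LLVM equivalent.
static const Type *llvmTypeFor(MVT VT) {
  return VT.getTypeForMVT();
}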
if (VT == MVT::isVoid && ArgNo > 0) { if (!FTy->isVarArg()) @@ -1351,8 +1351,8 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, EltTy = VTy->getElementType(); NumElts = VTy->getNumElements(); } - - if ((int)VT < 0) { + + if (VT < 0) { int Match = ~VT; if (Match == 0) { if (Ty != FTy->getReturnType()) { @@ -1403,7 +1403,7 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, Suffix += "."; if (EltTy != Ty) Suffix += "v" + utostr(NumElts); - Suffix += MVT::getValueTypeString(MVT::getValueType(EltTy)); + Suffix += MVT::getMVT(EltTy).getMVTString(); } else if (VT == MVT::iPTR) { if (!isa<PointerType>(Ty)) { if (ArgNo == 0) @@ -1414,19 +1414,20 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, "pointer and a pointer is required.", F); break; } - } else if (MVT::isVector(VT)) { + } else if (MVT((MVT::SimpleValueType)VT).isVector()) { + MVT VVT = MVT((MVT::SimpleValueType)VT); // If this is a vector argument, verify the number and type of elements. - if (MVT::getVectorElementType(VT) != MVT::getValueType(EltTy)) { + if (VVT.getVectorElementType() != MVT::getMVT(EltTy)) { CheckFailed("Intrinsic prototype has incorrect vector element type!", F); break; } - if (MVT::getVectorNumElements(VT) != NumElts) { + if (VVT.getVectorNumElements() != NumElts) { CheckFailed("Intrinsic prototype has incorrect number of " "vector elements!",F); break; } - } else if (MVT::getTypeForValueType(VT) != EltTy) { + } else if (MVT((MVT::SimpleValueType)VT).getTypeForMVT() != EltTy) { if (ArgNo == 0) CheckFailed("Intrinsic prototype has incorrect result type!", F); else diff --git a/utils/TableGen/CallingConvEmitter.cpp b/utils/TableGen/CallingConvEmitter.cpp index a211b6a..befad6f 100644 --- a/utils/TableGen/CallingConvEmitter.cpp +++ b/utils/TableGen/CallingConvEmitter.cpp @@ -26,9 +26,9 @@ void CallingConvEmitter::run(std::ostream &O) { // other. for (unsigned i = 0, e = CCs.size(); i != e; ++i) { O << "static bool " << CCs[i]->getName() - << "(unsigned ValNo, MVT::ValueType ValVT,\n" + << "(unsigned ValNo, MVT ValVT,\n" << std::string(CCs[i]->getName().size()+13, ' ') - << "MVT::ValueType LocVT, CCValAssign::LocInfo LocInfo,\n" + << "MVT LocVT, CCValAssign::LocInfo LocInfo,\n" << std::string(CCs[i]->getName().size()+13, ' ') << "ISD::ArgFlagsTy ArgFlags, CCState &State);\n"; } @@ -44,9 +44,9 @@ void CallingConvEmitter::EmitCallingConv(Record *CC, std::ostream &O) { Counter = 0; O << "\n\nstatic bool " << CC->getName() - << "(unsigned ValNo, MVT::ValueType ValVT,\n" + << "(unsigned ValNo, MVT ValVT,\n" << std::string(CC->getName().size()+13, ' ') - << "MVT::ValueType LocVT, CCValAssign::LocInfo LocInfo,\n" + << "MVT LocVT, CCValAssign::LocInfo LocInfo,\n" << std::string(CC->getName().size()+13, ' ') << "ISD::ArgFlagsTy ArgFlags, CCState &State) {\n"; // Emit all of the actions, in order. 
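For orientation, the calling-convention functions this TableGen backend emits now take plain MVT parameters; a rough sketch of the generated shape is below. This is illustrative only, assuming the post-patch CCAssignFn signature and the CCState/CCValAssign API in include/llvm/CodeGen/CallingConvLower.h; the function name CC_Example and the register X86::EAX are placeholders, not output of the emitter.

static bool CC_Example(unsigned ValNo, MVT ValVT,
                       MVT LocVT, CCValAssign::LocInfo LocInfo,
                       ISD::ArgFlagsTy ArgFlags, CCState &State) {
  // Try to assign a register first (placeholder register).
  if (unsigned Reg = State.AllocateReg(X86::EAX)) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }
  // Otherwise assign a stack slot; size and alignment now come from
  // LocVT.getTypeForMVT() rather than MVT::getTypeForValueType(LocVT).
  unsigned Offset = State.AllocateStack(
      State.getTarget().getTargetData()->getABITypeSize(LocVT.getTypeForMVT()),
      State.getTarget().getTargetData()->getABITypeAlignment(LocVT.getTypeForMVT()));
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;
}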
@@ -163,12 +163,12 @@ void CallingConvEmitter::EmitAction(Record *Action, O << Size << ", "; else O << "\n" << IndentStr << " State.getTarget().getTargetData()" - "->getABITypeSize(MVT::getTypeForValueType(LocVT)), "; + "->getABITypeSize(LocVT.getTypeForMVT()), "; if (Align) O << Align; else O << "\n" << IndentStr << " State.getTarget().getTargetData()" - "->getABITypeAlignment(MVT::getTypeForValueType(LocVT))"; + "->getABITypeAlignment(LocVT.getTypeForMVT())"; O << ");\n" << IndentStr << "State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset" << Counter << ", LocVT, LocInfo));\n"; diff --git a/utils/TableGen/CodeGenDAGPatterns.cpp b/utils/TableGen/CodeGenDAGPatterns.cpp index 8c46b35..882c724 100644 --- a/utils/TableGen/CodeGenDAGPatterns.cpp +++ b/utils/TableGen/CodeGenDAGPatterns.cpp @@ -27,9 +27,9 @@ using namespace llvm; /// FilterVTs - Filter a list of VT's according to a predicate. /// template<typename T> -static std::vector<MVT::ValueType> -FilterVTs(const std::vector<MVT::ValueType> &InVTs, T Filter) { - std::vector<MVT::ValueType> Result; +static std::vector<MVT::SimpleValueType> +FilterVTs(const std::vector<MVT::SimpleValueType> &InVTs, T Filter) { + std::vector<MVT::SimpleValueType> Result; for (unsigned i = 0, e = InVTs.size(); i != e; ++i) if (Filter(InVTs[i])) Result.push_back(InVTs[i]); @@ -41,19 +41,31 @@ static std::vector<unsigned char> FilterEVTs(const std::vector<unsigned char> &InVTs, T Filter) { std::vector<unsigned char> Result; for (unsigned i = 0, e = InVTs.size(); i != e; ++i) - if (Filter((MVT::ValueType)InVTs[i])) + if (Filter((MVT::SimpleValueType)InVTs[i])) Result.push_back(InVTs[i]); return Result; } static std::vector<unsigned char> -ConvertVTs(const std::vector<MVT::ValueType> &InVTs) { +ConvertVTs(const std::vector<MVT::SimpleValueType> &InVTs) { std::vector<unsigned char> Result; for (unsigned i = 0, e = InVTs.size(); i != e; ++i) Result.push_back(InVTs[i]); return Result; } +static inline bool isInteger(MVT::SimpleValueType VT) { + return MVT(VT).isInteger(); +} + +static inline bool isFloatingPoint(MVT::SimpleValueType VT) { + return MVT(VT).isFloatingPoint(); +} + +static inline bool isVector(MVT::SimpleValueType VT) { + return MVT(VT).isVector(); +} + static bool LHSIsSubsetOfRHS(const std::vector<unsigned char> &LHS, const std::vector<unsigned char> &RHS) { if (LHS.size() > RHS.size()) return false; @@ -66,7 +78,7 @@ static bool LHSIsSubsetOfRHS(const std::vector<unsigned char> &LHS, /// isExtIntegerVT - Return true if the specified extended value type vector /// contains isInt or an integer value type. namespace llvm { -namespace MVT { +namespace EMVT { bool isExtIntegerInVTs(const std::vector<unsigned char> &EVTs) { assert(!EVTs.empty() && "Cannot check for integer in empty ExtVT list!"); return EVTs[0] == isInt || !(FilterEVTs(EVTs, isInteger).empty()); @@ -78,7 +90,7 @@ bool isExtFloatingPointInVTs(const std::vector<unsigned char> &EVTs) { assert(!EVTs.empty() && "Cannot check for integer in empty ExtVT list!"); return EVTs[0] == isFP || !(FilterEVTs(EVTs, isFloatingPoint).empty()); } -} // end namespace MVT. +} // end namespace EMVT. } // end namespace llvm. @@ -223,23 +235,23 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N, } case SDTCisInt: { // If there is only one integer type supported, this must be it. 
- std::vector<MVT::ValueType> IntVTs = - FilterVTs(CGT.getLegalValueTypes(), MVT::isInteger); + std::vector<MVT::SimpleValueType> IntVTs = + FilterVTs(CGT.getLegalValueTypes(), isInteger); // If we found exactly one supported integer type, apply it. if (IntVTs.size() == 1) return NodeToApply->UpdateNodeType(IntVTs[0], TP); - return NodeToApply->UpdateNodeType(MVT::isInt, TP); + return NodeToApply->UpdateNodeType(EMVT::isInt, TP); } case SDTCisFP: { // If there is only one FP type supported, this must be it. - std::vector<MVT::ValueType> FPVTs = - FilterVTs(CGT.getLegalValueTypes(), MVT::isFloatingPoint); + std::vector<MVT::SimpleValueType> FPVTs = + FilterVTs(CGT.getLegalValueTypes(), isFloatingPoint); // If we found exactly one supported FP type, apply it. if (FPVTs.size() == 1) return NodeToApply->UpdateNodeType(FPVTs[0], TP); - return NodeToApply->UpdateNodeType(MVT::isFP, TP); + return NodeToApply->UpdateNodeType(EMVT::isFP, TP); } case SDTCisSameAs: { TreePatternNode *OtherNode = @@ -255,9 +267,9 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N, !static_cast<DefInit*>(NodeToApply->getLeafValue())->getDef() ->isSubClassOf("ValueType")) TP.error(N->getOperator()->getName() + " expects a VT operand!"); - MVT::ValueType VT = + MVT::SimpleValueType VT = getValueType(static_cast<DefInit*>(NodeToApply->getLeafValue())->getDef()); - if (!MVT::isInteger(VT)) + if (!isInteger(VT)) TP.error(N->getOperator()->getName() + " VT operand must be integer!"); TreePatternNode *OtherNode = @@ -265,7 +277,7 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N, // It must be integer. bool MadeChange = false; - MadeChange |= OtherNode->UpdateNodeType(MVT::isInt, TP); + MadeChange |= OtherNode->UpdateNodeType(EMVT::isInt, TP); // This code only handles nodes that have one type set. Assert here so // that we can change this if we ever need to deal with multiple value @@ -285,26 +297,26 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N, // This code does not currently handle nodes which have multiple types, // where some types are integer, and some are fp. Assert that this is not // the case. 
- assert(!(MVT::isExtIntegerInVTs(NodeToApply->getExtTypes()) && - MVT::isExtFloatingPointInVTs(NodeToApply->getExtTypes())) && - !(MVT::isExtIntegerInVTs(BigOperand->getExtTypes()) && - MVT::isExtFloatingPointInVTs(BigOperand->getExtTypes())) && + assert(!(EMVT::isExtIntegerInVTs(NodeToApply->getExtTypes()) && + EMVT::isExtFloatingPointInVTs(NodeToApply->getExtTypes())) && + !(EMVT::isExtIntegerInVTs(BigOperand->getExtTypes()) && + EMVT::isExtFloatingPointInVTs(BigOperand->getExtTypes())) && "SDTCisOpSmallerThanOp does not handle mixed int/fp types!"); - if (MVT::isExtIntegerInVTs(NodeToApply->getExtTypes())) - MadeChange |= BigOperand->UpdateNodeType(MVT::isInt, TP); - else if (MVT::isExtFloatingPointInVTs(NodeToApply->getExtTypes())) - MadeChange |= BigOperand->UpdateNodeType(MVT::isFP, TP); - if (MVT::isExtIntegerInVTs(BigOperand->getExtTypes())) - MadeChange |= NodeToApply->UpdateNodeType(MVT::isInt, TP); - else if (MVT::isExtFloatingPointInVTs(BigOperand->getExtTypes())) - MadeChange |= NodeToApply->UpdateNodeType(MVT::isFP, TP); - - std::vector<MVT::ValueType> VTs = CGT.getLegalValueTypes(); - - if (MVT::isExtIntegerInVTs(NodeToApply->getExtTypes())) { - VTs = FilterVTs(VTs, MVT::isInteger); - } else if (MVT::isExtFloatingPointInVTs(NodeToApply->getExtTypes())) { - VTs = FilterVTs(VTs, MVT::isFloatingPoint); + if (EMVT::isExtIntegerInVTs(NodeToApply->getExtTypes())) + MadeChange |= BigOperand->UpdateNodeType(EMVT::isInt, TP); + else if (EMVT::isExtFloatingPointInVTs(NodeToApply->getExtTypes())) + MadeChange |= BigOperand->UpdateNodeType(EMVT::isFP, TP); + if (EMVT::isExtIntegerInVTs(BigOperand->getExtTypes())) + MadeChange |= NodeToApply->UpdateNodeType(EMVT::isInt, TP); + else if (EMVT::isExtFloatingPointInVTs(BigOperand->getExtTypes())) + MadeChange |= NodeToApply->UpdateNodeType(EMVT::isFP, TP); + + std::vector<MVT::SimpleValueType> VTs = CGT.getLegalValueTypes(); + + if (EMVT::isExtIntegerInVTs(NodeToApply->getExtTypes())) { + VTs = FilterVTs(VTs, isInteger); + } else if (EMVT::isExtFloatingPointInVTs(NodeToApply->getExtTypes())) { + VTs = FilterVTs(VTs, isFloatingPoint); } else { VTs.clear(); } @@ -331,11 +343,12 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N, getOperandNum(x.SDTCisIntVectorOfSameSize_Info.OtherOperandNum, N, NumResults); if (OtherOperand->hasTypeSet()) { - if (!MVT::isVector(OtherOperand->getTypeNum(0))) + if (!isVector(OtherOperand->getTypeNum(0))) TP.error(N->getOperator()->getName() + " VT operand must be a vector!"); - MVT::ValueType IVT = OtherOperand->getTypeNum(0); - IVT = MVT::getIntVectorWithNumElements(MVT::getVectorNumElements(IVT)); - return NodeToApply->UpdateNodeType(IVT, TP); + MVT IVT = OtherOperand->getTypeNum(0); + unsigned NumElements = IVT.getVectorNumElements(); + IVT = MVT::getIntVectorWithNumElements(NumElements); + return NodeToApply->UpdateNodeType(IVT.getSimpleVT(), TP); } return false; } @@ -344,11 +357,11 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N, getOperandNum(x.SDTCisIntVectorOfSameSize_Info.OtherOperandNum, N, NumResults); if (OtherOperand->hasTypeSet()) { - if (!MVT::isVector(OtherOperand->getTypeNum(0))) + if (!isVector(OtherOperand->getTypeNum(0))) TP.error(N->getOperator()->getName() + " VT operand must be a vector!"); - MVT::ValueType IVT = OtherOperand->getTypeNum(0); - IVT = MVT::getVectorElementType(IVT); - return NodeToApply->UpdateNodeType(IVT, TP); + MVT IVT = OtherOperand->getTypeNum(0); + IVT = IVT.getVectorElementType(); + return NodeToApply->UpdateNodeType(IVT.getSimpleVT(), 
TP); } return false; } @@ -421,7 +434,7 @@ bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs, TreePattern &TP) { assert(!ExtVTs.empty() && "Cannot update node type with empty type vector!"); - if (ExtVTs[0] == MVT::isUnknown || LHSIsSubsetOfRHS(getExtTypes(), ExtVTs)) + if (ExtVTs[0] == EMVT::isUnknown || LHSIsSubsetOfRHS(getExtTypes(), ExtVTs)) return false; if (isTypeCompletelyUnknown() || LHSIsSubsetOfRHS(ExtVTs, getExtTypes())) { setTypes(ExtVTs); @@ -429,10 +442,10 @@ bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs, } if (getExtTypeNum(0) == MVT::iPTR) { - if (ExtVTs[0] == MVT::iPTR || ExtVTs[0] == MVT::isInt) + if (ExtVTs[0] == MVT::iPTR || ExtVTs[0] == EMVT::isInt) return false; - if (MVT::isExtIntegerInVTs(ExtVTs)) { - std::vector<unsigned char> FVTs = FilterEVTs(ExtVTs, MVT::isInteger); + if (EMVT::isExtIntegerInVTs(ExtVTs)) { + std::vector<unsigned char> FVTs = FilterEVTs(ExtVTs, isInteger); if (FVTs.size()) { setTypes(ExtVTs); return true; @@ -440,17 +453,17 @@ bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs, } } - if (ExtVTs[0] == MVT::isInt && MVT::isExtIntegerInVTs(getExtTypes())) { + if (ExtVTs[0] == EMVT::isInt && EMVT::isExtIntegerInVTs(getExtTypes())) { assert(hasTypeSet() && "should be handled above!"); - std::vector<unsigned char> FVTs = FilterEVTs(getExtTypes(), MVT::isInteger); + std::vector<unsigned char> FVTs = FilterEVTs(getExtTypes(), isInteger); if (getExtTypes() == FVTs) return false; setTypes(FVTs); return true; } - if (ExtVTs[0] == MVT::iPTR && MVT::isExtIntegerInVTs(getExtTypes())) { + if (ExtVTs[0] == MVT::iPTR && EMVT::isExtIntegerInVTs(getExtTypes())) { //assert(hasTypeSet() && "should be handled above!"); - std::vector<unsigned char> FVTs = FilterEVTs(getExtTypes(), MVT::isInteger); + std::vector<unsigned char> FVTs = FilterEVTs(getExtTypes(), isInteger); if (getExtTypes() == FVTs) return false; if (FVTs.size()) { @@ -458,10 +471,10 @@ bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs, return true; } } - if (ExtVTs[0] == MVT::isFP && MVT::isExtFloatingPointInVTs(getExtTypes())) { + if (ExtVTs[0] == EMVT::isFP && EMVT::isExtFloatingPointInVTs(getExtTypes())) { assert(hasTypeSet() && "should be handled above!"); std::vector<unsigned char> FVTs = - FilterEVTs(getExtTypes(), MVT::isFloatingPoint); + FilterEVTs(getExtTypes(), isFloatingPoint); if (getExtTypes() == FVTs) return false; setTypes(FVTs); @@ -473,12 +486,14 @@ bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs, // // Similarly, we should probably set the type here to the intersection of // {isInt|isFP} and ExtVTs - if ((getExtTypeNum(0) == MVT::isInt && MVT::isExtIntegerInVTs(ExtVTs)) || - (getExtTypeNum(0) == MVT::isFP && MVT::isExtFloatingPointInVTs(ExtVTs))){ + if ((getExtTypeNum(0) == EMVT::isInt && + EMVT::isExtIntegerInVTs(ExtVTs)) || + (getExtTypeNum(0) == EMVT::isFP && + EMVT::isExtFloatingPointInVTs(ExtVTs))) { setTypes(ExtVTs); return true; } - if (getExtTypeNum(0) == MVT::isInt && ExtVTs[0] == MVT::iPTR) { + if (getExtTypeNum(0) == EMVT::isInt && ExtVTs[0] == MVT::iPTR) { setTypes(ExtVTs); return true; } @@ -506,9 +521,9 @@ void TreePatternNode::print(std::ostream &OS) const { // nodes that are multiply typed. 
switch (getExtTypeNum(0)) { case MVT::Other: OS << ":Other"; break; - case MVT::isInt: OS << ":isInt"; break; - case MVT::isFP : OS << ":isFP"; break; - case MVT::isUnknown: ; /*OS << ":?";*/ break; + case EMVT::isInt: OS << ":isInt"; break; + case EMVT::isFP : OS << ":isFP"; break; + case EMVT::isUnknown: ; /*OS << ":?";*/ break; case MVT::iPTR: OS << ":iPTR"; break; default: { std::string VTName = llvm::getName(getTypeNum(0)); @@ -672,7 +687,7 @@ TreePatternNode *TreePatternNode::InlinePatternFragments(TreePattern &TP) { static std::vector<unsigned char> getImplicitType(Record *R, bool NotRegisters, TreePattern &TP) { // Some common return values - std::vector<unsigned char> Unknown(1, MVT::isUnknown); + std::vector<unsigned char> Unknown(1, EMVT::isUnknown); std::vector<unsigned char> Other(1, MVT::Other); // Check to see if this is a register or a register class... @@ -740,27 +755,29 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { return UpdateNodeType(getImplicitType(DI->getDef(), NotRegisters, TP),TP); } else if (IntInit *II = dynamic_cast<IntInit*>(getLeafValue())) { // Int inits are always integers. :) - bool MadeChange = UpdateNodeType(MVT::isInt, TP); + bool MadeChange = UpdateNodeType(EMVT::isInt, TP); if (hasTypeSet()) { // At some point, it may make sense for this tree pattern to have // multiple types. Assert here that it does not, so we revisit this // code when appropriate. assert(getExtTypes().size() >= 1 && "TreePattern doesn't have a type!"); - MVT::ValueType VT = getTypeNum(0); + MVT::SimpleValueType VT = getTypeNum(0); for (unsigned i = 1, e = getExtTypes().size(); i != e; ++i) assert(getTypeNum(i) == VT && "TreePattern has too many types!"); VT = getTypeNum(0); if (VT != MVT::iPTR) { - unsigned Size = MVT::getSizeInBits(VT); + unsigned Size = MVT(VT).getSizeInBits(); // Make sure that the value is representable for this type. if (Size < 32) { int Val = (II->getValue() << (32-Size)) >> (32-Size); if (Val != II->getValue()) { // If sign-extended doesn't fit, does it fit as unsigned? - unsigned ValueMask = unsigned(MVT::getIntVTBitMask(VT)); - unsigned UnsignedVal = unsigned(II->getValue()); + unsigned ValueMask; + unsigned UnsignedVal; + ValueMask = unsigned(MVT(VT).getIntegerVTBitMask()); + UnsignedVal = unsigned(II->getValue()); if ((ValueMask & UnsignedVal) != UnsignedVal) { TP.error("Integer value '" + itostr(II->getValue())+ @@ -803,10 +820,10 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { return MadeChange; } else if (const CodeGenIntrinsic *Int = getIntrinsicInfo(CDP)) { bool MadeChange = false; - + // Apply the result type to the node. 
MadeChange = UpdateNodeType(Int->ArgVTs[0], TP); - + if (getNumChildren() != Int->ArgVTs.size()) TP.error("Intrinsic '" + Int->Name + "' expects " + utostr(Int->ArgVTs.size()-1) + " operands, not " + @@ -816,7 +833,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { MadeChange |= getChild(0)->UpdateNodeType(MVT::iPTR, TP); for (unsigned i = 1, e = getNumChildren(); i != e; ++i) { - MVT::ValueType OpVT = Int->ArgVTs[i]; + MVT::SimpleValueType OpVT = Int->ArgVTs[i]; MadeChange |= getChild(i)->UpdateNodeType(OpVT, TP); MadeChange |= getChild(i)->ApplyTypeConstraints(TP, NotRegisters); } @@ -838,11 +855,11 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { if (getOperator()->getName() == "vector_shuffle" && getChild(2)->getOperator()->getName() == "build_vector") { TreePatternNode *BV = getChild(2); - const std::vector<MVT::ValueType> &LegalVTs + const std::vector<MVT::SimpleValueType> &LegalVTs = CDP.getTargetInfo().getLegalValueTypes(); - MVT::ValueType LegalIntVT = MVT::Other; + MVT::SimpleValueType LegalIntVT = MVT::Other; for (unsigned i = 0, e = LegalVTs.size(); i != e; ++i) - if (MVT::isInteger(LegalVTs[i]) && !MVT::isVector(LegalVTs[i])) { + if (isInteger(LegalVTs[i]) && !isVector(LegalVTs[i])) { LegalIntVT = LegalVTs[i]; break; } @@ -874,7 +891,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { MadeChange = UpdateNodeType(VT, TP); } else if (ResultNode->getName() == "unknown") { std::vector<unsigned char> VT; - VT.push_back(MVT::isUnknown); + VT.push_back(EMVT::isUnknown); MadeChange = UpdateNodeType(VT, TP); } else { assert(ResultNode->isSubClassOf("RegisterClass") && @@ -903,7 +920,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { TP.error("Instruction '" + getOperator()->getName() + "' expects more operands than were provided."); - MVT::ValueType VT; + MVT::SimpleValueType VT; TreePatternNode *Child = getChild(ChildNo++); if (OperandNode->isSubClassOf("RegisterClass")) { const CodeGenRegisterClass &RC = @@ -915,7 +932,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { } else if (OperandNode->getName() == "ptr_rc") { MadeChange |= Child->UpdateNodeType(MVT::iPTR, TP); } else if (OperandNode->getName() == "unknown") { - MadeChange |= Child->UpdateNodeType(MVT::isUnknown, TP); + MadeChange |= Child->UpdateNodeType(EMVT::isUnknown, TP); } else { assert(0 && "Unknown operand type!"); abort(); diff --git a/utils/TableGen/CodeGenDAGPatterns.h b/utils/TableGen/CodeGenDAGPatterns.h index 0d29eb5..40cfa88 100644 --- a/utils/TableGen/CodeGenDAGPatterns.h +++ b/utils/TableGen/CodeGenDAGPatterns.h @@ -31,15 +31,15 @@ namespace llvm { class CodeGenDAGPatterns; class ComplexPattern; -/// MVT::DAGISelGenValueType - These are some extended forms of MVT::ValueType -/// that we use as lattice values during type inferrence. -namespace MVT { +/// EMVT::DAGISelGenValueType - These are some extended forms of +/// MVT::SimpleValueType that we use as lattice values during type inference. +namespace EMVT { enum DAGISelGenValueType { isFP = MVT::LAST_VALUETYPE, isInt, isUnknown }; - + /// isExtIntegerVT - Return true if the specified extended value type vector /// contains isInt or an integer value type. bool isExtIntegerInVTs(const std::vector<unsigned char> &EVTs); @@ -66,7 +66,7 @@ struct SDTypeConstraint { union { // The discriminated union. 
struct { - MVT::ValueType VT; + unsigned char VT; } SDTCisVT_Info; struct { unsigned OtherOperandNum; @@ -142,7 +142,7 @@ public: /// patterns), and as such should be ref counted. We currently just leak all /// TreePatternNode objects! class TreePatternNode { - /// The inferred type for this node, or MVT::isUnknown if it hasn't + /// The inferred type for this node, or EMVT::isUnknown if it hasn't /// been determined yet. std::vector<unsigned char> Types; @@ -170,10 +170,10 @@ class TreePatternNode { public: TreePatternNode(Record *Op, const std::vector<TreePatternNode*> &Ch) : Types(), Operator(Op), Val(0), TransformFn(0), - Children(Ch) { Types.push_back(MVT::isUnknown); } + Children(Ch) { Types.push_back(EMVT::isUnknown); } TreePatternNode(Init *val) // leaf ctor : Types(), Operator(0), Val(val), TransformFn(0) { - Types.push_back(MVT::isUnknown); + Types.push_back(EMVT::isUnknown); } ~TreePatternNode(); @@ -185,15 +185,15 @@ public: return (Types[0] < MVT::LAST_VALUETYPE) || (Types[0] == MVT::iPTR); } bool isTypeCompletelyUnknown() const { - return Types[0] == MVT::isUnknown; + return Types[0] == EMVT::isUnknown; } bool isTypeDynamicallyResolved() const { return Types[0] == MVT::iPTR; } - MVT::ValueType getTypeNum(unsigned Num) const { + MVT::SimpleValueType getTypeNum(unsigned Num) const { assert(hasTypeSet() && "Doesn't have a type yet!"); assert(Types.size() > Num && "Type num out of range!"); - return (MVT::ValueType)Types[Num]; + return (MVT::SimpleValueType)Types[Num]; } unsigned char getExtTypeNum(unsigned Num) const { assert(Types.size() > Num && "Extended type num out of range!"); @@ -201,7 +201,7 @@ public: } const std::vector<unsigned char> &getExtTypes() const { return Types; } void setTypes(const std::vector<unsigned char> &T) { Types = T; } - void removeTypes() { Types = std::vector<unsigned char>(1,MVT::isUnknown); } + void removeTypes() { Types = std::vector<unsigned char>(1, EMVT::isUnknown); } Init *getLeafValue() const { assert(isLeaf()); return Val; } Record *getOperator() const { assert(!isLeaf()); return Operator; } diff --git a/utils/TableGen/CodeGenIntrinsics.h b/utils/TableGen/CodeGenIntrinsics.h index 4515ebc..a66c30b 100644 --- a/utils/TableGen/CodeGenIntrinsics.h +++ b/utils/TableGen/CodeGenIntrinsics.h @@ -29,13 +29,13 @@ namespace llvm { std::string EnumName; // The name of the enum "bswap_i32" std::string GCCBuiltinName;// Name of the corresponding GCC builtin, or "". std::string TargetPrefix; // Target prefix, e.g. "ppc" for t-s intrinsics. - - /// ArgVTs - The MVT::ValueType for each argument type. Note that this list - /// is only populated when in the context of a target .td file. When - /// building Intrinsics.td, this isn't available, because we don't know the - /// target pointer size. - std::vector<MVT::ValueType> ArgVTs; - + + /// ArgVTs - The MVT::SimpleValueType for each argument type. Note that + /// this list is only populated when in the context of a target .td file. + /// When building Intrinsics.td, this isn't available, because we don't know + /// the target pointer size. + std::vector<MVT::SimpleValueType> ArgVTs; + /// ArgTypeDefs - The records for each argument type. 
     ///
     std::vector<Record*> ArgTypeDefs;
diff --git a/utils/TableGen/CodeGenRegisters.h b/utils/TableGen/CodeGenRegisters.h
index 0f1b501..6f8682b 100644
--- a/utils/TableGen/CodeGenRegisters.h
+++ b/utils/TableGen/CodeGenRegisters.h
@@ -36,7 +36,7 @@ namespace llvm {
     Record *TheDef;
     std::string Namespace;
     std::vector<Record*> Elements;
-    std::vector<MVT::ValueType> VTs;
+    std::vector<MVT::SimpleValueType> VTs;
     unsigned SpillSize;
     unsigned SpillAlignment;
     int CopyCost;
@@ -44,10 +44,10 @@ namespace llvm {
     std::string MethodProtos, MethodBodies;
     const std::string &getName() const;
-    const std::vector<MVT::ValueType> &getValueTypes() const { return VTs; }
+    const std::vector<MVT::SimpleValueType> &getValueTypes() const {return VTs;}
     unsigned getNumValueTypes() const { return VTs.size(); }
-    MVT::ValueType getValueTypeNum(unsigned VTNum) const {
+    MVT::SimpleValueType getValueTypeNum(unsigned VTNum) const {
       if (VTNum < VTs.size())
         return VTs[VTNum];
       assert(0 && "VTNum greater than number of ValueTypes in RegClass!");
diff --git a/utils/TableGen/CodeGenTarget.cpp b/utils/TableGen/CodeGenTarget.cpp
index 78d39ee..a76f5cd 100644
--- a/utils/TableGen/CodeGenTarget.cpp
+++ b/utils/TableGen/CodeGenTarget.cpp
@@ -27,13 +27,13 @@ static cl::opt<unsigned>
 AsmWriterNum("asmwriternum", cl::init(0),
              cl::desc("Make -gen-asm-writer emit assembly writer #N"));
-/// getValueType - Return the MCV::ValueType that the specified TableGen record
-/// corresponds to.
-MVT::ValueType llvm::getValueType(Record *Rec) {
-  return (MVT::ValueType)Rec->getValueAsInt("Value");
+/// getValueType - Return the MVT::SimpleValueType that the specified TableGen
+/// record corresponds to.
+MVT::SimpleValueType llvm::getValueType(Record *Rec) {
+  return (MVT::SimpleValueType)Rec->getValueAsInt("Value");
 }
-std::string llvm::getName(MVT::ValueType T) {
+std::string llvm::getName(MVT::SimpleValueType T) {
   switch (T) {
   case MVT::Other: return "UNKNOWN";
   case MVT::i1: return "MVT::i1";
@@ -69,7 +69,7 @@ std::string llvm::getName(MVT::ValueType T) {
   }
 }
-std::string llvm::getEnumName(MVT::ValueType T) {
+std::string llvm::getEnumName(MVT::SimpleValueType T) {
   switch (T) {
   case MVT::Other: return "MVT::Other";
   case MVT::i1: return "MVT::i1";
@@ -181,7 +181,7 @@ std::vector<unsigned char> CodeGenTarget::getRegisterVTs(Record *R) const {
     const CodeGenRegisterClass &RC = RegisterClasses[i];
     for (unsigned ei = 0, ee = RC.Elements.size(); ei != ee; ++ei) {
       if (R == RC.Elements[ei]) {
-        const std::vector<MVT::ValueType> &InVTs = RC.getValueTypes();
+        const std::vector<MVT::SimpleValueType> &InVTs = RC.getValueTypes();
         for (unsigned i = 0, e = InVTs.size(); i != e; ++i)
           Result.push_back(InVTs[i]);
       }
@@ -231,7 +231,7 @@ CodeGenRegisterClass::CodeGenRegisterClass(Record *R) : TheDef(R) {
   unsigned Size = R->getValueAsInt("Size");
   Namespace = R->getValueAsString("Namespace");
-  SpillSize = Size ? Size : MVT::getSizeInBits(VTs[0]);
+  SpillSize = Size ? Size : MVT(VTs[0]).getSizeInBits();
   SpillAlignment = R->getValueAsInt("Alignment");
   CopyCost = R->getValueAsInt("CopyCost");
   MethodBodies = R->getValueAsCode("MethodBodies");
@@ -443,7 +443,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
   for (unsigned i = 0, e = TypeList->getSize(); i != e; ++i) {
     Record *TyEl = TypeList->getElementAsRecord(i);
     assert(TyEl->isSubClassOf("LLVMType") && "Expected a type!");
-    MVT::ValueType VT = getValueType(TyEl->getValueAsDef("VT"));
+    MVT::SimpleValueType VT = getValueType(TyEl->getValueAsDef("VT"));
     isOverloaded |= VT == MVT::iAny || VT == MVT::fAny;
     ArgVTs.push_back(VT);
     ArgTypeDefs.push_back(TyEl);
diff --git a/utils/TableGen/CodeGenTarget.h b/utils/TableGen/CodeGenTarget.h
index b066b6a..1191488 100644
--- a/utils/TableGen/CodeGenTarget.h
+++ b/utils/TableGen/CodeGenTarget.h
@@ -45,12 +45,12 @@ enum SDNP {
 // ComplexPattern attributes.
 enum CPAttr { CPAttrParentAsRoot };
-/// getValueType - Return the MVT::ValueType that the specified TableGen record
-/// corresponds to.
-MVT::ValueType getValueType(Record *Rec);
+/// getValueType - Return the MVT::SimpleValueType that the specified TableGen
+/// record corresponds to.
+MVT::SimpleValueType getValueType(Record *Rec);
-std::string getName(MVT::ValueType T);
-std::string getEnumName(MVT::ValueType T);
+std::string getName(MVT::SimpleValueType T);
+std::string getEnumName(MVT::SimpleValueType T);
 /// getQualifiedName - Return the name of the specified record, with a
 /// namespace qualifier if the record contains one.
@@ -64,7 +64,7 @@ class CodeGenTarget {
   mutable std::map<std::string, CodeGenInstruction> Instructions;
   mutable std::vector<CodeGenRegister> Registers;
   mutable std::vector<CodeGenRegisterClass> RegisterClasses;
-  mutable std::vector<MVT::ValueType> LegalValueTypes;
+  mutable std::vector<MVT::SimpleValueType> LegalValueTypes;
   void ReadRegisters() const;
   void ReadRegisterClasses() const;
   void ReadInstructions() const;
@@ -121,19 +121,19 @@ public:
     return FoundRC;
   }
-  /// getRegisterVTs - Find the union of all possible ValueTypes for the
+  /// getRegisterVTs - Find the union of all possible SimpleValueTypes for the
   /// specified physical register.
   std::vector<unsigned char> getRegisterVTs(Record *R) const;
-  const std::vector<MVT::ValueType> &getLegalValueTypes() const {
+  const std::vector<MVT::SimpleValueType> &getLegalValueTypes() const {
     if (LegalValueTypes.empty()) ReadLegalValueTypes();
     return LegalValueTypes;
   }
   /// isLegalValueType - Return true if the specified value type is natively
   /// supported by the target (i.e. there are registers that directly hold it).
-  bool isLegalValueType(MVT::ValueType VT) const {
-    const std::vector<MVT::ValueType> &LegalVTs = getLegalValueTypes();
+  bool isLegalValueType(MVT::SimpleValueType VT) const {
+    const std::vector<MVT::SimpleValueType> &LegalVTs = getLegalValueTypes();
     for (unsigned i = 0, e = LegalVTs.size(); i != e; ++i)
       if (LegalVTs[i] == VT) return true;
     return false;
@@ -175,7 +175,7 @@ public:
 /// ComplexPattern - ComplexPattern info, corresponding to the ComplexPattern
 /// tablegen class in TargetSelectionDAG.td
 class ComplexPattern {
-  MVT::ValueType Ty;
+  MVT::SimpleValueType Ty;
   unsigned NumOperands;
   std::string SelectFunc;
   std::vector<Record*> RootNodes;
@@ -185,7 +185,7 @@ public:
   ComplexPattern() : NumOperands(0) {};
   ComplexPattern(Record *R);
-  MVT::ValueType getValueType() const { return Ty; }
+  MVT::SimpleValueType getValueType() const { return Ty; }
   unsigned getNumOperands() const { return NumOperands; }
   const std::string &getSelectFunc() const { return SelectFunc; }
   const std::vector<Record*> &getRootNodes() const {
diff --git a/utils/TableGen/DAGISelEmitter.cpp b/utils/TableGen/DAGISelEmitter.cpp
index a4d08c6..8d89eee 100644
--- a/utils/TableGen/DAGISelEmitter.cpp
+++ b/utils/TableGen/DAGISelEmitter.cpp
@@ -51,8 +51,8 @@ static const ComplexPattern *NodeGetComplexPattern(TreePatternNode *N,
 /// patterns before small ones. This is used to determine the size of a
 /// pattern.
 static unsigned getPatternSize(TreePatternNode *P, CodeGenDAGPatterns &CGP) {
-  assert((MVT::isExtIntegerInVTs(P->getExtTypes()) ||
-          MVT::isExtFloatingPointInVTs(P->getExtTypes()) ||
+  assert((EMVT::isExtIntegerInVTs(P->getExtTypes()) ||
+          EMVT::isExtFloatingPointInVTs(P->getExtTypes()) ||
           P->getExtTypeNum(0) == MVT::isVoid ||
           P->getExtTypeNum(0) == MVT::Flag ||
           P->getExtTypeNum(0) == MVT::iPTR) &&
@@ -160,7 +160,7 @@ struct PatternSortingPredicate {
 /// getRegisterValueType - Look up and return the first ValueType of specified
 /// RegisterClass record
-static MVT::ValueType getRegisterValueType(Record *R, const CodeGenTarget &T) {
+static MVT::SimpleValueType getRegisterValueType(Record *R, const CodeGenTarget &T) {
   if (const CodeGenRegisterClass *RC = T.getRegisterClassForRegister(R))
     return RC->getValueTypeNum(0);
   return MVT::Other;
@@ -932,7 +932,7 @@ public:
     // How many results is this pattern expected to produce?
     unsigned NumPatResults = 0;
     for (unsigned i = 0, e = Pattern->getExtTypes().size(); i != e; i++) {
-      MVT::ValueType VT = Pattern->getTypeNum(i);
+      MVT::SimpleValueType VT = Pattern->getTypeNum(i);
       if (VT != MVT::isVoid && VT != MVT::Flag)
         NumPatResults++;
     }
@@ -1045,7 +1045,7 @@ public:
       for (unsigned i = 0; i < NumDstRegs; i++) {
         Record *RR = DstRegs[i];
         if (RR->isSubClassOf("Register")) {
-          MVT::ValueType RVT = getRegisterValueType(RR, CGT);
+          MVT::SimpleValueType RVT = getRegisterValueType(RR, CGT);
           Code += ", " + getEnumName(RVT);
         }
       }
@@ -1311,7 +1311,7 @@ private:
       Record *RR = DI->getDef();
       if (RR->isSubClassOf("Register")) {
-        MVT::ValueType RVT = getRegisterValueType(RR, T);
+        MVT::SimpleValueType RVT = getRegisterValueType(RR, T);
         if (RVT == MVT::Flag) {
           if (!InFlagDecled) {
             emitCode("SDOperand InFlag = " + RootName + utostr(OpNo) + ";");
@@ -1634,12 +1634,13 @@ void DAGISelEmitter::EmitInstructionSelector(std::ostream &OS) {
                      PatternSortingPredicate(CGP));
     // Split them into groups by type.
-    std::map<MVT::ValueType, std::vector<const PatternToMatch*> >PatternsByType;
+    std::map<MVT::SimpleValueType,
+             std::vector<const PatternToMatch*> > PatternsByType;
     for (unsigned i = 0, e = PatternsOfOp.size(); i != e; ++i) {
       const PatternToMatch *Pat = PatternsOfOp[i];
       TreePatternNode *SrcPat = Pat->getSrcPattern();
-      MVT::ValueType VT = SrcPat->getTypeNum(0);
-      std::map<MVT::ValueType,
+      MVT::SimpleValueType VT = SrcPat->getTypeNum(0);
+      std::map<MVT::SimpleValueType,
               std::vector<const PatternToMatch*> >::iterator TI =
        PatternsByType.find(VT);
      if (TI != PatternsByType.end())
@@ -1651,10 +1652,11 @@ void DAGISelEmitter::EmitInstructionSelector(std::ostream &OS) {
       }
     }
-    for (std::map<MVT::ValueType, std::vector<const PatternToMatch*> >::iterator
+    for (std::map<MVT::SimpleValueType,
+                  std::vector<const PatternToMatch*> >::iterator
          II = PatternsByType.begin(), EE = PatternsByType.end(); II != EE;
          ++II) {
-      MVT::ValueType OpVT = II->first;
+      MVT::SimpleValueType OpVT = II->first;
       std::vector<const PatternToMatch*> &Patterns = II->second;
       typedef std::vector<std::pair<unsigned,std::string> > CodeList;
       typedef std::vector<std::pair<unsigned,std::string> >::iterator CodeListI;
@@ -1734,7 +1736,7 @@ void DAGISelEmitter::EmitInstructionSelector(std::ostream &OS) {
         CallerCode += ", " + TargetOpcodes[j];
       }
       for (unsigned j = 0, e = TargetVTs.size(); j != e; ++j) {
-        CalleeCode += ", MVT::ValueType VT" + utostr(j);
+        CalleeCode += ", MVT VT" + utostr(j);
         CallerCode += ", " + TargetVTs[j];
       }
       for (std::set<std::string>::iterator
@@ -1852,7 +1854,7 @@ void DAGISelEmitter::EmitInstructionSelector(std::ostream &OS) {
      << " for (unsigned j = 0, e = Ops.size(); j != e; ++j)\n"
      << " AddToISelQueue(Ops[j]);\n\n"
-     << " std::vector<MVT::ValueType> VTs;\n"
+     << " std::vector<MVT> VTs;\n"
      << " VTs.push_back(MVT::Other);\n"
      << " VTs.push_back(MVT::Flag);\n"
      << " SDOperand New = CurDAG->getNode(ISD::INLINEASM, VTs, &Ops[0], "
@@ -1931,7 +1933,7 @@ void DAGISelEmitter::EmitInstructionSelector(std::ostream &OS) {
      << "INSTRUCTION_LIST_END)) {\n"
      << " return NULL; // Already selected.\n"
      << " }\n\n"
-     << " MVT::ValueType NVT = N.Val->getValueType(0);\n"
+     << " MVT::SimpleValueType NVT = N.Val->getValueType(0).getSimpleVT();\n"
      << " switch (N.getOpcode()) {\n"
      << " default: break;\n"
      << " case ISD::EntryToken: // These leaves remain the same.\n"
@@ -2008,7 +2010,7 @@ void DAGISelEmitter::EmitInstructionSelector(std::ostream &OS) {
   // If there is an iPTR result version of this pattern, emit it here.
   if (HasPtrPattern) {
-    OS << " if (NVT == TLI.getPointerTy())\n";
+    OS << " if (TLI.getPointerTy() == NVT)\n";
     OS << " return Select_" << getLegalCName(OpName) <<"_iPTR(N);\n";
   }
   if (HasDefaultPattern) {
diff --git a/utils/TableGen/IntrinsicEmitter.cpp b/utils/TableGen/IntrinsicEmitter.cpp
index 567973a..ab7b58b 100644
--- a/utils/TableGen/IntrinsicEmitter.cpp
+++ b/utils/TableGen/IntrinsicEmitter.cpp
@@ -114,9 +114,9 @@ EmitIntrinsicToNameTable(const std::vector<CodeGenIntrinsic> &Ints,
   OS << "#endif\n\n";
 }
-static void EmitTypeForValueType(std::ostream &OS, MVT::ValueType VT) {
-  if (MVT::isInteger(VT)) {
-    unsigned BitWidth = MVT::getSizeInBits(VT);
+static void EmitTypeForValueType(std::ostream &OS, MVT::SimpleValueType VT) {
+  if (MVT(VT).isInteger()) {
+    unsigned BitWidth = MVT(VT).getSizeInBits();
     OS << "IntegerType::get(" << BitWidth << ")";
   } else if (VT == MVT::Other) {
     // MVT::OtherVT is used to mean the empty struct type here.
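The EmitTypeForValueType hunk above shows the recurring migration pattern in this patch: the old static helpers (MVT::isInteger(VT), MVT::getSizeInBits(VT)) become member calls on a temporary MVT wrapped around the raw MVT::SimpleValueType. A minimal sketch of that pattern, assuming the LLVM 2.3-era ValueTypes.h; the helper name bitWidthOf is hypothetical and only uses calls that appear in the added lines of this diff:

#include "llvm/CodeGen/ValueTypes.h"

// Hypothetical helper: total width in bits of a simple value type, written
// in the member-function style this patch converts TableGen to.
static unsigned bitWidthOf(llvm::MVT::SimpleValueType SVT) {
  llvm::MVT VT(SVT);                  // wrap the raw enum in an MVT
  if (VT.isVector())                  // was: MVT::isVector(SVT)
    return VT.getVectorNumElements() *
           VT.getVectorElementType().getSizeInBits();
  return VT.getSizeInBits();          // was: MVT::getSizeInBits(SVT)
}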
@@ -140,7 +140,7 @@ static void EmitTypeForValueType(std::ostream &OS, MVT::ValueType VT) {
 static void EmitTypeGenerate(std::ostream &OS, Record *ArgType,
                              unsigned &ArgNo) {
-  MVT::ValueType VT = getValueType(ArgType->getValueAsDef("VT"));
+  MVT::SimpleValueType VT = getValueType(ArgType->getValueAsDef("VT"));
   if (ArgType->isSubClassOf("LLVMMatchType")) {
     unsigned Number = ArgType->getValueAsInt("Number");
@@ -153,10 +153,11 @@ static void EmitTypeGenerate(std::ostream &OS, Record *ArgType,
     // increment it when we actually hit an overloaded type. Getting this wrong
     // leads to very subtle bugs!
     OS << "Tys[" << ArgNo++ << "]";
-  } else if (MVT::isVector(VT)) {
+  } else if (MVT(VT).isVector()) {
+    MVT VVT = VT;
     OS << "VectorType::get(";
-    EmitTypeForValueType(OS, MVT::getVectorElementType(VT));
-    OS << ", " << MVT::getVectorNumElements(VT) << ")";
+    EmitTypeForValueType(OS, VVT.getVectorElementType().getSimpleVT());
+    OS << ", " << VVT.getVectorNumElements() << ")";
   } else if (VT == MVT::iPTR) {
     OS << "PointerType::getUnqual(";
     EmitTypeGenerate(OS, ArgType->getValueAsDef("ElTy"), ArgNo);
@@ -225,7 +226,7 @@ void IntrinsicEmitter::EmitVerifier(const std::vector<CodeGenIntrinsic> &Ints,
         assert(Number < j && "Invalid matching number!");
         OS << "~" << Number;
       } else {
-        MVT::ValueType VT = getValueType(ArgType->getValueAsDef("VT"));
+        MVT::SimpleValueType VT = getValueType(ArgType->getValueAsDef("VT"));
         OS << getEnumName(VT);
         if (VT == MVT::isVoid && j != 0 && j != ArgTypes.size()-1)
           throw "Var arg type not last argument";
diff --git a/utils/TableGen/RegisterInfoEmitter.cpp b/utils/TableGen/RegisterInfoEmitter.cpp
index b9253cf..9277335 100644
--- a/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/utils/TableGen/RegisterInfoEmitter.cpp
@@ -221,7 +221,7 @@ void RegisterInfoEmitter::run(std::ostream &OS) {
     // Emit the register list now.
     OS << " // " << Name << " Register Class Value Types...\n"
-       << " static const MVT::ValueType " << Name
+       << " static const MVT " << Name
        << "[] = {\n ";
     for (unsigned i = 0, e = RC.VTs.size(); i != e; ++i)
       OS << getEnumName(RC.VTs[i]) << ", ";
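Taken together, these TableGen hunks keep storing the raw enum (MVT::SimpleValueType, or unsigned char in the extended-type vectors) while the library and generated code query it through the MVT wrapper and convert back with getSimpleVT(). A small sketch of that round trip, assuming the same era's ValueTypes.h; the variable names are illustrative only:

#include <cassert>
#include "llvm/CodeGen/ValueTypes.h"

int main() {
  using namespace llvm;
  MVT::SimpleValueType SVT = MVT::v4i32;  // the raw, switch-able enum value
  MVT VT = SVT;                           // implicit wrap, as in 'MVT VVT = VT;'
  assert(VT.isVector());                  // member-style query on the wrapper
  assert(VT.getVectorElementType().getSimpleVT() == MVT::i32); // unwrap again
  return 0;
}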