21 files changed, 2237 insertions, 27 deletions
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 29ecedc..aaf9a42 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -100,7 +100,8 @@ public:
     TypeExpandFloat,      // Split this float into two of half the size.
     TypeScalarizeVector,  // Replace this one-element vector with its element.
     TypeSplitVector,      // Split this vector into two of half the size.
-    TypeWidenVector       // This vector should be widened into a larger vector.
+    TypeWidenVector,      // This vector should be widened into a larger vector.
+    TypePromoteFloat      // Replace this float with a larger one.
   };

   /// LegalizeKind holds the legalization kind that needs to happen to EVT
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 22fd6d6..e218e22 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -307,6 +307,7 @@ namespace {
     SDValue visitINSERT_SUBVECTOR(SDNode *N);
     SDValue visitMLOAD(SDNode *N);
     SDValue visitMSTORE(SDNode *N);
+    SDValue visitFP_TO_FP16(SDNode *N);

     SDValue XformToShuffleWithZero(SDNode *N);
     SDValue ReassociateOps(unsigned Opc, SDLoc DL, SDValue LHS, SDValue RHS);
@@ -1380,6 +1381,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
   case ISD::INSERT_SUBVECTOR:   return visitINSERT_SUBVECTOR(N);
   case ISD::MLOAD:              return visitMLOAD(N);
   case ISD::MSTORE:             return visitMSTORE(N);
+  case ISD::FP_TO_FP16:         return visitFP_TO_FP16(N);
   }
   return SDValue();
 }
@@ -8161,6 +8163,11 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
   if (isConstantFPBuildVectorOrConstantFP(N0))
     return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, N0);

+  // fold (fp_extend (fp16_to_fp op)) -> (fp16_to_fp op)
+  if (N0.getOpcode() == ISD::FP16_TO_FP &&
+      TLI.getOperationAction(ISD::FP16_TO_FP, VT) == TargetLowering::Legal)
+    return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), VT, N0.getOperand(0));
+
   // Turn fp_extend(fp_round(X, 1)) -> x since the fp_round doesn't affect the
   // value of X.
   if (N0.getOpcode() == ISD::FP_ROUND
@@ -12349,6 +12356,16 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
   return SDValue();
 }

+SDValue DAGCombiner::visitFP_TO_FP16(SDNode *N) {
+  SDValue N0 = N->getOperand(0);
+
+  // fold (fp_to_fp16 (fp16_to_fp op)) -> op
+  if (N0->getOpcode() == ISD::FP16_TO_FP)
+    return N0->getOperand(0);
+
+  return SDValue();
+}
+
 /// Returns a vector_shuffle if it is able to transform an AND to a
 /// vector_shuffle with the destination vector and a zero vector.
 /// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>.
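A note on why the two new DAGCombiner folds are safe: f16 -> f32 conversion is exact (every half value, including subnormals, is representable in single precision), so FP16_TO_FP loses no information and an immediately following FP_TO_FP16 recovers the original bits. The standalone C++ sketch below checks that identity exhaustively; h2f/f2h are illustrative bit-level converters written for this note, not LLVM code, and NaN payload propagation is left aside.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Exact f16 -> f32 (models FP16_TO_FP): widen sign/exponent/mantissa.
static float h2f(uint16_t h) {
  uint32_t sign = (uint32_t)(h & 0x8000) << 16;
  uint32_t exp  = (h >> 10) & 0x1F;
  uint32_t man  = h & 0x3FF;
  uint32_t bits;
  if (exp == 0x1F) {                  // inf / NaN: widen the payload
    bits = sign | 0x7F800000u | (man << 13);
  } else if (exp != 0) {              // normal: rebias the exponent
    bits = sign | ((exp + 112) << 23) | (man << 13);
  } else if (man == 0) {              // +/- zero
    bits = sign;
  } else {                            // subnormal: renormalize into f32
    int shift = 0;
    while (!(man & 0x400)) { man <<= 1; ++shift; }
    bits = sign | ((uint32_t)(113 - shift) << 23) | ((man & 0x3FF) << 13);
  }
  float f;
  std::memcpy(&f, &bits, sizeof f);
  return f;
}

// f32 -> f16 (models FP_TO_FP16) for values exactly representable in
// half -- which is all that the round-trip check ever feeds it.
static uint16_t f2h(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof bits);
  uint16_t sign = (uint16_t)((bits >> 16) & 0x8000);
  uint32_t exp  = (bits >> 23) & 0xFF;
  uint32_t man  = bits & 0x7FFFFF;
  if (exp == 0xFF)                    // inf / NaN
    return sign | 0x7C00 | (uint16_t)(man >> 13);
  int e = (int)exp - 127 + 15;        // rebias into half's exponent range
  if (e >= 1)                         // normal half
    return sign | (uint16_t)(e << 10) | (uint16_t)(man >> 13);
  if (e < -10)                        // below half's smallest subnormal
    return sign;
  man |= 0x800000;                    // subnormal: shift out the hidden bit
  return sign | (uint16_t)(man >> (14 - e));
}

int main() {
  for (uint32_t i = 0; i <= 0xFFFF; ++i) {
    uint16_t h = (uint16_t)i;
    float f = h2f(h);
    if (std::isnan(f))
      continue;                       // NaN payloads are not guaranteed bit-exact
    if (f2h(f) != h) {
      std::printf("round-trip failed for 0x%04X\n", h);
      return 1;
    }
  }
  std::printf("fp_to_fp16(fp16_to_fp(x)) == x for every non-NaN half\n");
  return 0;
}

The check covers all 65536 bit patterns in negligible time, which is also why the combiner can apply the fold unconditionally.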
diff --git a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index b596715..a456fd9 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -1579,3 +1579,419 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_STORE(SDNode *N, unsigned OpNo) {
   return DAG.getTruncStore(Chain, SDLoc(N), Hi, Ptr,
                            ST->getMemoryVT(), ST->getMemOperand());
 }
+
+//===----------------------------------------------------------------------===//
+//  Float Operand Promotion
+//===----------------------------------------------------------------------===//
+//
+
+static ISD::NodeType GetPromotionOpcode(EVT OpVT, EVT RetVT) {
+  if (OpVT == MVT::f16) {
+    return ISD::FP16_TO_FP;
+  } else if (RetVT == MVT::f16) {
+    return ISD::FP_TO_FP16;
+  }
+
+  report_fatal_error("Attempt at an invalid promotion-related conversion");
+}
+
+bool DAGTypeLegalizer::PromoteFloatOperand(SDNode *N, unsigned OpNo) {
+  SDValue R = SDValue();
+
+  // Nodes that use a promotion-requiring floating point operand, but don't
+  // produce a promotion-requiring floating point result, need to be legalized
+  // to use the promoted float operand.  Nodes that produce at least one
+  // promotion-requiring floating point result have their operands legalized as
+  // a part of PromoteFloatResult.
+  switch (N->getOpcode()) {
+  default:
+    llvm_unreachable("Do not know how to promote this operator's operand!");
+
+  case ISD::BITCAST:    R = PromoteFloatOp_BITCAST(N, OpNo); break;
+  case ISD::FCOPYSIGN:  R = PromoteFloatOp_FCOPYSIGN(N, OpNo); break;
+  case ISD::FP_TO_SINT:
+  case ISD::FP_TO_UINT: R = PromoteFloatOp_FP_TO_XINT(N, OpNo); break;
+  case ISD::FP_EXTEND:  R = PromoteFloatOp_FP_EXTEND(N, OpNo); break;
+  case ISD::SELECT_CC:  R = PromoteFloatOp_SELECT_CC(N, OpNo); break;
+  case ISD::SETCC:      R = PromoteFloatOp_SETCC(N, OpNo); break;
+  case ISD::STORE:      R = PromoteFloatOp_STORE(N, OpNo); break;
+  }
+
+  if (R.getNode())
+    ReplaceValueWith(SDValue(N, 0), R);
+  return false;
+}
+
+SDValue DAGTypeLegalizer::PromoteFloatOp_BITCAST(SDNode *N, unsigned OpNo) {
+  SDValue Op = N->getOperand(0);
+  EVT OpVT = Op->getValueType(0);
+
+  EVT IVT = EVT::getIntegerVT(*DAG.getContext(), OpVT.getSizeInBits());
+  assert (IVT == N->getValueType(0) && "Bitcast to type of different size");
+
+  SDValue Promoted = GetPromotedFloat(N->getOperand(0));
+  EVT PromotedVT = Promoted->getValueType(0);
+
+  // Convert the promoted float value to the desired IVT.
+  return DAG.getNode(GetPromotionOpcode(PromotedVT, OpVT), SDLoc(N), IVT,
+                     Promoted);
+}
+
+// Promote Operand 1 of FCOPYSIGN.  Operand 0 ought to be handled by
+// PromoteFloatRes_FCOPYSIGN.
+SDValue DAGTypeLegalizer::PromoteFloatOp_FCOPYSIGN(SDNode *N, unsigned OpNo) {
+  assert (OpNo == 1 && "Only Operand 1 needs promotion here");
+  SDValue Op1 = GetPromotedFloat(N->getOperand(1));
+
+  return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
+                     N->getOperand(0), Op1);
+}
+
+// Convert the promoted float value to the desired integer type
+SDValue DAGTypeLegalizer::PromoteFloatOp_FP_TO_XINT(SDNode *N, unsigned OpNo) {
+  SDValue Op = GetPromotedFloat(N->getOperand(0));
+  return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), Op);
+}
+
+SDValue DAGTypeLegalizer::PromoteFloatOp_FP_EXTEND(SDNode *N, unsigned OpNo) {
+  SDValue Op = GetPromotedFloat(N->getOperand(0));
+  EVT VT = N->getValueType(0);
+
+  // Desired VT is the same as the promoted type.  Use the promoted float directly.
+  if (VT == Op->getValueType(0))
+    return Op;
+
+  // Else, extend the promoted float value to the desired VT.
+  return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, Op);
+}
+
+// Promote the float operands used for comparison.  The true- and false-
+// operands have the same type as the result and are promoted, if needed, by
+// PromoteFloatRes_SELECT_CC.
+SDValue DAGTypeLegalizer::PromoteFloatOp_SELECT_CC(SDNode *N, unsigned OpNo) {
+  SDValue LHS = GetPromotedFloat(N->getOperand(0));
+  SDValue RHS = GetPromotedFloat(N->getOperand(1));
+
+  return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N->getValueType(0),
+                     LHS, RHS, N->getOperand(2), N->getOperand(3),
+                     N->getOperand(4));
+}
+
+// Construct a SETCC that compares the promoted values and sets the conditional
+// code.
+SDValue DAGTypeLegalizer::PromoteFloatOp_SETCC(SDNode *N, unsigned OpNo) {
+  EVT VT = N->getValueType(0);
+  EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
+  SDValue Op0 = GetPromotedFloat(N->getOperand(0));
+  SDValue Op1 = GetPromotedFloat(N->getOperand(1));
+  ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(2))->get();
+
+  return DAG.getSetCC(SDLoc(N), NVT, Op0, Op1, CCCode);
+}
+
+// Lower the promoted float down to the integer value of the same size and
+// construct a STORE of the integer value.
+SDValue DAGTypeLegalizer::PromoteFloatOp_STORE(SDNode *N, unsigned OpNo) {
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  SDValue Val = ST->getValue();
+  SDLoc DL(N);
+
+  SDValue Promoted = GetPromotedFloat(Val);
+  EVT VT = ST->getOperand(1)->getValueType(0);
+  EVT IVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
+
+  SDValue NewVal;
+  NewVal = DAG.getNode(GetPromotionOpcode(Promoted.getValueType(), VT), DL,
+                       IVT, Promoted);
+
+  return DAG.getStore(ST->getChain(), DL, NewVal, ST->getBasePtr(),
+                      ST->getMemOperand());
+}
+
+//===----------------------------------------------------------------------===//
+//  Float Result Promotion
+//===----------------------------------------------------------------------===//
+
+void DAGTypeLegalizer::PromoteFloatResult(SDNode *N, unsigned ResNo) {
+  SDValue R = SDValue();
+
+  switch (N->getOpcode()) {
+    // These opcodes cannot appear if promotion of FP16 is done in the backend
+    // instead of in Clang
+    case ISD::FP16_TO_FP:
+    case ISD::FP_TO_FP16:
+    default:
+      llvm_unreachable("Do not know how to promote this operator's result!");
+
+    case ISD::BITCAST:    R = PromoteFloatRes_BITCAST(N); break;
+    case ISD::ConstantFP: R = PromoteFloatRes_ConstantFP(N); break;
+    case ISD::EXTRACT_VECTOR_ELT:
+                          R = PromoteFloatRes_EXTRACT_VECTOR_ELT(N); break;
+    case ISD::FCOPYSIGN:  R = PromoteFloatRes_FCOPYSIGN(N); break;
+
+    // Unary FP Operations
+    case ISD::FABS:
+    case ISD::FCEIL:
+    case ISD::FCOS:
+    case ISD::FEXP:
+    case ISD::FEXP2:
+    case ISD::FFLOOR:
+    case ISD::FLOG:
+    case ISD::FLOG2:
+    case ISD::FLOG10:
+    case ISD::FNEARBYINT:
+    case ISD::FNEG:
+    case ISD::FRINT:
+    case ISD::FROUND:
+    case ISD::FSIN:
+    case ISD::FSQRT:
+    case ISD::FTRUNC:     R = PromoteFloatRes_UnaryOp(N); break;
+
+    // Binary FP Operations
+    case ISD::FADD:
+    case ISD::FDIV:
+    case ISD::FMAXNUM:
+    case ISD::FMINNUM:
+    case ISD::FMUL:
+    case ISD::FPOW:
+    case ISD::FREM:
+    case ISD::FSUB:       R = PromoteFloatRes_BinOp(N); break;
+
+    case ISD::FMA:        // FMA is the same as FMAD
+    case ISD::FMAD:       R = PromoteFloatRes_FMAD(N); break;
+
+    case ISD::FPOWI:      R = PromoteFloatRes_FPOWI(N); break;
+
+    case ISD::FP_ROUND:   R = PromoteFloatRes_FP_ROUND(N); break;
+    case ISD::LOAD:       R = PromoteFloatRes_LOAD(N); break;
+    case ISD::SELECT:     R = PromoteFloatRes_SELECT(N); break;
+    case ISD::SELECT_CC:  R = PromoteFloatRes_SELECT_CC(N); break;
+
+    case ISD::SINT_TO_FP:
+    case ISD::UINT_TO_FP: R = PromoteFloatRes_XINT_TO_FP(N); break;
+    case ISD::UNDEF:      R = PromoteFloatRes_UNDEF(N); break;
+
+  }
+
+  if (R.getNode())
+    SetPromotedFloat(SDValue(N, ResNo), R);
+}
+
+// Bitcast from i16 to f16:  convert the i16 to an f32 value instead.
+// At this point, it is not possible to determine if the bitcast value is
+// eventually stored to memory or promoted to f32 or promoted to a floating
+// point type of higher precision.  Some of these cases are handled by the
+// FP_EXTEND and STORE promotion handlers.
+SDValue DAGTypeLegalizer::PromoteFloatRes_BITCAST(SDNode *N) {
+  EVT VT = N->getValueType(0);
+  EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
+  return DAG.getNode(GetPromotionOpcode(VT, NVT), SDLoc(N), NVT,
+                     N->getOperand(0));
+}
+
+SDValue DAGTypeLegalizer::PromoteFloatRes_ConstantFP(SDNode *N) {
+  ConstantFPSDNode *CFPNode = cast<ConstantFPSDNode>(N);
+  EVT VT = N->getValueType(0);
+
+  // Get the (bit-cast) APInt of the APFloat and build an integer constant
+  EVT IVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
+  SDValue C = DAG.getConstant(CFPNode->getValueAPF().bitcastToAPInt(),
+                              IVT);
+
+  // Convert the Constant to the desired FP type
+  // FIXME We might be able to do the conversion during compilation and get rid
+  // of it from the object code
+  EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
+  return DAG.getNode(GetPromotionOpcode(VT, NVT), SDLoc(N), NVT, C);
+}
+
+// If the Index operand is a constant, try to redirect the extract operation to
+// the correct legalized vector.  If not, bit-convert the input vector to the
+// equivalent integer vector.  Extract the element as a (bit-cast) integer
+// value and convert it to the promoted type.
+SDValue DAGTypeLegalizer::PromoteFloatRes_EXTRACT_VECTOR_ELT(SDNode *N) {
+  SDLoc DL(N);
+
+  // If the index is constant, try to extract the value from the legalized
+  // vector type.
+  if (isa<ConstantSDNode>(N->getOperand(1))) {
+    SDValue Vec = N->getOperand(0);
+    SDValue Idx = N->getOperand(1);
+    EVT VecVT = Vec->getValueType(0);
+    EVT EltVT = VecVT.getVectorElementType();
+
+    uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+
+    switch (getTypeAction(VecVT)) {
+    default: break;
+    case TargetLowering::TypeScalarizeVector: {
+      SDValue Res = GetScalarizedVector(N->getOperand(0));
+      ReplaceValueWith(SDValue(N, 0), Res);
+      return SDValue();
+    }
+    case TargetLowering::TypeWidenVector: {
+      Vec = GetWidenedVector(Vec);
+      SDValue Res = DAG.getNode(N->getOpcode(), DL, EltVT, Vec, Idx);
+      ReplaceValueWith(SDValue(N, 0), Res);
+      return SDValue();
+    }
+    case TargetLowering::TypeSplitVector: {
+      SDValue Lo, Hi;
+      GetSplitVector(Vec, Lo, Hi);
+
+      uint64_t LoElts = Lo.getValueType().getVectorNumElements();
+      SDValue Res;
+      if (IdxVal < LoElts)
+        Res = DAG.getNode(N->getOpcode(), DL, EltVT, Lo, Idx);
+      else
+        Res = DAG.getNode(N->getOpcode(), DL, EltVT, Hi,
+                          DAG.getConstant(IdxVal - LoElts,
+                                          Idx.getValueType()));
+      ReplaceValueWith(SDValue(N, 0), Res);
+      return SDValue();
+    }
+
+    }
+  }
+
+  // Bit-convert the input vector to the equivalent integer vector
+  SDValue NewOp = BitConvertVectorToIntegerVector(N->getOperand(0));
+  EVT IVT = NewOp.getValueType().getVectorElementType();
+
+  // Extract the element as a (bit-cast) integer value
+  SDValue NewVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IVT,
+                               NewOp, N->getOperand(1));
+
+  // Convert the element to the desired FP type
+  EVT VT = N->getValueType(0);
+  EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
+  return DAG.getNode(GetPromotionOpcode(VT, NVT), SDLoc(N), NVT, NewVal);
+}
+
+// FCOPYSIGN(X, Y) returns the value of X with the sign of Y.  If the result
+// needs promotion, so does the argument X.  Note that Y, if needed, will be
+// handled during operand promotion.
+SDValue DAGTypeLegalizer::PromoteFloatRes_FCOPYSIGN(SDNode *N) {
+  EVT VT = N->getValueType(0);
+  EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
+  SDValue Op0 = GetPromotedFloat(N->getOperand(0));
+
+  SDValue Op1 = N->getOperand(1);
+
+  return DAG.getNode(N->getOpcode(), SDLoc(N), NVT, Op0, Op1);
+}
+
+// Unary operation where the result and the operand have PromoteFloat type
+// action.  Construct a new SDNode with the promoted float value of the old
+// operand.
+SDValue DAGTypeLegalizer::PromoteFloatRes_UnaryOp(SDNode *N) {
+  EVT VT = N->getValueType(0);
+  EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
+  SDValue Op = GetPromotedFloat(N->getOperand(0));
+
+  return DAG.getNode(N->getOpcode(), SDLoc(N), NVT, Op);
+}
+
+// Binary operations where the result and both operands have PromoteFloat type
+// action.  Construct a new SDNode with the promoted float values of the old
+// operands.
+SDValue DAGTypeLegalizer::PromoteFloatRes_BinOp(SDNode *N) { + EVT VT = N->getValueType(0); + EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); + SDValue Op0 = GetPromotedFloat(N->getOperand(0)); + SDValue Op1 = GetPromotedFloat(N->getOperand(1)); + + return DAG.getNode(N->getOpcode(), SDLoc(N), NVT, Op0, Op1); +} + +SDValue DAGTypeLegalizer::PromoteFloatRes_FMAD(SDNode *N) { + EVT VT = N->getValueType(0); + EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); + SDValue Op0 = GetPromotedFloat(N->getOperand(0)); + SDValue Op1 = GetPromotedFloat(N->getOperand(1)); + SDValue Op2 = GetPromotedFloat(N->getOperand(2)); + + return DAG.getNode(N->getOpcode(), SDLoc(N), NVT, Op0, Op1, Op2); +} + +// Promote the Float (first) operand and retain the Integer (second) operand +SDValue DAGTypeLegalizer::PromoteFloatRes_FPOWI(SDNode *N) { + EVT VT = N->getValueType(0); + EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); + SDValue Op0 = GetPromotedFloat(N->getOperand(0)); + SDValue Op1 = N->getOperand(1); + + return DAG.getNode(N->getOpcode(), SDLoc(N), NVT, Op0, Op1); +} + +// Explicit operation to reduce precision. Reduce the value to half precision +// and promote it back to the legal type. +SDValue DAGTypeLegalizer::PromoteFloatRes_FP_ROUND(SDNode *N) { + SDLoc DL(N); + + SDValue Op = N->getOperand(0); + EVT VT = N->getValueType(0); + EVT OpVT = Op->getValueType(0); + EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); + EVT IVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); + + // Round promoted float to desired precision + SDValue Round = DAG.getNode(GetPromotionOpcode(OpVT, VT), DL, IVT, Op); + // Promote it back to the legal output type + return DAG.getNode(GetPromotionOpcode(VT, NVT), DL, NVT, Round); +} + +SDValue DAGTypeLegalizer::PromoteFloatRes_LOAD(SDNode *N) { + LoadSDNode *L = cast<LoadSDNode>(N); + EVT VT = N->getValueType(0); + + // Load the value as an integer value with the same number of bits + EVT IVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); + SDValue newL = DAG.getLoad(L->getAddressingMode(), L->getExtensionType(), + IVT, SDLoc(N), L->getChain(), L->getBasePtr(), + L->getOffset(), L->getPointerInfo(), IVT, L->isVolatile(), + L->isNonTemporal(), false, L->getAlignment(), + L->getAAInfo()); + // Legalize the chain result by replacing uses of the old value chain with the + // new one + ReplaceValueWith(SDValue(N, 1), newL.getValue(1)); + + // Convert the integer value to the desired FP type + EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); + return DAG.getNode(GetPromotionOpcode(VT, NVT), SDLoc(N), NVT, newL); +} + +// Construct a new SELECT node with the promoted true- and false- values. +SDValue DAGTypeLegalizer::PromoteFloatRes_SELECT(SDNode *N) { + SDValue TrueVal = GetPromotedFloat(N->getOperand(1)); + SDValue FalseVal = GetPromotedFloat(N->getOperand(2)); + + return DAG.getNode(ISD::SELECT, SDLoc(N), TrueVal->getValueType(0), + N->getOperand(0), TrueVal, FalseVal); +} + +// Construct a new SELECT_CC node with the promoted true- and false- values. +// The operands used for comparison are promoted by PromoteFloatOp_SELECT_CC. 
+SDValue DAGTypeLegalizer::PromoteFloatRes_SELECT_CC(SDNode *N) { + SDValue TrueVal = GetPromotedFloat(N->getOperand(2)); + SDValue FalseVal = GetPromotedFloat(N->getOperand(3)); + + return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N->getValueType(0), + N->getOperand(0), N->getOperand(1), TrueVal, FalseVal, + N->getOperand(4)); +} + +// Construct a SDNode that transforms the SINT or UINT operand to the promoted +// float type. +SDValue DAGTypeLegalizer::PromoteFloatRes_XINT_TO_FP(SDNode *N) { + EVT VT = N->getValueType(0); + EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); + return DAG.getNode(N->getOpcode(), SDLoc(N), NVT, N->getOperand(0)); +} + +SDValue DAGTypeLegalizer::PromoteFloatRes_UNDEF(SDNode *N) { + return DAG.getUNDEF(TLI.getTypeToTransformTo(*DAG.getContext(), + N->getValueType(0))); +} + diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp index 25e80b9..6643103 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp @@ -251,6 +251,16 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BITCAST(SDNode *N) { case TargetLowering::TypeSoftenFloat: // Promote the integer operand by hand. return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT, GetSoftenedFloat(InOp)); + case TargetLowering::TypePromoteFloat: { + // Convert the promoted float by hand. + if (NOutVT.bitsEq(NInVT)) { + SDValue PromotedOp = GetPromotedFloat(InOp); + SDValue Trunc = DAG.getNode(ISD::FP_TO_FP16, dl, NOutVT, PromotedOp); + return DAG.getNode(ISD::AssertZext, dl, NOutVT, Trunc, + DAG.getValueType(OutVT)); + } + break; + } case TargetLowering::TypeExpandInteger: case TargetLowering::TypeExpandFloat: break; @@ -1845,7 +1855,11 @@ void DAGTypeLegalizer::ExpandIntRes_FP_TO_SINT(SDNode *N, SDValue &Lo, SDValue &Hi) { SDLoc dl(N); EVT VT = N->getValueType(0); + SDValue Op = N->getOperand(0); + if (getTypeAction(Op.getValueType()) == TargetLowering::TypePromoteFloat) + Op = GetPromotedFloat(Op); + RTLIB::Libcall LC = RTLIB::getFPTOSINT(Op.getValueType(), VT); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fp-to-sint conversion!"); SplitInteger(TLI.makeLibCall(DAG, LC, VT, &Op, 1, true/*irrelevant*/, @@ -1857,7 +1871,11 @@ void DAGTypeLegalizer::ExpandIntRes_FP_TO_UINT(SDNode *N, SDValue &Lo, SDValue &Hi) { SDLoc dl(N); EVT VT = N->getValueType(0); + SDValue Op = N->getOperand(0); + if (getTypeAction(Op.getValueType()) == TargetLowering::TypePromoteFloat) + Op = GetPromotedFloat(Op); + RTLIB::Libcall LC = RTLIB::getFPTOUINT(Op.getValueType(), VT); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fp-to-uint conversion!"); SplitInteger(TLI.makeLibCall(DAG, LC, VT, &Op, 1, false/*irrelevant*/, diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp index ebf6b28..00b261b 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp @@ -259,6 +259,10 @@ bool DAGTypeLegalizer::run() { WidenVectorResult(N, i); Changed = true; goto NodeDone; + case TargetLowering::TypePromoteFloat: + PromoteFloatResult(N, i); + Changed = true; + goto NodeDone; } } @@ -308,6 +312,10 @@ ScanOperands: NeedsReanalyzing = WidenVectorOperand(N, i); Changed = true; break; + case TargetLowering::TypePromoteFloat: + NeedsReanalyzing = PromoteFloatOperand(N, i); + Changed = true; + break; } break; } @@ -753,6 +761,17 @@ void DAGTypeLegalizer::SetSoftenedFloat(SDValue Op, SDValue Result) { OpEntry = Result; } +void 
DAGTypeLegalizer::SetPromotedFloat(SDValue Op, SDValue Result) { + assert(Result.getValueType() == + TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) && + "Invalid type for promoted float"); + AnalyzeNewValue(Result); + + SDValue &OpEntry = PromotedFloats[Op]; + assert(!OpEntry.getNode() && "Node is already promoted!"); + OpEntry = Result; +} + void DAGTypeLegalizer::SetScalarizedVector(SDValue Op, SDValue Result) { // Note that in some cases vector operation operands may be greater than // the vector element type. For example BUILD_VECTOR of type <1 x i1> with diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/lib/CodeGen/SelectionDAG/LegalizeTypes.h index 9de85d7..58d6282 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypes.h +++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.h @@ -93,6 +93,11 @@ private: /// the same size, this map indicates the converted value to use. SmallDenseMap<SDValue, SDValue, 8> SoftenedFloats; + /// PromotedFloats - For floating point nodes that have a smaller precision + /// than the smallest supported precision, this map indicates what promoted + /// value to use. + SmallDenseMap<SDValue, SDValue, 8> PromotedFloats; + /// ExpandedFloats - For float nodes that need to be expanded this map /// indicates which operands are the expanded version of the input. SmallDenseMap<SDValue, std::pair<SDValue, SDValue>, 8> ExpandedFloats; @@ -499,6 +504,44 @@ private: void FloatExpandSetCCOperands(SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, SDLoc dl); + + //===--------------------------------------------------------------------===// + // Float promotion support: LegalizeFloatTypes.cpp + //===--------------------------------------------------------------------===// + + SDValue GetPromotedFloat(SDValue Op) { + SDValue &PromotedOp = PromotedFloats[Op]; + RemapValue(PromotedOp); + assert(PromotedOp.getNode() && "Operand wasn't promoted?"); + return PromotedOp; + } + void SetPromotedFloat(SDValue Op, SDValue Result); + + void PromoteFloatResult(SDNode *N, unsigned ResNo); + SDValue PromoteFloatRes_BITCAST(SDNode *N); + SDValue PromoteFloatRes_BinOp(SDNode *N); + SDValue PromoteFloatRes_ConstantFP(SDNode *N); + SDValue PromoteFloatRes_EXTRACT_VECTOR_ELT(SDNode *N); + SDValue PromoteFloatRes_FCOPYSIGN(SDNode *N); + SDValue PromoteFloatRes_FMAD(SDNode *N); + SDValue PromoteFloatRes_FPOWI(SDNode *N); + SDValue PromoteFloatRes_FP_ROUND(SDNode *N); + SDValue PromoteFloatRes_LOAD(SDNode *N); + SDValue PromoteFloatRes_SELECT(SDNode *N); + SDValue PromoteFloatRes_SELECT_CC(SDNode *N); + SDValue PromoteFloatRes_UnaryOp(SDNode *N); + SDValue PromoteFloatRes_UNDEF(SDNode *N); + SDValue PromoteFloatRes_XINT_TO_FP(SDNode *N); + + bool PromoteFloatOperand(SDNode *N, unsigned ResNo); + SDValue PromoteFloatOp_BITCAST(SDNode *N, unsigned OpNo); + SDValue PromoteFloatOp_FCOPYSIGN(SDNode *N, unsigned OpNo); + SDValue PromoteFloatOp_FP_EXTEND(SDNode *N, unsigned OpNo); + SDValue PromoteFloatOp_FP_TO_XINT(SDNode *N, unsigned OpNo); + SDValue PromoteFloatOp_STORE(SDNode *N, unsigned OpNo); + SDValue PromoteFloatOp_SELECT_CC(SDNode *N, unsigned OpNo); + SDValue PromoteFloatOp_SETCC(SDNode *N, unsigned OpNo); + //===--------------------------------------------------------------------===// // Scalarization Support: LegalizeVectorTypes.cpp //===--------------------------------------------------------------------===// @@ -593,7 +636,7 @@ private: bool SplitVectorOperand(SDNode *N, unsigned OpNo); SDValue SplitVecOp_VSELECT(SDNode *N, unsigned OpNo); SDValue 
SplitVecOp_UnaryOp(SDNode *N);
-  SDValue SplitVecOp_TruncateHelper(SDNode *N, unsigned TruncateOp);
+  SDValue SplitVecOp_TruncateHelper(SDNode *N);

   SDValue SplitVecOp_BITCAST(SDNode *N);
   SDValue SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N);
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index 38829b6..a1569a5 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -50,6 +50,9 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
   case TargetLowering::TypeLegal:
   case TargetLowering::TypePromoteInteger:
     break;
+  case TargetLowering::TypePromoteFloat:
+    llvm_unreachable("Bitcast of a promotion-needing float should never need "
+                     "expansion");
   case TargetLowering::TypeSoftenFloat:
     // Convert the integer operand instead.
     SplitInteger(GetSoftenedFloat(InOp), Lo, Hi);
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index f000902..135f1d1 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -723,6 +723,7 @@ void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
   switch (getTypeAction(InVT)) {
   case TargetLowering::TypeLegal:
   case TargetLowering::TypePromoteInteger:
+  case TargetLowering::TypePromoteFloat:
   case TargetLowering::TypeSoftenFloat:
   case TargetLowering::TypeScalarizeVector:
   case TargetLowering::TypeWidenVector:
@@ -1294,7 +1295,7 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
   case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break;
   case ISD::CONCAT_VECTORS:    Res = SplitVecOp_CONCAT_VECTORS(N); break;
   case ISD::TRUNCATE:
-    Res = SplitVecOp_TruncateHelper(N, ISD::TRUNCATE);
+    Res = SplitVecOp_TruncateHelper(N);
     break;
   case ISD::FP_ROUND:          Res = SplitVecOp_FP_ROUND(N); break;
   case ISD::STORE:
@@ -1309,14 +1310,14 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
   case ISD::FP_TO_SINT:
   case ISD::FP_TO_UINT:
     if (N->getValueType(0).bitsLT(N->getOperand(0)->getValueType(0)))
-      Res = SplitVecOp_TruncateHelper(N, ISD::TRUNCATE);
+      Res = SplitVecOp_TruncateHelper(N);
     else
       Res = SplitVecOp_UnaryOp(N);
     break;
   case ISD::SINT_TO_FP:
   case ISD::UINT_TO_FP:
     if (N->getValueType(0).bitsLT(N->getOperand(0)->getValueType(0)))
-      Res = SplitVecOp_TruncateHelper(N, ISD::FTRUNC);
+      Res = SplitVecOp_TruncateHelper(N);
     else
       Res = SplitVecOp_UnaryOp(N);
     break;
@@ -1327,10 +1328,8 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
   case ISD::SIGN_EXTEND:
   case ISD::ZERO_EXTEND:
   case ISD::ANY_EXTEND:
-    Res = SplitVecOp_UnaryOp(N);
-    break;
   case ISD::FTRUNC:
-    Res = SplitVecOp_TruncateHelper(N, ISD::FTRUNC);
+    Res = SplitVecOp_UnaryOp(N);
     break;
   }
   }
@@ -1595,8 +1594,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
   return DAG.getNode(ISD::BUILD_VECTOR, DL, N->getValueType(0), Elts);
 }

-SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N,
-                                                    unsigned TruncateOp) {
+SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N) {
   // The result type is legal, but the input type is illegal.  If splitting
   // ends up with the result type of each half still being legal, just
   // do that.
If, however, that would result in an illegal result type, @@ -1618,6 +1616,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N, EVT InVT = InVec->getValueType(0); EVT OutVT = N->getValueType(0); unsigned NumElements = OutVT.getVectorNumElements(); + bool IsFloat = OutVT.isFloatingPoint(); + // Widening should have already made sure this is a power-two vector // if we're trying to split it at all. assert() that's true, just in case. assert(!(NumElements & 1) && "Splitting vector, but not in half!"); @@ -1636,7 +1636,9 @@ SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N, SDValue InLoVec, InHiVec; std::tie(InLoVec, InHiVec) = DAG.SplitVector(InVec, DL); // Truncate them to 1/2 the element size. - EVT HalfElementVT = EVT::getIntegerVT(*DAG.getContext(), InElementSize/2); + EVT HalfElementVT = IsFloat ? + EVT::getFloatingPointVT(InElementSize/2) : + EVT::getIntegerVT(*DAG.getContext(), InElementSize/2); EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT, NumElements/2); SDValue HalfLo = DAG.getNode(N->getOpcode(), DL, HalfVT, InLoVec); @@ -1649,7 +1651,10 @@ SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N, // type. This should normally be something that ends up being legal directly, // but in theory if a target has very wide vectors and an annoyingly // restricted set of legal types, this split can chain to build things up. - return DAG.getNode(TruncateOp, DL, OutVT, InterVec); + return IsFloat ? + DAG.getNode(ISD::FP_ROUND, DL, OutVT, InterVec, + DAG.getTargetConstant(0, TLI.getPointerTy())) : + DAG.getNode(ISD::TRUNCATE, DL, OutVT, InterVec); } SDValue DAGTypeLegalizer::SplitVecOp_VSETCC(SDNode *N) { @@ -2129,6 +2134,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BITCAST(SDNode *N) { return DAG.getNode(ISD::BITCAST, dl, WidenVT, InOp); break; case TargetLowering::TypeSoftenFloat: + case TargetLowering::TypePromoteFloat: case TargetLowering::TypeExpandInteger: case TargetLowering::TypeExpandFloat: case TargetLowering::TypeScalarizeVector: diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp index 2162a51..8eb6464 100644 --- a/lib/CodeGen/TargetLoweringBase.cpp +++ b/lib/CodeGen/TargetLoweringBase.cpp @@ -1256,10 +1256,19 @@ void TargetLoweringBase::computeRegisterProperties( } if (!isTypeLegal(MVT::f16)) { - NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16]; - RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16]; - TransformToType[MVT::f16] = MVT::i16; - ValueTypeActions.setTypeAction(MVT::f16, TypeSoftenFloat); + // If the target has native f32 support, promote f16 operations to f32. If + // f32 is not supported, generate soft float library calls. + if (isTypeLegal(MVT::f32)) { + NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32]; + RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32]; + TransformToType[MVT::f16] = MVT::f32; + ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat); + } else { + NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16]; + RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16]; + TransformToType[MVT::f16] = MVT::i16; + ValueTypeActions.setTypeAction(MVT::f16, TypeSoftenFloat); + } } // Loop over all of the vector value types to see which need transformations. 
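A note on the TargetLoweringBase change above: it is the switch that enables the whole patch. When f16 is illegal, the old behavior unconditionally softened it to i16; now promotion to f32 is preferred whenever the target has legal f32. A compact standalone model of that decision (illustrative names, not LLVM's API):

#include <cstdio>

// Simplified model of the computeRegisterProperties change: an illegal
// f16 is promoted to f32 when the target has native f32 support, and
// softened to i16 (soft-float libcalls) otherwise.
enum class Action { Legal, PromoteFloat, SoftenFloat };

struct F16Plan {
  Action action;
  const char *transformTo;  // models TransformToType[MVT::f16]
};

static F16Plan planForF16(bool f16Legal, bool f32Legal) {
  if (f16Legal)
    return {Action::Legal, "f16"};
  if (f32Legal)
    return {Action::PromoteFloat, "f32"};  // the new TypePromoteFloat path
  return {Action::SoftenFloat, "i16"};     // the old TypeSoftenFloat default
}

int main() {
  F16Plan fp = planForF16(false, true);    // typical FP-capable target
  F16Plan soft = planForF16(false, false); // integer-only target
  std::printf("f16 -> %s, f16 -> %s\n", fp.transformTo, soft.transformTo);
  return 0;                                // prints: f16 -> f32, f16 -> i16
}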
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 90a5e5e..9e59972 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -557,11 +557,21 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Promote);
   setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Promote);
   setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Promote);
+  // i8 and i16 vector elements also need promotion to i32 for v8i8 or v8i16
+  // -> v8f16 conversions.
+  setOperationAction(ISD::SINT_TO_FP, MVT::v8i8, Promote);
+  setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Promote);
+  setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
+  setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Promote);
   // Similarly, there is no direct i32 -> f16 vector conversion instruction.
   setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
   setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
   setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Custom);
   setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Custom);
+  // Or, direct i32 -> f16 vector conversion.  Set it to Custom, so the
+  // conversion happens in two steps: v4i32 -> v4f32 -> v4f16
+  setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Custom);
+  setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);

   // AArch64 doesn't have MUL.2d:
   setOperationAction(ISD::MUL, MVT::v2i64, Expand);
diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td
index f7db50a..92d4460 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/lib/Target/AArch64/AArch64InstrInfo.td
@@ -5128,22 +5128,26 @@ def : Pat<(trap), (BRK 1)>;
 // Natural vector casts (64 bit)
 def : Pat<(v8i8 (AArch64NvCast (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
 def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
+def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
 def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
 def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
 def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;

 def : Pat<(v8i8 (AArch64NvCast (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
 def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
+def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
 def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
 def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;

 def : Pat<(v8i8 (AArch64NvCast (v8i8 FPR64:$src))), (v8i8 FPR64:$src)>;
 def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
+def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
 def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
 def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;

 def : Pat<(v8i8 (AArch64NvCast (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
 def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
+def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
 def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
 def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
 def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
@@ -5158,22 +5162,26 @@ def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;

 // Natural vector casts (128 bit)
 def : Pat<(v16i8
(AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>; def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>; +def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>; def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>; def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>; def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>; def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>; def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>; +def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>; def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>; def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>; def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>; def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>; +def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>; def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>; def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>; def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>; def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>; +def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>; def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>; def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>; def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>; diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index f37737d..c78b79f 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -221,9 +221,23 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM, setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); } - // MIPS doesn't have extending float->double load/store - for (MVT VT : MVT::fp_valuetypes()) + // MIPS doesn't have extending float->double load/store. Set LoadExtAction + // for f32, f16 + for (MVT VT : MVT::fp_valuetypes()) { setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); + setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand); + } + + // Set LoadExtAction for f16 vectors to Expand + for (MVT VT : MVT::fp_vector_valuetypes()) { + MVT F16VT = MVT::getVectorVT(MVT::f16, VT.getVectorNumElements()); + if (F16VT.isValid()) + setLoadExtAction(ISD::EXTLOAD, VT, F16VT, Expand); + } + + setTruncStoreAction(MVT::f32, MVT::f16, Expand); + setTruncStoreAction(MVT::f64, MVT::f16, Expand); + setTruncStoreAction(MVT::f64, MVT::f32, Expand); // Used by legalize types to correctly generate the setcc result. 
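The Expand settings in the Mips hunk above tell the legalizer there is no native extending f16 load or truncating f16 store: each such access must become a legal integer memory operation plus an explicit conversion node. A standalone C++ sketch of the expanded form of an extending f16 load (the converter below is an illustrative, normals-only stand-in for what the FP16_TO_FP node, or the __gnu_h2f_ieee libcall, computes; it is not LLVM code):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Normals-only half -> float converter; a stand-in for FP16_TO_FP.
// (No zero/subnormal/inf/NaN handling, to keep the sketch short.)
static float fp16_to_fp(uint16_t h) {
  uint32_t bits = ((uint32_t)(h & 0x8000) << 16)                  // sign
                | (((uint32_t)((h >> 10) & 0x1F) + 112) << 23)    // rebias exp
                | ((uint32_t)(h & 0x3FF) << 13);                  // mantissa
  float f;
  std::memcpy(&f, &bits, sizeof f);
  return f;
}

// What "Expand" produces for EXTLOAD f64 <- f16:
// a legal i16 load, an explicit f16 -> f32 conversion, then a lossless
// fp_extend from f32 to f64.
static double extload_f64_from_f16(const uint16_t *p) {
  uint16_t raw = *p;            // the integer load of the stored bits
  float f = fp16_to_fp(raw);    // FP16_TO_FP
  return (double)f;             // FP_EXTEND (exact)
}

int main() {
  uint16_t h = 0x4170;          // 2.71875 in IEEE half
  std::printf("%g\n", extload_f64_from_f16(&h));  // prints 2.71875
  return 0;
}

Truncating stores are the mirror image: FP_TO_FP16 (or __gnu_f2h_ieee) produces the i16 bits, which a plain integer store then writes to memory, matching the PromoteFloatOp_STORE handler earlier in the patch.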
@@ -339,6 +353,12 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM, setOperationAction(ISD::FREM, MVT::f32, Expand); setOperationAction(ISD::FREM, MVT::f64, Expand); + // Lower f16 conversion operations into library calls + setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); + setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); + setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); + setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); + setOperationAction(ISD::EH_RETURN, MVT::Other, Custom); setOperationAction(ISD::VASTART, MVT::Other, Custom); diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index c32412a..a04eca1 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -746,6 +746,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM, // them legal. if (VT.getVectorElementType() == MVT::i1) setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand); + + // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are + // split/scalarized right now. + if (VT.getVectorElementType() == MVT::f16) + setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand); } } @@ -17304,6 +17309,11 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, return; } case ISD::FP_TO_SINT: + // FP_TO_INT*_IN_MEM is not legal for f16 inputs. Do not convert + // (FP_TO_SINT (load f16)) to FP_TO_INT*. + if (N->getOperand(0).getValueType() == MVT::f16) + break; + // fallthrough case ISD::FP_TO_UINT: { bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT; @@ -17349,6 +17359,13 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, Results.push_back(V); return; } + case ISD::FP_EXTEND: { + // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND. + // No other ValueType for FP_EXTEND should reach this point. 
+    assert(N->getValueType(0) == MVT::v2f32 &&
+           "Do not know how to legalize this Node");
+    return;
+  }
   case ISD::INTRINSIC_W_CHAIN: {
     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
     switch (IntNo) {
@@ -23642,6 +23659,11 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
   if (Op0.getOpcode() == ISD::LOAD) {
     LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
     EVT VT = Ld->getValueType(0);
+
+    // This transformation is not supported if the result type is f16
+    if (N->getValueType(0) == MVT::f16)
+      return SDValue();
+
     if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
         ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
         !Subtarget->is64Bit() && VT == MVT::i64) {
diff --git a/test/CodeGen/AArch64/arm64-convert-v4f64.ll b/test/CodeGen/AArch64/arm64-convert-v4f64.ll
index c4e3e4e..b8da399 100644
--- a/test/CodeGen/AArch64/arm64-convert-v4f64.ll
+++ b/test/CodeGen/AArch64/arm64-convert-v4f64.ll
@@ -31,3 +31,36 @@ define <8 x i8> @fptosi_v4f64_to_v4i8(<8 x double>* %ptr) {
   ret <8 x i8> %tmp2
 }

+define <4 x half> @uitofp_v4i64_to_v4f16(<4 x i64>* %ptr) {
+; CHECK: uitofp_v4i64_to_v4f16
+; CHECK-DAG: ucvtf v[[LHS:[0-9]+]].2d, v0.2d
+; CHECK-DAG: ucvtf v[[RHS:[0-9]+]].2d, v1.2d
+; CHECK-DAG: fcvtn v[[MID:[0-9]+]].2s, v[[LHS]].2d
+; CHECK-DAG: fcvtn2 v[[MID]].4s, v[[RHS]].2d
+; CHECK: fcvtn v0.4h, v[[MID]].4s
+  %tmp1 = load <4 x i64>, <4 x i64>* %ptr
+  %tmp2 = uitofp <4 x i64> %tmp1 to <4 x half>
+  ret <4 x half> %tmp2
+}
+
+define <4 x i16> @trunc_v4i64_to_v4i16(<4 x i64>* %ptr) {
+; CHECK: trunc_v4i64_to_v4i16
+; CHECK: xtn
+; CHECK: xtn2
+; CHECK: xtn
+  %tmp1 = load <4 x i64>, <4 x i64>* %ptr
+  %tmp2 = trunc <4 x i64> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <4 x i16> @fptoui_v4f64_to_v4i16(<4 x double>* %ptr) {
+; CHECK: fptoui_v4f64_to_v4i16
+; CHECK-DAG: fcvtzu v[[LHS:[0-9]+]].2d, v0.2d
+; CHECK-DAG: fcvtzu v[[RHS:[0-9]+]].2d, v1.2d
+; CHECK-DAG: xtn v[[MID:[0-9]+]].2s, v[[LHS]].2d
+; CHECK-DAG: xtn2 v[[MID]].4s, v[[RHS]].2d
+; CHECK: xtn v0.4h, v[[MID]].4s
+  %tmp1 = load <4 x double>, <4 x double>* %ptr
+  %tmp2 = fptoui <4 x double> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
diff --git a/test/CodeGen/AArch64/fp16-v16-instructions.ll b/test/CodeGen/AArch64/fp16-v16-instructions.ll
new file mode 100644
index 0000000..1af2bd1
--- /dev/null
+++ b/test/CodeGen/AArch64/fp16-v16-instructions.ll
@@ -0,0 +1,105 @@
+; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s
+
+
+define <16 x half> @sitofp_i32(<16 x i32> %a) #0 {
+; CHECK-LABEL: sitofp_i32:
+; CHECK-DAG: scvtf [[S0:v[0-9]+\.4s]], v0.4s
+; CHECK-DAG: scvtf [[S1:v[0-9]+\.4s]], v1.4s
+; CHECK-DAG: scvtf [[S2:v[0-9]+\.4s]], v2.4s
+; CHECK-DAG: scvtf [[S3:v[0-9]+\.4s]], v3.4s
+; CHECK-DAG: fcvtn v0.4h, [[S0]]
+; CHECK-DAG: fcvtn v1.4h, [[S2]]
+; CHECK-DAG: fcvtn v[[R1:[0-9]+]].4h, [[S1]]
+; CHECK-DAG: fcvtn v[[R3:[0-9]+]].4h, [[S3]]
+; CHECK-DAG: ins v0.d[1], v[[R1]].d[0]
+; CHECK-DAG: ins v1.d[1], v[[R3]].d[0]
+
+  %1 = sitofp <16 x i32> %a to <16 x half>
+  ret <16 x half> %1
+}
+
+
+define <16 x half> @sitofp_i64(<16 x i64> %a) #0 {
+; CHECK-LABEL: sitofp_i64:
+; CHECK-DAG: scvtf [[D0:v[0-9]+\.2d]], v0.2d
+; CHECK-DAG: scvtf [[D1:v[0-9]+\.2d]], v1.2d
+; CHECK-DAG: scvtf [[D2:v[0-9]+\.2d]], v2.2d
+; CHECK-DAG: scvtf [[D3:v[0-9]+\.2d]], v3.2d
+; CHECK-DAG: scvtf [[D4:v[0-9]+\.2d]], v4.2d
+; CHECK-DAG: scvtf [[D5:v[0-9]+\.2d]], v5.2d
+; CHECK-DAG: scvtf [[D6:v[0-9]+\.2d]], v6.2d
+; CHECK-DAG: scvtf [[D7:v[0-9]+\.2d]], v7.2d
+
+; CHECK-DAG: fcvtn [[S0:v[0-9]+]].2s, [[D0]]
+; CHECK-DAG: fcvtn [[S1:v[0-9]+]].2s, [[D2]]
+; CHECK-DAG: fcvtn [[S2:v[0-9]+]].2s, [[D4]]
+; CHECK-DAG: fcvtn [[S3:v[0-9]+]].2s, [[D6]]
+
+; CHECK-DAG: fcvtn2 [[S0]].4s, [[D1]]
+; CHECK-DAG: fcvtn2 [[S1]].4s, [[D3]]
+; CHECK-DAG: fcvtn2 [[S2]].4s, [[D5]]
+; CHECK-DAG: fcvtn2 [[S3]].4s, [[D7]]
+
+; CHECK-DAG: fcvtn v0.4h, [[S0]].4s
+; CHECK-DAG: fcvtn v1.4h, [[S2]].4s
+; CHECK-DAG: fcvtn v[[R1:[0-9]+]].4h, [[S1]].4s
+; CHECK-DAG: fcvtn v[[R3:[0-9]+]].4h, [[S3]].4s
+; CHECK-DAG: ins v0.d[1], v[[R1]].d[0]
+; CHECK-DAG: ins v1.d[1], v[[R3]].d[0]
+
+  %1 = sitofp <16 x i64> %a to <16 x half>
+  ret <16 x half> %1
+}
+
+
+define <16 x half> @uitofp_i32(<16 x i32> %a) #0 {
+; CHECK-LABEL: uitofp_i32:
+; CHECK-DAG: ucvtf [[S0:v[0-9]+\.4s]], v0.4s
+; CHECK-DAG: ucvtf [[S1:v[0-9]+\.4s]], v1.4s
+; CHECK-DAG: ucvtf [[S2:v[0-9]+\.4s]], v2.4s
+; CHECK-DAG: ucvtf [[S3:v[0-9]+\.4s]], v3.4s
+; CHECK-DAG: fcvtn v0.4h, [[S0]]
+; CHECK-DAG: fcvtn v1.4h, [[S2]]
+; CHECK-DAG: fcvtn v[[R1:[0-9]+]].4h, [[S1]]
+; CHECK-DAG: fcvtn v[[R3:[0-9]+]].4h, [[S3]]
+; CHECK-DAG: ins v0.d[1], v[[R1]].d[0]
+; CHECK-DAG: ins v1.d[1], v[[R3]].d[0]
+
+  %1 = uitofp <16 x i32> %a to <16 x half>
+  ret <16 x half> %1
+}
+
+
+define <16 x half> @uitofp_i64(<16 x i64> %a) #0 {
+; CHECK-LABEL: uitofp_i64:
+; CHECK-DAG: ucvtf [[D0:v[0-9]+\.2d]], v0.2d
+; CHECK-DAG: ucvtf [[D1:v[0-9]+\.2d]], v1.2d
+; CHECK-DAG: ucvtf [[D2:v[0-9]+\.2d]], v2.2d
+; CHECK-DAG: ucvtf [[D3:v[0-9]+\.2d]], v3.2d
+; CHECK-DAG: ucvtf [[D4:v[0-9]+\.2d]], v4.2d
+; CHECK-DAG: ucvtf [[D5:v[0-9]+\.2d]], v5.2d
+; CHECK-DAG: ucvtf [[D6:v[0-9]+\.2d]], v6.2d
+; CHECK-DAG: ucvtf [[D7:v[0-9]+\.2d]], v7.2d
+
+; CHECK-DAG: fcvtn [[S0:v[0-9]+]].2s, [[D0]]
+; CHECK-DAG: fcvtn [[S1:v[0-9]+]].2s, [[D2]]
+; CHECK-DAG: fcvtn [[S2:v[0-9]+]].2s, [[D4]]
+; CHECK-DAG: fcvtn [[S3:v[0-9]+]].2s, [[D6]]
+
+; CHECK-DAG: fcvtn2 [[S0]].4s, [[D1]]
+; CHECK-DAG: fcvtn2 [[S1]].4s, [[D3]]
+; CHECK-DAG: fcvtn2 [[S2]].4s, [[D5]]
+; CHECK-DAG: fcvtn2 [[S3]].4s, [[D7]]
+
+; CHECK-DAG: fcvtn v0.4h, [[S0]].4s
+; CHECK-DAG: fcvtn v1.4h, [[S2]].4s
+; CHECK-DAG: fcvtn v[[R1:[0-9]+]].4h, [[S1]].4s
+; CHECK-DAG: fcvtn v[[R3:[0-9]+]].4h, [[S3]].4s
+; CHECK-DAG: ins v0.d[1], v[[R1]].d[0]
+; CHECK-DAG: ins v1.d[1], v[[R3]].d[0]
+
+  %1 = uitofp <16 x i64> %a to <16 x half>
+  ret <16 x half> %1
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AArch64/fp16-v4-instructions.ll b/test/CodeGen/AArch64/fp16-v4-instructions.ll
index 6db4e97..0dbda15 100644
--- a/test/CodeGen/AArch64/fp16-v4-instructions.ll
+++ b/test/CodeGen/AArch64/fp16-v4-instructions.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -mtriple=aarch64-none-eabi | FileCheck %s

 define <4 x half> @add_h(<4 x half> %a, <4 x half> %b) {
 entry:
@@ -129,3 +129,93 @@ define <4 x i16> @bitcast_h_to_i(float, <4 x half> %a) {
   %2 = bitcast <4 x half> %a to <4 x i16>
   ret <4 x i16> %2
 }
+
+
+define <4 x half> @sitofp_i8(<4 x i8> %a) #0 {
+; CHECK-LABEL: sitofp_i8:
+; CHECK-NEXT: shl [[OP1:v[0-9]+\.4h]], v0.4h, #8
+; CHECK-NEXT: sshr [[OP2:v[0-9]+\.4h]], [[OP1]], #8
+; CHECK-NEXT: sshll [[OP3:v[0-9]+\.4s]], [[OP2]], #0
+; CHECK-NEXT: scvtf [[OP4:v[0-9]+\.4s]], [[OP3]]
+; CHECK-NEXT: fcvtn v0.4h, [[OP4]]
+; CHECK-NEXT: ret
+  %1 = sitofp <4 x i8> %a to <4 x half>
+  ret <4 x half> %1
+}
+
+
+define <4 x half> @sitofp_i16(<4 x i16> %a) #0 {
+; CHECK-LABEL: sitofp_i16:
+; CHECK-NEXT: sshll [[OP1:v[0-9]+\.4s]], v0.4h, #0
+; CHECK-NEXT: scvtf [[OP2:v[0-9]+\.4s]], [[OP1]]
+; CHECK-NEXT: fcvtn v0.4h, [[OP2]]
+; CHECK-NEXT: ret
+  %1
= sitofp <4 x i16> %a to <4 x half> + ret <4 x half> %1 +} + + +define <4 x half> @sitofp_i32(<4 x i32> %a) #0 { +; CHECK-LABEL: sitofp_i32: +; CHECK-NEXT: scvtf [[OP1:v[0-9]+\.4s]], v0.4s +; CHECK-NEXT: fcvtn v0.4h, [[OP1]] + %1 = sitofp <4 x i32> %a to <4 x half> + ret <4 x half> %1 +} + + +define <4 x half> @sitofp_i64(<4 x i64> %a) #0 { +; CHECK-LABEL: sitofp_i64: +; CHECK-DAG: scvtf [[OP1:v[0-9]+\.2d]], v0.2d +; CHECK-DAG: scvtf [[OP2:v[0-9]+\.2d]], v1.2d +; CHECK-DAG: fcvtn [[OP3:v[0-9]+]].2s, [[OP1]] +; CHECK-NEXT: fcvtn2 [[OP3]].4s, [[OP2]] +; CHECK-NEXT: fcvtn v0.4h, [[OP3]].4s + %1 = sitofp <4 x i64> %a to <4 x half> + ret <4 x half> %1 +} + +define <4 x half> @uitofp_i8(<4 x i8> %a) #0 { +; CHECK-LABEL: uitofp_i8: +; CHECK-NEXT: bic v0.4h, #0xff, lsl #8 +; CHECK-NEXT: ushll [[OP1:v[0-9]+\.4s]], v0.4h, #0 +; CHECK-NEXT: ucvtf [[OP2:v[0-9]+\.4s]], [[OP1]] +; CHECK-NEXT: fcvtn v0.4h, [[OP2]] +; CHECK-NEXT: ret + %1 = uitofp <4 x i8> %a to <4 x half> + ret <4 x half> %1 +} + + +define <4 x half> @uitofp_i16(<4 x i16> %a) #0 { +; CHECK-LABEL: uitofp_i16: +; CHECK-NEXT: ushll [[OP1:v[0-9]+\.4s]], v0.4h, #0 +; CHECK-NEXT: ucvtf [[OP2:v[0-9]+\.4s]], [[OP1]] +; CHECK-NEXT: fcvtn v0.4h, [[OP2]] +; CHECK-NEXT: ret + %1 = uitofp <4 x i16> %a to <4 x half> + ret <4 x half> %1 +} + + +define <4 x half> @uitofp_i32(<4 x i32> %a) #0 { +; CHECK-LABEL: uitofp_i32: +; CHECK-NEXT: ucvtf [[OP1:v[0-9]+\.4s]], v0.4s +; CHECK-NEXT: fcvtn v0.4h, [[OP1]] + %1 = uitofp <4 x i32> %a to <4 x half> + ret <4 x half> %1 +} + + +define <4 x half> @uitofp_i64(<4 x i64> %a) #0 { +; CHECK-LABEL: uitofp_i64: +; CHECK-DAG: ucvtf [[OP1:v[0-9]+\.2d]], v0.2d +; CHECK-DAG: ucvtf [[OP2:v[0-9]+\.2d]], v1.2d +; CHECK-DAG: fcvtn [[OP3:v[0-9]+]].2s, [[OP1]] +; CHECK-NEXT: fcvtn2 [[OP3]].4s, [[OP2]] +; CHECK-NEXT: fcvtn v0.4h, [[OP3]].4s + %1 = uitofp <4 x i64> %a to <4 x half> + ret <4 x half> %1 +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/AArch64/fp16-v8-instructions.ll b/test/CodeGen/AArch64/fp16-v8-instructions.ll index e51c0c5..10a8c22 100644 --- a/test/CodeGen/AArch64/fp16-v8-instructions.ll +++ b/test/CodeGen/AArch64/fp16-v8-instructions.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s +; RUN: llc < %s -asm-verbose=false -mtriple=aarch64-none-eabi | FileCheck %s define <8 x half> @add_h(<8 x half> %a, <8 x half> %b) { entry: @@ -253,3 +253,109 @@ define <8 x i16> @bitcast_h_to_i(float, <8 x half> %a) { ret <8 x i16> %2 } + +define <8 x half> @sitofp_i8(<8 x i8> %a) #0 { +; CHECK-LABEL: sitofp_i8: +; CHECK-NEXT: sshll v[[REG1:[0-9]+]].8h, v0.8b, #0 +; CHECK-NEXT: sshll2 [[LO:v[0-9]+\.4s]], v[[REG1]].8h, #0 +; CHECK-NEXT: sshll [[HI:v[0-9]+\.4s]], v[[REG1]].4h, #0 +; CHECK-DAG: scvtf [[HIF:v[0-9]+\.4s]], [[HI]] +; CHECK-DAG: scvtf [[LOF:v[0-9]+\.4s]], [[LO]] +; CHECK-DAG: fcvtn v[[LOREG:[0-9]+]].4h, [[LOF]] +; CHECK-DAG: fcvtn v0.4h, [[HIF]] +; CHECK: ins v0.d[1], v[[LOREG]].d[0] + %1 = sitofp <8 x i8> %a to <8 x half> + ret <8 x half> %1 +} + + +define <8 x half> @sitofp_i16(<8 x i16> %a) #0 { +; CHECK-LABEL: sitofp_i16: +; CHECK-NEXT: sshll2 [[LO:v[0-9]+\.4s]], v0.8h, #0 +; CHECK-NEXT: sshll [[HI:v[0-9]+\.4s]], v0.4h, #0 +; CHECK-DAG: scvtf [[HIF:v[0-9]+\.4s]], [[HI]] +; CHECK-DAG: scvtf [[LOF:v[0-9]+\.4s]], [[LO]] +; CHECK-DAG: fcvtn v[[LOREG:[0-9]+]].4h, [[LOF]] +; CHECK-DAG: fcvtn v0.4h, [[HIF]] +; CHECK: ins v0.d[1], v[[LOREG]].d[0] + %1 = sitofp <8 x i16> %a to <8 x half> + ret <8 x half> %1 +} + + +define <8 x half> @sitofp_i32(<8 x i32> %a) #0 { +; CHECK-LABEL: 
sitofp_i32: +; CHECK-DAG: scvtf [[OP1:v[0-9]+\.4s]], v0.4s +; CHECK-DAG: scvtf [[OP2:v[0-9]+\.4s]], v1.4s +; CHECK-DAG: fcvtn v[[REG:[0-9]+]].4h, [[OP2]] +; CHECK-DAG: fcvtn v0.4h, [[OP1]] +; CHECK: ins v0.d[1], v[[REG]].d[0] + %1 = sitofp <8 x i32> %a to <8 x half> + ret <8 x half> %1 +} + + +define <8 x half> @sitofp_i64(<8 x i64> %a) #0 { +; CHECK-LABEL: sitofp_i64: +; CHECK-DAG: scvtf [[OP1:v[0-9]+\.2d]], v0.2d +; CHECK-DAG: scvtf [[OP2:v[0-9]+\.2d]], v1.2d +; CHECK-DAG: fcvtn [[OP3:v[0-9]+]].2s, [[OP1]] +; CHECK-DAG: fcvtn2 [[OP3]].4s, [[OP2]] +; CHECK: fcvtn v0.4h, [[OP3]].4s + %1 = sitofp <8 x i64> %a to <8 x half> + ret <8 x half> %1 +} + +define <8 x half> @uitofp_i8(<8 x i8> %a) #0 { +; CHECK-LABEL: uitofp_i8: +; CHECK-NEXT: ushll v[[REG1:[0-9]+]].8h, v0.8b, #0 +; CHECK-NEXT: ushll2 [[LO:v[0-9]+\.4s]], v[[REG1]].8h, #0 +; CHECK-NEXT: ushll [[HI:v[0-9]+\.4s]], v[[REG1]].4h, #0 +; CHECK-DAG: ucvtf [[HIF:v[0-9]+\.4s]], [[HI]] +; CHECK-DAG: ucvtf [[LOF:v[0-9]+\.4s]], [[LO]] +; CHECK-DAG: fcvtn v[[LOREG:[0-9]+]].4h, [[LOF]] +; CHECK-DAG: fcvtn v0.4h, [[HIF]] +; CHECK: ins v0.d[1], v[[LOREG]].d[0] + %1 = uitofp <8 x i8> %a to <8 x half> + ret <8 x half> %1 +} + + +define <8 x half> @uitofp_i16(<8 x i16> %a) #0 { +; CHECK-LABEL: uitofp_i16: +; CHECK-NEXT: ushll2 [[LO:v[0-9]+\.4s]], v0.8h, #0 +; CHECK-NEXT: ushll [[HI:v[0-9]+\.4s]], v0.4h, #0 +; CHECK-DAG: ucvtf [[HIF:v[0-9]+\.4s]], [[HI]] +; CHECK-DAG: ucvtf [[LOF:v[0-9]+\.4s]], [[LO]] +; CHECK-DAG: fcvtn v[[LOREG:[0-9]+]].4h, [[LOF]] +; CHECK-DAG: fcvtn v0.4h, [[HIF]] +; CHECK: ins v0.d[1], v[[LOREG]].d[0] + %1 = uitofp <8 x i16> %a to <8 x half> + ret <8 x half> %1 +} + + +define <8 x half> @uitofp_i32(<8 x i32> %a) #0 { +; CHECK-LABEL: uitofp_i32: +; CHECK-DAG: ucvtf [[OP1:v[0-9]+\.4s]], v0.4s +; CHECK-DAG: ucvtf [[OP2:v[0-9]+\.4s]], v1.4s +; CHECK-DAG: fcvtn v[[REG:[0-9]+]].4h, [[OP2]] +; CHECK-DAG: fcvtn v0.4h, [[OP1]] +; CHECK: ins v0.d[1], v[[REG]].d[0] + %1 = uitofp <8 x i32> %a to <8 x half> + ret <8 x half> %1 +} + + +define <8 x half> @uitofp_i64(<8 x i64> %a) #0 { +; CHECK-LABEL: uitofp_i64: +; CHECK-DAG: ucvtf [[OP1:v[0-9]+\.2d]], v0.2d +; CHECK-DAG: ucvtf [[OP2:v[0-9]+\.2d]], v1.2d +; CHECK-DAG: fcvtn [[OP3:v[0-9]+]].2s, [[OP1]] +; CHECK-DAG: fcvtn2 [[OP3]].4s, [[OP2]] +; CHECK: fcvtn v0.4h, [[OP3]].4s + %1 = uitofp <8 x i64> %a to <8 x half> + ret <8 x half> %1 +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/AArch64/fp16-vector-nvcast.ll b/test/CodeGen/AArch64/fp16-vector-nvcast.ll new file mode 100644 index 0000000..83e0df7 --- /dev/null +++ b/test/CodeGen/AArch64/fp16-vector-nvcast.ll @@ -0,0 +1,89 @@ +; RUN: llc < %s -asm-verbose=false -mtriple=aarch64-none-eabi | FileCheck %s + +; Test pattern (v4f16 (AArch64NvCast (v2i32 FPR64:$src))) +define void @nvcast_v2i32(<4 x half>* %a) #0 { +; CHECK-LABEL: nvcast_v2i32: +; CHECK-NEXT: movi v[[REG:[0-9]+]].2s, #0xab, lsl #16 +; CHECK-NEXT: str d[[REG]], [x0] +; CHECK-NEXT: ret + store volatile <4 x half> <half 0xH0000, half 0xH00AB, half 0xH0000, half 0xH00AB>, <4 x half>* %a + ret void +} + + +; Test pattern (v4f16 (AArch64NvCast (v4i16 FPR64:$src))) +define void @nvcast_v4i16(<4 x half>* %a) #0 { +; CHECK-LABEL: nvcast_v4i16: +; CHECK-NEXT: movi v[[REG:[0-9]+]].4h, #0xab +; CHECK-NEXT: str d[[REG]], [x0] +; CHECK-NEXT: ret + store volatile <4 x half> <half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB>, <4 x half>* %a + ret void +} + + +; Test pattern (v4f16 (AArch64NvCast (v8i8 FPR64:$src))) +define void @nvcast_v8i8(<4 x half>* %a) #0 { +; CHECK-LABEL: 
nvcast_v8i8: +; CHECK-NEXT: movi v[[REG:[0-9]+]].8b, #0xab +; CHECK-NEXT: str d[[REG]], [x0] +; CHECK-NEXT: ret + store volatile <4 x half> <half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB>, <4 x half>* %a + ret void +} + + +; Test pattern (v4f16 (AArch64NvCast (f64 FPR64:$src))) +define void @nvcast_f64(<4 x half>* %a) #0 { +; CHECK-LABEL: nvcast_f64: +; CHECK-NEXT: movi d[[REG:[0-9]+]], #0000000000000000 +; CHECK-NEXT: str d[[REG]], [x0] +; CHECK-NEXT: ret + store volatile <4 x half> zeroinitializer, <4 x half>* %a + ret void +} + +; Test pattern (v8f16 (AArch64NvCast (v4i32 FPR128:$src))) +define void @nvcast_v4i32(<8 x half>* %a) #0 { +; CHECK-LABEL: nvcast_v4i32: +; CHECK-NEXT: movi v[[REG:[0-9]+]].4s, #0xab, lsl #16 +; CHECK-NEXT: str q[[REG]], [x0] +; CHECK-NEXT: ret + store volatile <8 x half> <half 0xH0000, half 0xH00AB, half 0xH0000, half 0xH00AB, half 0xH0000, half 0xH00AB, half 0xH0000, half 0xH00AB>, <8 x half>* %a + ret void +} + + +; Test pattern (v8f16 (AArch64NvCast (v8i16 FPR128:$src))) +define void @nvcast_v8i16(<8 x half>* %a) #0 { +; CHECK-LABEL: nvcast_v8i16: +; CHECK-NEXT: movi v[[REG:[0-9]+]].8h, #0xab +; CHECK-NEXT: str q[[REG]], [x0] +; CHECK-NEXT: ret + store volatile <8 x half> <half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB>, <8 x half>* %a + ret void +} + + +; Test pattern (v8f16 (AArch64NvCast (v16i8 FPR128:$src))) +define void @nvcast_v16i8(<8 x half>* %a) #0 { +; CHECK-LABEL: nvcast_v16i8: +; CHECK-NEXT: movi v[[REG:[0-9]+]].16b, #0xab +; CHECK-NEXT: str q[[REG]], [x0] +; CHECK-NEXT: ret + store volatile <8 x half> <half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB>, <8 x half>* %a + ret void +} + + +; Test pattern (v8f16 (AArch64NvCast (v2i64 FPR128:$src))) +define void @nvcast_v2i64(<8 x half>* %a) #0 { +; CHECK-LABEL: nvcast_v2i64: +; CHECK-NEXT: movi v[[REG:[0-9]+]].2d, #0000000000000000 +; CHECK-NEXT: str q[[REG]], [x0] +; CHECK-NEXT: ret + store volatile <8 x half> zeroinitializer, <8 x half>* %a + ret void +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/ARM/fp16-promote.ll b/test/CodeGen/ARM/fp16-promote.ll new file mode 100644 index 0000000..e691c2b --- /dev/null +++ b/test/CodeGen/ARM/fp16-promote.ll @@ -0,0 +1,903 @@ +; RUN: llc -asm-verbose=false < %s -mattr=+vfp3,+fp16 | FileCheck %s -check-prefix=CHECK-FP16 -check-prefix=CHECK-ALL +; RUN: llc -asm-verbose=false < %s | FileCheck %s -check-prefix=CHECK-LIBCALL -check-prefix=CHECK-ALL + +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32" +target triple = "armv7-eabihf" + +; CHECK-FP16-LABEL: test_fadd: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vadd.f32 +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_fadd: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: vadd.f32 +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_fadd(half* %p, half* %q) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %r = fadd half %a, %b + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_fsub: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vsub.f32 +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_fsub: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: vsub.f32 +; CHECK-LIBCALL: bl __gnu_f2h_ieee 
+define void @test_fsub(half* %p, half* %q) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %r = fsub half %a, %b + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_fmul: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vmul.f32 +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_fmul: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: vmul.f32 +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_fmul(half* %p, half* %q) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %r = fmul half %a, %b + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_fdiv: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vdiv.f32 +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_fdiv: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: vdiv.f32 +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_fdiv(half* %p, half* %q) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %r = fdiv half %a, %b + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_frem: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl fmodf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_frem: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl fmodf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_frem(half* %p, half* %q) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %r = frem half %a, %b + store half %r, half* %p + ret void +} + +; CHECK-ALL-LABEL: test_load_store: +; CHECK-ALL-NEXT: .fnstart +; CHECK-ALL-NEXT: ldrh r0, [r0] +; CHECK-ALL-NEXT: strh r0, [r1] +; CHECK-ALL-NEXT: bx lr +define void @test_load_store(half* %p, half* %q) #0 { + %a = load half, half* %p, align 2 + store half %a, half* %q + ret void +} + +; Test only successful compilation of function calls. In the ARM ABI, half +; args and returns are handled as f32.
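The arithmetic tests above all check one promote-compute-truncate shape. A minimal C sketch of that lowering, reusing the __gnu_* soft-fp16 libcall names from the CHECK lines (a hand-written illustration, not generated from this patch; fadd_f16 is an invented name):

extern float __gnu_h2f_ieee(unsigned short h);  /* f16 bits -> f32 */
extern unsigned short __gnu_f2h_ieee(float f);  /* f32 -> f16 bits */

/* `fadd half` after promotion: widen both operands to f32, add, and
   truncate the result back to f16 on the store. */
void fadd_f16(unsigned short *p, unsigned short *q) {
  float a = __gnu_h2f_ieee(*p);
  float b = __gnu_h2f_ieee(*q);
  *p = __gnu_f2h_ieee(a + b);
}

With +fp16 the two helpers collapse to single vcvtb.f32.f16 / vcvtb.f16.f32 instructions, which is exactly the difference between the two check prefixes.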
+ +declare half @test_callee(half %a, half %b) #0 + +; CHECK-ALL-LABEL: test_call: +; CHECK-ALL-NEXT: .fnstart +; CHECK-ALL-NEXT: push {r11, lr} +; CHECK-ALL-NEXT: bl test_callee +; CHECK-ALL-NEXT: pop {r11, pc} +define half @test_call(half %a, half %b) #0 { + %r = call half @test_callee(half %a, half %b) + ret half %r +} + +; CHECK-ALL-LABEL: test_call_flipped: +; CHECK-ALL-NEXT: .fnstart +; CHECK-ALL-NEXT: push {r11, lr} +; CHECK-ALL-NEXT: mov r2, r0 +; CHECK-ALL-NEXT: mov r0, r1 +; CHECK-ALL-NEXT: mov r1, r2 +; CHECK-ALL-NEXT: bl test_callee +; CHECK-ALL-NEXT: pop {r11, pc} +define half @test_call_flipped(half %a, half %b) #0 { + %r = call half @test_callee(half %b, half %a) + ret half %r +} + +; CHECK-ALL-LABEL: test_tailcall_flipped: +; CHECK-ALL-NEXT: .fnstart +; CHECK-ALL-NEXT: mov r2, r0 +; CHECK-ALL-NEXT: mov r0, r1 +; CHECK-ALL-NEXT: mov r1, r2 +; CHECK-ALL-NEXT: b test_callee +define half @test_tailcall_flipped(half %a, half %b) #0 { + %r = tail call half @test_callee(half %b, half %a) + ret half %r +} + +; Optimizer picks %p or %q based on %c and only loads that value +; No conversion is needed +; CHECK-ALL-LABEL: test_select: +; CHECK-ALL-NEXT: .fnstart +; CHECK-ALL-NEXT: cmp r2, #0 +; CHECK-ALL-NEXT: movne r1, r0 +; CHECK-ALL-NEXT: ldrh r1, [r1] +; CHECK-ALL-NEXT: strh r1, [r0] +; CHECK-ALL-NEXT: bx lr +define void @test_select(half* %p, half* %q, i1 zeroext %c) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %r = select i1 %c, half %a, half %b + store half %r, half* %p + ret void +} + +; Test only two variants of fcmp. These get translated to f32 vcmpe +; instructions anyway. +; CHECK-FP16-LABEL: test_fcmp_une: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcmpe.f32 +; CHECK-FP16: vmrs APSR_nzcv, fpscr +; CHECK-FP16: movwne +; CHECK-LIBCALL-LABEL: test_fcmp_une: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: vcmpe.f32 +; CHECK-LIBCALL: movwne +define i1 @test_fcmp_une(half* %p, half* %q) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %r = fcmp une half %a, %b + ret i1 %r +} + +; CHECK-FP16-LABEL: test_fcmp_ueq: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcmpe.f32 +; CHECK-FP16: vmrs APSR_nzcv, fpscr +; CHECK-FP16: movweq +; CHECK-FP16: movwvs +; CHECK-LIBCALL-LABEL: test_fcmp_ueq: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: vcmpe.f32 +; CHECK-LIBCALL: movweq +define i1 @test_fcmp_ueq(half* %p, half* %q) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %r = fcmp ueq half %a, %b + ret i1 %r +} + +; CHECK-FP16-LABEL: test_br_cc: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcmpe.f32 +; CHECK-FP16: vmrs APSR_nzcv, fpscr +; CHECK-FP16: strmi +; CHECK-FP16: strpl +; CHECK-LIBCALL-LABEL: test_br_cc: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: vcmpe.f32 +; CHECK-LIBCALL: strmi +; CHECK-LIBCALL: strpl +define void @test_br_cc(half* %p, half* %q, i32* %p1, i32* %p2) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %c = fcmp uge half %a, %b + br i1 %c, label %then, label %else +then: + store i32 0, i32* %p1 + ret void +else: + store i32 0, i32* %p2 + ret void +} + +declare i1 @test_dummy(half* %p) #0 +; CHECK-FP16-LABEL: test_phi: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: [[LOOP:.LBB[1-9_]+]]: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl 
test_dummy +; CHECK-FP16: bne [[LOOP]] +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_phi: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: [[LOOP:.LBB[1-9_]+]]: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl test_dummy +; CHECK-LIBCALL: bne [[LOOP]] +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_phi(half* %p) #0 { +entry: + %a = load half, half* %p + br label %loop +loop: + %r = phi half [%a, %entry], [%b, %loop] + %b = load half, half* %p + %c = call i1 @test_dummy(half* %p) + br i1 %c, label %loop, label %return +return: + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_fptosi_i32: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvt.s32.f32 +; CHECK-LIBCALL-LABEL: test_fptosi_i32: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: vcvt.s32.f32 +define i32 @test_fptosi_i32(half* %p) #0 { + %a = load half, half* %p, align 2 + %r = fptosi half %a to i32 + ret i32 %r +} + +; CHECK-FP16-LABEL: test_fptosi_i64: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl __aeabi_f2lz +; CHECK-LIBCALL-LABEL: test_fptosi_i64: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __aeabi_f2lz +define i64 @test_fptosi_i64(half* %p) #0 { + %a = load half, half* %p, align 2 + %r = fptosi half %a to i64 + ret i64 %r +} + +; CHECK-FP16-LABEL: test_fptoui_i32: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvt.u32.f32 +; CHECK-LIBCALL-LABEL: test_fptoui_i32: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: vcvt.u32.f32 +define i32 @test_fptoui_i32(half* %p) #0 { + %a = load half, half* %p, align 2 + %r = fptoui half %a to i32 + ret i32 %r +} + +; CHECK-FP16-LABEL: test_fptoui_i64: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl __aeabi_f2ulz +; CHECK-LIBCALL-LABEL: test_fptoui_i64: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __aeabi_f2ulz +define i64 @test_fptoui_i64(half* %p) #0 { + %a = load half, half* %p, align 2 + %r = fptoui half %a to i64 + ret i64 %r +} + +; CHECK-FP16-LABEL: test_sitofp_i32: +; CHECK-FP16: vcvt.f32.s32 +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_sitofp_i32: +; CHECK-LIBCALL: vcvt.f32.s32 +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_sitofp_i32(i32 %a, half* %p) #0 { + %r = sitofp i32 %a to half + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_uitofp_i32: +; CHECK-FP16: vcvt.f32.u32 +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_uitofp_i32: +; CHECK-LIBCALL: vcvt.f32.u32 +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_uitofp_i32(i32 %a, half* %p) #0 { + %r = uitofp i32 %a to half + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_sitofp_i64: +; CHECK-FP16: bl __aeabi_l2f +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_sitofp_i64: +; CHECK-LIBCALL: bl __aeabi_l2f +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_sitofp_i64(i64 %a, half* %p) #0 { + %r = sitofp i64 %a to half + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_uitofp_i64: +; CHECK-FP16: bl __aeabi_ul2f +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_uitofp_i64: +; CHECK-LIBCALL: bl __aeabi_ul2f +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_uitofp_i64(i64 %a, half* %p) #0 { + %r = uitofp i64 %a to half + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_fptrunc_float: +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_fptrunc_float: +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_fptrunc_float(float %f, half* %p) #0 { + %a = fptrunc float %f to half + store half %a, half* %p + ret void +} + +; 
CHECK-FP16-LABEL: test_fptrunc_double: +; CHECK-FP16: bl __aeabi_d2h +; CHECK-LIBCALL-LABEL: test_fptrunc_double: +; CHECK-LIBCALL: bl __aeabi_d2h +define void @test_fptrunc_double(double %d, half* %p) #0 { + %a = fptrunc double %d to half + store half %a, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_fpextend_float: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-LIBCALL-LABEL: test_fpextend_float: +; CHECK-LIBCALL: b __gnu_h2f_ieee +define float @test_fpextend_float(half* %p) { + %a = load half, half* %p, align 2 + %r = fpext half %a to float + ret float %r +} + +; CHECK-FP16-LABEL: test_fpextend_double: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvt.f64.f32 +; CHECK-LIBCALL-LABEL: test_fpextend_double: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: vcvt.f64.f32 +define double @test_fpextend_double(half* %p) { + %a = load half, half* %p, align 2 + %r = fpext half %a to double + ret double %r +} + +; CHECK-ALL-LABEL: test_bitcast_halftoi16: +; CHECK-ALL-NEXT: .fnstart +; CHECK-ALL-NEXT: ldrh r0, [r0] +; CHECK-ALL-NEXT: bx lr +define i16 @test_bitcast_halftoi16(half* %p) #0 { + %a = load half, half* %p, align 2 + %r = bitcast half %a to i16 + ret i16 %r +} + +; CHECK-ALL-LABEL: test_bitcast_i16tohalf: +; CHECK-ALL-NEXT: .fnstart +; CHECK-ALL-NEXT: strh r0, [r1] +; CHECK-ALL-NEXT: bx lr +define void @test_bitcast_i16tohalf(i16 %a, half* %p) #0 { + %r = bitcast i16 %a to half + store half %r, half* %p + ret void +} + +declare half @llvm.sqrt.f16(half %a) #0 +declare half @llvm.powi.f16(half %a, i32 %b) #0 +declare half @llvm.sin.f16(half %a) #0 +declare half @llvm.cos.f16(half %a) #0 +declare half @llvm.pow.f16(half %a, half %b) #0 +declare half @llvm.exp.f16(half %a) #0 +declare half @llvm.exp2.f16(half %a) #0 +declare half @llvm.log.f16(half %a) #0 +declare half @llvm.log10.f16(half %a) #0 +declare half @llvm.log2.f16(half %a) #0 +declare half @llvm.fma.f16(half %a, half %b, half %c) #0 +declare half @llvm.fabs.f16(half %a) #0 +declare half @llvm.minnum.f16(half %a, half %b) #0 +declare half @llvm.maxnum.f16(half %a, half %b) #0 +declare half @llvm.copysign.f16(half %a, half %b) #0 +declare half @llvm.floor.f16(half %a) #0 +declare half @llvm.ceil.f16(half %a) #0 +declare half @llvm.trunc.f16(half %a) #0 +declare half @llvm.rint.f16(half %a) #0 +declare half @llvm.nearbyint.f16(half %a) #0 +declare half @llvm.round.f16(half %a) #0 +declare half @llvm.fmuladd.f16(half %a, half %b, half %c) #0 + +; CHECK-FP16-LABEL: test_sqrt: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vsqrt.f32 +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_sqrt: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: vsqrt.f32 +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_sqrt(half* %p) #0 { + %a = load half, half* %p, align 2 + %r = call half @llvm.sqrt.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_fpowi: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl __powisf2 +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_fpowi: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __powisf2 +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_fpowi(half* %p, i32 %b) #0 { + %a = load half, half* %p, align 2 + %r = call half @llvm.powi.f16(half %a, i32 %b) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_sin: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl sinf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_sin: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl sinf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_sin(half* 
%p) #0 { + %a = load half, half* %p, align 2 + %r = call half @llvm.sin.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_cos: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl cosf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_cos: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl cosf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_cos(half* %p) #0 { + %a = load half, half* %p, align 2 + %r = call half @llvm.cos.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_pow: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl powf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_pow: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl powf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_pow(half* %p, half* %q) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %r = call half @llvm.pow.f16(half %a, half %b) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_exp: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl expf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_exp: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl expf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_exp(half* %p) #0 { + %a = load half, half* %p, align 2 + %r = call half @llvm.exp.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_exp2: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl exp2f +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_exp2: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl exp2f +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_exp2(half* %p) #0 { + %a = load half, half* %p, align 2 + %r = call half @llvm.exp2.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_log: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl logf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_log: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl logf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_log(half* %p) #0 { + %a = load half, half* %p, align 2 + %r = call half @llvm.log.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_log10: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl log10f +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_log10: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl log10f +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_log10(half* %p) #0 { + %a = load half, half* %p, align 2 + %r = call half @llvm.log10.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_log2: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl log2f +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_log2: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl log2f +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_log2(half* %p) #0 { + %a = load half, half* %p, align 2 + %r = call half @llvm.log2.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_fma: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl fmaf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_fma: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl fmaf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_fma(half* %p, half* %q, half* %r) #0 { + %a = load half, half* %p, align 2 + %b = load 
half, half* %q, align 2 + %c = load half, half* %r, align 2 + %v = call half @llvm.fma.f16(half %a, half %b, half %c) + store half %v, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_fabs: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vabs.f32 +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_fabs: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bfc +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_fabs(half* %p) { + %a = load half, half* %p, align 2 + %r = call half @llvm.fabs.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_minnum: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl fminf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_minnum: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl fminf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_minnum(half* %p, half* %q) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %r = call half @llvm.minnum.f16(half %a, half %b) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_maxnum: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl fmaxf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_maxnum: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl fmaxf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_maxnum(half* %p, half* %q) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %r = call half @llvm.maxnum.f16(half %a, half %b) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_copysign: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vbsl +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_copysign: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: vbsl +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_copysign(half* %p, half* %q) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %r = call half @llvm.copysign.f16(half %a, half %b) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_floor: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl floorf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_floor: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl floorf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_floor(half* %p) { + %a = load half, half* %p, align 2 + %r = call half @llvm.floor.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_ceil: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl ceilf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_ceil: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl ceilf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_ceil(half* %p) { + %a = load half, half* %p, align 2 + %r = call half @llvm.ceil.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_trunc: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl truncf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_trunc: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl truncf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_trunc(half* %p) { + %a = load half, half* %p, align 2 + %r = call half @llvm.trunc.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_rint: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl rintf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_rint: +; CHECK-LIBCALL: bl 
__gnu_h2f_ieee +; CHECK-LIBCALL: bl rintf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_rint(half* %p) { + %a = load half, half* %p, align 2 + %r = call half @llvm.rint.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_nearbyint: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl nearbyintf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_nearbyint: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl nearbyintf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_nearbyint(half* %p) { + %a = load half, half* %p, align 2 + %r = call half @llvm.nearbyint.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_round: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: bl roundf +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_round: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl roundf +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_round(half* %p) { + %a = load half, half* %p, align 2 + %r = call half @llvm.round.f16(half %a) + store half %r, half* %p + ret void +} + +; CHECK-FP16-LABEL: test_fmuladd: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-FP16: vmla.f32 +; CHECK-FP16: vcvtb.f16.f32 +; CHECK-LIBCALL-LABEL: test_fmuladd: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: bl __gnu_h2f_ieee +; CHECK-LIBCALL: vmla.f32 +; CHECK-LIBCALL: bl __gnu_f2h_ieee +define void @test_fmuladd(half* %p, half* %q, half* %r) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %c = load half, half* %r, align 2 + %v = call half @llvm.fmuladd.f16(half %a, half %b, half %c) + store half %v, half* %p + ret void +} + +; f16 vectors are not legal in the backend. Vector elements are not assigned +; to registers, but are stored on the stack instead. Hence insertelement +; and extractelement have these extra loads and stores.
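A rough C analogue of that stack round-trip, before the tests that check it (a sketch in which only the spill/store/reload shape is meaningful; insert_f16_elt and f16bits are invented names):

typedef unsigned short f16bits;

/* With no legal f16 vector register class, a dynamic-index insert goes
   through a stack slot: spill the vector element by element, store the
   new element at the variable index, then reload the whole vector. */
void insert_f16_elt(f16bits v[4], f16bits elt, int i) {
  f16bits slot[4];               /* the `sub sp, sp, #8` temporary */
  for (int k = 0; k < 4; ++k)
    slot[k] = v[k];              /* the ldrh/strh spill sequence */
  slot[i] = elt;                 /* store through the dynamic index */
  for (int k = 0; k < 4; ++k)
    v[k] = slot[k];              /* reload the updated vector */
}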
+ +; CHECK-ALL-LABEL: test_insertelement: +; CHECK-ALL-NEXT: .fnstart +; CHECK-ALL-NEXT: sub sp, sp, #8 +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: strh +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: strh +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: strh +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: strh +; CHECK-ALL-NEXT: mov +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: add +; CHECK-ALL-NEXT: strh +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: strh +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: strh +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: strh +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: strh +; CHECK-ALL-NEXT: add sp, sp, #8 +; CHECK-ALL-NEXT: bx lr +define void @test_insertelement(half* %p, <4 x half>* %q, i32 %i) #0 { + %a = load half, half* %p, align 2 + %b = load <4 x half>, <4 x half>* %q, align 8 + %c = insertelement <4 x half> %b, half %a, i32 %i + store <4 x half> %c, <4 x half>* %q + ret void +} + +; CHECK-ALL-LABEL: test_extractelement: +; CHECK-ALL-NEXT: .fnstart +; CHECK-ALL-NEXT: sub sp, sp, #8 +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: orr +; CHECK-ALL-NEXT: str +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: orr +; CHECK-ALL-NEXT: str +; CHECK-ALL-NEXT: mov +; CHECK-ALL-NEXT: add +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: strh +; CHECK-ALL-NEXT: add sp, sp, #8 +; CHECK-ALL-NEXT: bx lr +define void @test_extractelement(half* %p, <4 x half>* %q, i32 %i) #0 { + %a = load <4 x half>, <4 x half>* %q, align 8 + %b = extractelement <4 x half> %a, i32 %i + store half %b, half* %p + ret void +} + +; test struct operations + +%struct.dummy = type { i32, half } + +; CHECK-ALL-LABEL: test_insertvalue: +; CHECK-ALL-NEXT: .fnstart +; CHECK-ALL-NEXT: ldr +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: strh +; CHECK-ALL-NEXT: str +; CHECK-ALL-NEXT: bx lr +define void @test_insertvalue(%struct.dummy* %p, half* %q) { + %a = load %struct.dummy, %struct.dummy* %p + %b = load half, half* %q + %c = insertvalue %struct.dummy %a, half %b, 1 + store %struct.dummy %c, %struct.dummy* %p + ret void +} + +; CHECK-ALL-LABEL: test_extractvalue: +; CHECK-ALL-NEXT: .fnstart +; CHECK-ALL-NEXT: ldrh +; CHECK-ALL-NEXT: strh +; CHECK-ALL-NEXT: bx lr +define void @test_extractvalue(%struct.dummy* %p, half* %q) { + %a = load %struct.dummy, %struct.dummy* %p + %b = extractvalue %struct.dummy %a, 1 + store half %b, half* %q + ret void +} + +; CHECK-FP16-LABEL: test_struct_return: +; CHECK-FP16: vcvtb.f32.f16 +; CHECK-LIBCALL-LABEL: test_struct_return: +; CHECK-LIBCALL: bl __gnu_h2f_ieee +define %struct.dummy @test_struct_return(%struct.dummy* %p) { + %a = load %struct.dummy, %struct.dummy* %p + ret %struct.dummy %a +} + +; CHECK-ALL-LABEL: test_struct_arg: +; CHECK-ALL-NEXT: .fnstart +; CHECK-ALL-NEXT: mov r0, r1 +; CHECK-ALL-NEXT: bx lr +define half @test_struct_arg(%struct.dummy %p) { + %a = extractvalue %struct.dummy %p, 1 + ret half %a +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/Mips/fp16-promote.ll b/test/CodeGen/Mips/fp16-promote.ll new file mode 100644 index 0000000..2ac46e0 --- /dev/null +++ b/test/CodeGen/Mips/fp16-promote.ll @@ -0,0 +1,98 @@ +; RUN: llc -asm-verbose=false -mtriple=mipsel-linux-gnueabi < %s | FileCheck %s -check-prefix=CHECK-LIBCALL + +; CHECK-LIBCALL-LABEL: test_fadd: +; CHECK-LIBCALL: %call16(__gnu_h2f_ieee) +; CHECK-LIBCALL: %call16(__gnu_h2f_ieee) +; CHECK-LIBCALL-DAG: add.s +; CHECK-LIBCALL-DAG: %call16(__gnu_f2h_ieee) +define void @test_fadd(half* %p, half* %q) #0 { + %a = load half, half* %p, align 2 + %b = load half, half* %q, align 2 + %r = 
fadd half %a, %b + store half %r, half* %p + ret void +} + +; CHECK-LIBCALL-LABEL: test_fpext_float: +; CHECK-LIBCALL: %call16(__gnu_h2f_ieee) +define float @test_fpext_float(half* %p) { + %a = load half, half* %p, align 2 + %r = fpext half %a to float + ret float %r +} + +; CHECK-LIBCALL-LABEL: test_fpext_double: +; CHECK-LIBCALL: %call16(__gnu_h2f_ieee) +; CHECK-LIBCALL: cvt.d.s +define double @test_fpext_double(half* %p) { + %a = load half, half* %p, align 2 + %r = fpext half %a to double + ret double %r +} + +; CHECK-LIBCALL-LABEL: test_fptrunc_float: +; CHECK-LIBCALL: %call16(__gnu_f2h_ieee) +define void @test_fptrunc_float(float %f, half* %p) #0 { + %a = fptrunc float %f to half + store half %a, half* %p + ret void +} + +; CHECK-LIBCALL-LABEL: test_fptrunc_double: +; CHECK-LIBCALL: %call16(__truncdfhf2) +define void @test_fptrunc_double(double %d, half* %p) #0 { + %a = fptrunc double %d to half + store half %a, half* %p + ret void +} + +; CHECK-LIBCALL-LABEL: test_vec_fpext_float: +; CHECK-LIBCALL: %call16(__gnu_h2f_ieee) +; CHECK-LIBCALL: %call16(__gnu_h2f_ieee) +; CHECK-LIBCALL: %call16(__gnu_h2f_ieee) +; CHECK-LIBCALL: %call16(__gnu_h2f_ieee) +define <4 x float> @test_vec_fpext_float(<4 x half>* %p) #0 { + %a = load <4 x half>, <4 x half>* %p, align 8 + %b = fpext <4 x half> %a to <4 x float> + ret <4 x float> %b +} + +; This test is not robust against variations in instruction scheduling. +; See the discussion in http://reviews.llvm.org/D8804 +; CHECK-LIBCALL-LABEL: test_vec_fpext_double: +; CHECK-LIBCALL: %call16(__gnu_h2f_ieee) +; CHECK-LIBCALL: %call16(__gnu_h2f_ieee) +; CHECK-LIBCALL: %call16(__gnu_h2f_ieee) +; CHECK-LIBCALL: cvt.d.s +; CHECK-LIBCALL: cvt.d.s +; CHECK-LIBCALL: cvt.d.s +; CHECK-LIBCALL: %call16(__gnu_h2f_ieee) +; CHECK-LIBCALL: cvt.d.s +define <4 x double> @test_vec_fpext_double(<4 x half>* %p) #0 { + %a = load <4 x half>, <4 x half>* %p, align 8 + %b = fpext <4 x half> %a to <4 x double> + ret <4 x double> %b +} + +; CHECK-LIBCALL-LABEL: test_vec_fptrunc_float: +; CHECK-LIBCALL: %call16(__gnu_f2h_ieee) +; CHECK-LIBCALL: %call16(__gnu_f2h_ieee) +; CHECK-LIBCALL: %call16(__gnu_f2h_ieee) +; CHECK-LIBCALL: %call16(__gnu_f2h_ieee) +define void @test_vec_fptrunc_float(<4 x float> %a, <4 x half>* %p) #0 { + %b = fptrunc <4 x float> %a to <4 x half> + store <4 x half> %b, <4 x half>* %p, align 8 + ret void +} + +; CHECK-LIBCALL-LABEL: test_vec_fptrunc_double: +; CHECK-LIBCALL: %call16(__truncdfhf2) +; CHECK-LIBCALL: %call16(__truncdfhf2) +; CHECK-LIBCALL: %call16(__truncdfhf2) +; CHECK-LIBCALL: %call16(__truncdfhf2) +define void @test_vec_fptrunc_double(<4 x double> %a, <4 x half>* %p) #0 { + %b = fptrunc <4 x double> %a to <4 x half> + store <4 x half> %b, <4 x half>* %p, align 8 + ret void +} + diff --git a/test/CodeGen/X86/half.ll b/test/CodeGen/X86/half.ll index f4331ba..8a72637 100644 --- a/test/CodeGen/X86/half.ll +++ b/test/CodeGen/X86/half.ll @@ -1,5 +1,7 @@ -; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=-f16c | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LIBCALL -; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=+f16c | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-F16C +; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=-f16c -asm-verbose=false \ +; RUN: | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LIBCALL +; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=+f16c -asm-verbose=false \ +; RUN: | 
FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-F16C define void @test_load_store(half* %in, half* %out) { ; CHECK-LABEL: test_load_store: @@ -30,7 +32,7 @@ define float @test_extend32(half* %addr) { ; CHECK-LABEL: test_extend32: ; CHECK-LIBCALL: jmp __gnu_h2f_ieee -; CHECK-FP16: vcvtph2ps +; CHECK-F16C: vcvtph2ps %val16 = load half, half* %addr %val32 = fpext half %val16 to float ret float %val32 @@ -41,8 +43,8 @@ define double @test_extend64(half* %addr) { ; CHECK-LIBCALL: callq __gnu_h2f_ieee ; CHECK-LIBCALL: cvtss2sd -; CHECK-FP16: vcvtph2ps -; CHECK-FP16: vcvtss2sd +; CHECK-F16C: vcvtph2ps +; CHECK-F16C: vcvtss2sd %val16 = load half, half* %addr %val32 = fpext half %val16 to double ret double %val32 @@ -52,7 +54,7 @@ define void @test_trunc32(float %in, half* %addr) { ; CHECK-LABEL: test_trunc32: ; CHECK-LIBCALL: callq __gnu_f2h_ieee -; CHECK-FP16: vcvtps2ph +; CHECK-F16C: vcvtps2ph %val16 = fptrunc float %in to half store half %val16, half* %addr ret void @@ -62,8 +64,200 @@ define void @test_trunc64(double %in, half* %addr) { ; CHECK-LABEL: test_trunc64: ; CHECK-LIBCALL: callq __truncdfhf2 -; CHECK-FP16: callq __truncdfhf2 +; CHECK-F16C: callq __truncdfhf2 %val16 = fptrunc double %in to half store half %val16, half* %addr ret void } + +define i64 @test_fptosi_i64(half* %p) #0 { +; CHECK-LABEL: test_fptosi_i64: + +; CHECK-LIBCALL-NEXT: pushq %rax +; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi +; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee +; CHECK-LIBCALL-NEXT: cvttss2si %xmm0, %rax +; CHECK-LIBCALL-NEXT: popq %rdx +; CHECK-LIBCALL-NEXT: retq + +; CHECK-F16C-NEXT: movswl (%rdi), [[REG0:%[a-z0-9]+]] +; CHECK-F16C-NEXT: vmovd [[REG0]], [[REG1:%[a-z0-9]+]] +; CHECK-F16C-NEXT: vcvtph2ps [[REG1]], [[REG2:%[a-z0-9]+]] +; CHECK-F16C-NEXT: vcvttss2si [[REG2]], %rax +; CHECK-F16C-NEXT: retq + %a = load half, half* %p, align 2 + %r = fptosi half %a to i64 + ret i64 %r +} + +define void @test_sitofp_i64(i64 %a, half* %p) #0 { +; CHECK-LABEL: test_sitofp_i64: + +; CHECK-LIBCALL-NEXT: pushq [[ADDR:%[a-z]+]] +; CHECK-LIBCALL-NEXT: movq %rsi, [[ADDR]] +; CHECK-LIBCALL-NEXT: cvtsi2ssq %rdi, %xmm0 +; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee +; CHECK-LIBCALL-NEXT: movw %ax, ([[ADDR]]) +; CHECK-LIBCALL-NEXT: popq [[ADDR]] +; CHECK-LIBCALL-NEXT: retq + +; CHECK-F16C-NEXT: vcvtsi2ssq %rdi, [[REG0:%[a-z0-9]+]], [[REG0]] +; CHECK-F16C-NEXT: vcvtps2ph $0, [[REG0]], [[REG0]] +; CHECK-F16C-NEXT: vmovd [[REG0]], %eax +; CHECK-F16C-NEXT: movw %ax, (%rsi) +; CHECK-F16C-NEXT: retq + %r = sitofp i64 %a to half + store half %r, half* %p + ret void +} + +define i64 @test_fptoui_i64(half* %p) #0 { +; CHECK-LABEL: test_fptoui_i64: + +; FP_TO_UINT is expanded using FP_TO_SINT (sketched in C after the last test) +; CHECK-LIBCALL-NEXT: pushq %rax +; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi +; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee +; CHECK-LIBCALL-NEXT: movss {{.[A-Z_0-9]+}}(%rip), [[REG1:%[a-z0-9]+]] +; CHECK-LIBCALL-NEXT: movaps %xmm0, [[REG2:%[a-z0-9]+]] +; CHECK-LIBCALL-NEXT: subss [[REG1]], [[REG2]] +; CHECK-LIBCALL-NEXT: cvttss2si [[REG2]], [[REG3:%[a-z0-9]+]] +; CHECK-LIBCALL-NEXT: movabsq $-9223372036854775808, [[REG4:%[a-z0-9]+]] +; CHECK-LIBCALL-NEXT: xorq [[REG3]], [[REG4]] +; CHECK-LIBCALL-NEXT: cvttss2si %xmm0, [[REG5:%[a-z0-9]+]] +; CHECK-LIBCALL-NEXT: ucomiss [[REG1]], %xmm0 +; CHECK-LIBCALL-NEXT: cmovaeq [[REG4]], [[REG5]] +; CHECK-LIBCALL-NEXT: popq %rdx +; CHECK-LIBCALL-NEXT: retq + +; CHECK-F16C-NEXT: movswl (%rdi), [[REG0:%[a-z0-9]+]] +; CHECK-F16C-NEXT: vmovd [[REG0]], [[REG1:%[a-z0-9]+]] +; CHECK-F16C-NEXT: vcvtph2ps [[REG1]],
[[REG2:%[a-z0-9]+]] +; CHECK-F16C-NEXT: vmovss {{.[A-Z_0-9]+}}(%rip), [[REG3:%[a-z0-9]+]] +; CHECK-F16C-NEXT: vsubss [[REG3]], [[REG2]], [[REG4:%[a-z0-9]+]] +; CHECK-F16C-NEXT: vcvttss2si [[REG4]], [[REG5:%[a-z0-9]+]] +; CHECK-F16C-NEXT: movabsq $-9223372036854775808, [[REG6:%[a-z0-9]+]] +; CHECK-F16C-NEXT: xorq [[REG5]], [[REG6:%[a-z0-9]+]] +; CHECK-F16C-NEXT: vcvttss2si [[REG2]], [[REG7:%[a-z0-9]+]] +; CHECK-F16C-NEXT: vucomiss [[REG3]], [[REG2]] +; CHECK-F16C-NEXT: cmovaeq [[REG6]], %rax +; CHECK-F16C-NEXT: retq + %a = load half, half* %p, align 2 + %r = fptoui half %a to i64 + ret i64 %r +} + +define void @test_uitofp_i64(i64 %a, half* %p) #0 { +; CHECK-LABEL: test_uitofp_i64: +; CHECK-LIBCALL-NEXT: pushq [[ADDR:%[a-z0-9]+]] +; CHECK-LIBCALL-NEXT: movq %rsi, [[ADDR]] +; CHECK-NEXT: movl %edi, [[REG0:%[a-z0-9]+]] +; CHECK-NEXT: andl $1, [[REG0]] +; CHECK-NEXT: testq %rdi, %rdi +; CHECK-NEXT: js [[LABEL1:.LBB[0-9_]+]] + +; simple conversion to float if non-negative +; CHECK-LIBCALL-NEXT: cvtsi2ssq %rdi, [[REG1:%[a-z0-9]+]] +; CHECK-F16C-NEXT: vcvtsi2ssq %rdi, [[REG1:%[a-z0-9]+]], [[REG1]] +; CHECK-NEXT: jmp [[LABEL2:.LBB[0-9_]+]] + +; convert using shift+or if negative +; CHECK-NEXT: [[LABEL1]]: +; CHECK-NEXT: shrq %rdi +; CHECK-NEXT: orq %rdi, [[REG2:%[a-z0-9]+]] +; CHECK-LIBCALL-NEXT: cvtsi2ssq [[REG2]], [[REG3:%[a-z0-9]+]] +; CHECK-LIBCALL-NEXT: addss [[REG3]], [[REG1]] +; CHECK-F16C-NEXT: vcvtsi2ssq [[REG2]], [[REG3:%[a-z0-9]+]], [[REG3]] +; CHECK-F16C-NEXT: vaddss [[REG3]], [[REG3]], [[REG1:%[a-z0-9]+]] + +; convert float to half +; CHECK-NEXT: [[LABEL2]]: +; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee +; CHECK-LIBCALL-NEXT: movw %ax, ([[ADDR]]) +; CHECK-LIBCALL-NEXT: popq [[ADDR]] +; CHECK-F16C-NEXT: vcvtps2ph $0, [[REG1]], [[REG4:%[a-z0-9]+]] +; CHECK-F16C-NEXT: vmovd [[REG4]], %eax +; CHECK-F16C-NEXT: movw %ax, (%rsi) +; CHECK-NEXT: retq + + %r = uitofp i64 %a to half + store half %r, half* %p + ret void +} + +define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 { +; CHECK-LABEL: test_extend32_vec4: + +; CHECK-LIBCALL: callq __gnu_h2f_ieee +; CHECK-LIBCALL: callq __gnu_h2f_ieee +; CHECK-LIBCALL: callq __gnu_h2f_ieee +; CHECK-LIBCALL: callq __gnu_h2f_ieee +; CHECK-F16C: vcvtph2ps +; CHECK-F16C: vcvtph2ps +; CHECK-F16C: vcvtph2ps +; CHECK-F16C: vcvtph2ps + %a = load <4 x half>, <4 x half>* %p, align 8 + %b = fpext <4 x half> %a to <4 x float> + ret <4 x float> %b +} + +define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 { +; CHECK-LABEL: test_extend64_vec4: + +; CHECK-LIBCALL: callq __gnu_h2f_ieee +; CHECK-LIBCALL-DAG: callq __gnu_h2f_ieee +; CHECK-LIBCALL-DAG: callq __gnu_h2f_ieee +; CHECK-LIBCALL-DAG: callq __gnu_h2f_ieee +; CHECK-LIBCALL-DAG: cvtss2sd +; CHECK-LIBCALL-DAG: cvtss2sd +; CHECK-LIBCALL-DAG: cvtss2sd +; CHECK-LIBCALL: cvtss2sd +; CHECK-F16C: vcvtph2ps +; CHECK-F16C-DAG: vcvtph2ps +; CHECK-F16C-DAG: vcvtph2ps +; CHECK-F16C-DAG: vcvtph2ps +; CHECK-F16C-DAG: vcvtss2sd +; CHECK-F16C-DAG: vcvtss2sd +; CHECK-F16C-DAG: vcvtss2sd +; CHECK-F16C: vcvtss2sd + %a = load <4 x half>, <4 x half>* %p, align 8 + %b = fpext <4 x half> %a to <4 x double> + ret <4 x double> %b +} + +define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) { +; CHECK-LABEL: test_trunc32_vec4: + +; CHECK-LIBCALL: callq __gnu_f2h_ieee +; CHECK-LIBCALL: callq __gnu_f2h_ieee +; CHECK-LIBCALL: callq __gnu_f2h_ieee +; CHECK-LIBCALL: callq __gnu_f2h_ieee +; CHECK-F16C: vcvtps2ph +; CHECK-F16C: vcvtps2ph +; CHECK-F16C: vcvtps2ph +; CHECK-F16C: vcvtps2ph +; CHECK: movw +; CHECK: movw +; CHECK: movw
+; CHECK: movw + %v = fptrunc <4 x float> %a to <4 x half> + store <4 x half> %v, <4 x half>* %p + ret void +} + +define void @test_trunc64_vec4(<4 x double> %a, <4 x half>* %p) { +; CHECK-LABEL: test_trunc64_vec4: +; CHECK: callq __truncdfhf2 +; CHECK: callq __truncdfhf2 +; CHECK: callq __truncdfhf2 +; CHECK: callq __truncdfhf2 +; CHECK: movw +; CHECK: movw +; CHECK: movw +; CHECK: movw + %v = fptrunc <4 x double> %a to <4 x half> + store <4 x half> %v, <4 x half>* %p + ret void +} + +attributes #0 = { nounwind }
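To close, the C sketch referenced next to test_fptoui_i64: both x86 integer-conversion expansions exercised above, written with explicit branches for clarity (illustrative only; the function names are invented, and the out-of-range signed converts model what cvttss2si produces in hardware rather than strictly portable C):

#include <stdint.h>

/* FP_TO_UINT via FP_TO_SINT: values below 2^63 convert directly; larger
   ones are rebased by 2^63 and bit 63 is restored with an xor -- the
   subss/cvttss2si/movabsq/xorq/ucomiss/cmovae sequence in the checks. */
uint64_t fptoui_i64_expansion(float x) {
  const float two63 = 9223372036854775808.0f;   /* 2^63 */
  uint64_t big = (uint64_t)(int64_t)(x - two63)
                 ^ 0x8000000000000000ULL;       /* rebase, set bit 63 */
  uint64_t small = (uint64_t)(int64_t)x;        /* valid when x < 2^63 */
  return x < two63 ? small : big;               /* the cmovae selection */
}

/* uitofp via sitofp, as in test_uitofp_i64: non-negative inputs convert
   directly; otherwise halve with the low bit or'd back in (to keep the
   rounding correct), convert signed, and double with the final add. */
float uitofp_i64_expansion(uint64_t u) {
  if ((int64_t)u >= 0)
    return (float)(int64_t)u;             /* the `js` fall-through path */
  uint64_t halved = (u >> 1) | (u & 1);   /* shrq + orq */
  float f = (float)(int64_t)halved;
  return f + f;                           /* addss / vaddss */
}

In both tests the remaining f32 <-> f16 step is the same __gnu_f2h_ieee / vcvtps2ph (or __gnu_h2f_ieee / vcvtph2ps) conversion as in the scalar cases.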