Diffstat (limited to 'lib/Target')
-rw-r--r--  lib/Target/AArch64/AArch64ISelDAGToDAG.cpp               | 204
-rw-r--r--  lib/Target/AArch64/AArch64ISelLowering.cpp               | 135
-rw-r--r--  lib/Target/AArch64/AArch64ISelLowering.h                 |  14
-rw-r--r--  lib/Target/AArch64/AArch64InstrFormats.td                |  26
-rw-r--r--  lib/Target/AArch64/AArch64InstrNEON.td                   | 224
-rw-r--r--  lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp        |  11
-rw-r--r--  lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp  | 108
7 files changed, 708 insertions(+), 14 deletions(-)
diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 3b0dd64..fb4e19c 100644
--- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -110,10 +110,12 @@ public:
   SDNode* Select(SDNode*);
 private:
   /// Select NEON load intrinsics.  NumVecs should be 1, 2, 3 or 4.
-  SDNode *SelectVLD(SDNode *N, unsigned NumVecs, const uint16_t *Opcode);
+  SDNode *SelectVLD(SDNode *N, unsigned NumVecs, bool isUpdating,
+                    const uint16_t *Opcode);
 
   /// Select NEON store intrinsics.  NumVecs should be 1, 2, 3 or 4.
-  SDNode *SelectVST(SDNode *N, unsigned NumVecs, const uint16_t *Opcodes);
+  SDNode *SelectVST(SDNode *N, unsigned NumVecs, bool isUpdating,
+                    const uint16_t *Opcodes);
 
   // Form pairs of consecutive 64-bit/128-bit registers.
   SDNode *createDPairNode(SDValue V0, SDValue V1);
@@ -485,7 +487,88 @@ SDNode *AArch64DAGToDAGISel::createQQuadNode(SDValue V0, SDValue V1, SDValue V2,
                                 Ops);
 }
 
+// Get the register stride update opcode of a VLD/VST instruction that
+// is otherwise equivalent to the given fixed stride updating instruction.
+static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
+  switch (Opc) {
+  default: break;
+  case AArch64::LD1WB_8B_fixed: return AArch64::LD1WB_8B_register;
+  case AArch64::LD1WB_4H_fixed: return AArch64::LD1WB_4H_register;
+  case AArch64::LD1WB_2S_fixed: return AArch64::LD1WB_2S_register;
+  case AArch64::LD1WB_1D_fixed: return AArch64::LD1WB_1D_register;
+  case AArch64::LD1WB_16B_fixed: return AArch64::LD1WB_16B_register;
+  case AArch64::LD1WB_8H_fixed: return AArch64::LD1WB_8H_register;
+  case AArch64::LD1WB_4S_fixed: return AArch64::LD1WB_4S_register;
+  case AArch64::LD1WB_2D_fixed: return AArch64::LD1WB_2D_register;
+
+  case AArch64::LD2WB_8B_fixed: return AArch64::LD2WB_8B_register;
+  case AArch64::LD2WB_4H_fixed: return AArch64::LD2WB_4H_register;
+  case AArch64::LD2WB_2S_fixed: return AArch64::LD2WB_2S_register;
+  case AArch64::LD1WB2V_1D_fixed: return AArch64::LD1WB2V_1D_register;
+  case AArch64::LD2WB_16B_fixed: return AArch64::LD2WB_16B_register;
+  case AArch64::LD2WB_8H_fixed: return AArch64::LD2WB_8H_register;
+  case AArch64::LD2WB_4S_fixed: return AArch64::LD2WB_4S_register;
+  case AArch64::LD2WB_2D_fixed: return AArch64::LD2WB_2D_register;
+
+  case AArch64::LD3WB_8B_fixed: return AArch64::LD3WB_8B_register;
+  case AArch64::LD3WB_4H_fixed: return AArch64::LD3WB_4H_register;
+  case AArch64::LD3WB_2S_fixed: return AArch64::LD3WB_2S_register;
+  case AArch64::LD1WB3V_1D_fixed: return AArch64::LD1WB3V_1D_register;
+  case AArch64::LD3WB_16B_fixed: return AArch64::LD3WB_16B_register;
+  case AArch64::LD3WB_8H_fixed: return AArch64::LD3WB_8H_register;
+  case AArch64::LD3WB_4S_fixed: return AArch64::LD3WB_4S_register;
+  case AArch64::LD3WB_2D_fixed: return AArch64::LD3WB_2D_register;
+
+  case AArch64::LD4WB_8B_fixed: return AArch64::LD4WB_8B_register;
+  case AArch64::LD4WB_4H_fixed: return AArch64::LD4WB_4H_register;
+  case AArch64::LD4WB_2S_fixed: return AArch64::LD4WB_2S_register;
+  case AArch64::LD1WB4V_1D_fixed: return AArch64::LD1WB4V_1D_register;
+  case AArch64::LD4WB_16B_fixed: return AArch64::LD4WB_16B_register;
+  case AArch64::LD4WB_8H_fixed: return AArch64::LD4WB_8H_register;
+  case AArch64::LD4WB_4S_fixed: return AArch64::LD4WB_4S_register;
+  case AArch64::LD4WB_2D_fixed: return AArch64::LD4WB_2D_register;
+
+  case AArch64::ST1WB_8B_fixed: return AArch64::ST1WB_8B_register;
+  case AArch64::ST1WB_4H_fixed: return AArch64::ST1WB_4H_register;
+  case AArch64::ST1WB_2S_fixed: return AArch64::ST1WB_2S_register;
+  case AArch64::ST1WB_1D_fixed: return AArch64::ST1WB_1D_register;
+  case AArch64::ST1WB_16B_fixed: return AArch64::ST1WB_16B_register;
+  case AArch64::ST1WB_8H_fixed: return AArch64::ST1WB_8H_register;
+  case AArch64::ST1WB_4S_fixed: return AArch64::ST1WB_4S_register;
+  case AArch64::ST1WB_2D_fixed: return AArch64::ST1WB_2D_register;
+
+  case AArch64::ST2WB_8B_fixed: return AArch64::ST2WB_8B_register;
+  case AArch64::ST2WB_4H_fixed: return AArch64::ST2WB_4H_register;
+  case AArch64::ST2WB_2S_fixed: return AArch64::ST2WB_2S_register;
+  case AArch64::ST1WB2V_1D_fixed: return AArch64::ST1WB2V_1D_register;
+  case AArch64::ST2WB_16B_fixed: return AArch64::ST2WB_16B_register;
+  case AArch64::ST2WB_8H_fixed: return AArch64::ST2WB_8H_register;
+  case AArch64::ST2WB_4S_fixed: return AArch64::ST2WB_4S_register;
+  case AArch64::ST2WB_2D_fixed: return AArch64::ST2WB_2D_register;
+
+  case AArch64::ST3WB_8B_fixed: return AArch64::ST3WB_8B_register;
+  case AArch64::ST3WB_4H_fixed: return AArch64::ST3WB_4H_register;
+  case AArch64::ST3WB_2S_fixed: return AArch64::ST3WB_2S_register;
+  case AArch64::ST1WB3V_1D_fixed: return AArch64::ST1WB3V_1D_register;
+  case AArch64::ST3WB_16B_fixed: return AArch64::ST3WB_16B_register;
+  case AArch64::ST3WB_8H_fixed: return AArch64::ST3WB_8H_register;
+  case AArch64::ST3WB_4S_fixed: return AArch64::ST3WB_4S_register;
+  case AArch64::ST3WB_2D_fixed: return AArch64::ST3WB_2D_register;
+
+  case AArch64::ST4WB_8B_fixed: return AArch64::ST4WB_8B_register;
+  case AArch64::ST4WB_4H_fixed: return AArch64::ST4WB_4H_register;
+  case AArch64::ST4WB_2S_fixed: return AArch64::ST4WB_2S_register;
+  case AArch64::ST1WB4V_1D_fixed: return AArch64::ST1WB4V_1D_register;
+  case AArch64::ST4WB_16B_fixed: return AArch64::ST4WB_16B_register;
+  case AArch64::ST4WB_8H_fixed: return AArch64::ST4WB_8H_register;
+  case AArch64::ST4WB_4S_fixed: return AArch64::ST4WB_4S_register;
+  case AArch64::ST4WB_2D_fixed: return AArch64::ST4WB_2D_register;
+  }
+  return Opc; // If not one we handle, return it unchanged.
+}
+
 SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
+                                       bool isUpdating,
                                        const uint16_t *Opcodes) {
   assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
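
The fixed/register split this table encodes mirrors the two post-index addressing forms of the ISA: `ld1 {v0.4s}, [x0], #16` (immediate offset equal to the transfer size, with Rm encoded as 0b11111) versus `ld1 {v0.4s}, [x0], x2` (register offset). A minimal standalone sketch of the decision SelectVLD/SelectVST make below; the enum and helper names here are stand-ins, not LLVM's:

#include <cassert>
#include <cstdint>
#include <optional>

// Stand-in opcode pair; the backend has one such pair per vector layout.
enum Opcode : uint16_t { LD1WB_4S_fixed, LD1WB_4S_register };

// Mirrors getVLDSTRegisterUpdateOpcode for this single pair.
Opcode registerUpdateOpcode(Opcode Opc) {
  return Opc == LD1WB_4S_fixed ? LD1WB_4S_register : Opc;
}

// Mirrors the check on the increment operand: a constant increment keeps
// the "_fixed" form (Rm field forced to 0b11111); anything else switches
// to the "_register" form so Rm can name a GPR.
Opcode pickOpcode(Opcode Fixed, std::optional<int64_t> ConstantInc) {
  return ConstantInc ? Fixed : registerUpdateOpcode(Fixed);
}

int main() {
  assert(pickOpcode(LD1WB_4S_fixed, 16) == LD1WB_4S_fixed);              // ld1 {v0.4s}, [x0], #16
  assert(pickOpcode(LD1WB_4S_fixed, std::nullopt) == LD1WB_4S_register); // ld1 {v0.4s}, [x0], x2
}
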
@@ -510,7 +593,16 @@ SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
   unsigned Opc = Opcodes[OpcodeIndex];
 
   SmallVector<SDValue, 2> Ops;
-  Ops.push_back(N->getOperand(2)); // Push back the Memory Address
+  unsigned AddrOpIdx = isUpdating ? 1 : 2;
+  Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address
+
+  if (isUpdating) {
+    SDValue Inc = N->getOperand(AddrOpIdx + 1);
+    if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
+      Opc = getVLDSTRegisterUpdateOpcode(Opc);
+    Ops.push_back(Inc);
+  }
+
   Ops.push_back(N->getOperand(0)); // Push back the Chain
 
   std::vector<EVT> ResTys;
@@ -526,6 +618,8 @@ SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
     ResTys.push_back(ResTy);
   }
 
+  if (isUpdating)
+    ResTys.push_back(MVT::i64); // Type of the updated register
   ResTys.push_back(MVT::Other); // Type of the Chain
   SDLoc dl(N);
   SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
@@ -548,11 +642,14 @@ SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
         CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
 
   // Update users of the Chain
   ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
+  if (isUpdating)
+    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
 
   return NULL;
 }
 
 SDNode *AArch64DAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
+                                       bool isUpdating,
                                        const uint16_t *Opcodes) {
   assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
   SDLoc dl(N);
@@ -560,7 +657,8 @@ SDNode *AArch64DAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
   MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
   MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
 
-  unsigned Vec0Idx = 3;
+  unsigned AddrOpIdx = isUpdating ? 1 : 2;
+  unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
   EVT VT = N->getOperand(Vec0Idx).getValueType();
   unsigned OpcodeIndex;
   switch (VT.getSimpleVT().SimpleTy) {
@@ -582,11 +680,19 @@ SDNode *AArch64DAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
   unsigned Opc = Opcodes[OpcodeIndex];
 
   std::vector<EVT> ResTys;
+  if (isUpdating)
+    ResTys.push_back(MVT::i64);
   ResTys.push_back(MVT::Other); // Type for the Chain
 
   SmallVector<SDValue, 6> Ops;
-  Ops.push_back(N->getOperand(2)); // Push back the Memory Address
+  Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address
+
+  if (isUpdating) {
+    SDValue Inc = N->getOperand(AddrOpIdx + 1);
+    if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
+      Opc = getVLDSTRegisterUpdateOpcode(Opc);
+    Ops.push_back(Inc);
+  }
 
   bool is64BitVector = VT.is64BitVector();
   SDValue V0 = N->getOperand(Vec0Idx + 0);
@@ -768,6 +874,78 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
     Node = ResNode;
     break;
   }
+  case AArch64ISD::NEON_LD1_UPD: {
+    static const uint16_t Opcodes[] = {
+      AArch64::LD1WB_8B_fixed,  AArch64::LD1WB_4H_fixed,
+      AArch64::LD1WB_2S_fixed,  AArch64::LD1WB_1D_fixed,
+      AArch64::LD1WB_16B_fixed, AArch64::LD1WB_8H_fixed,
+      AArch64::LD1WB_4S_fixed,  AArch64::LD1WB_2D_fixed
+    };
+    return SelectVLD(Node, 1, true, Opcodes);
+  }
+  case AArch64ISD::NEON_LD2_UPD: {
+    static const uint16_t Opcodes[] = {
+      AArch64::LD2WB_8B_fixed,  AArch64::LD2WB_4H_fixed,
+      AArch64::LD2WB_2S_fixed,  AArch64::LD1WB2V_1D_fixed,
+      AArch64::LD2WB_16B_fixed, AArch64::LD2WB_8H_fixed,
+      AArch64::LD2WB_4S_fixed,  AArch64::LD2WB_2D_fixed
+    };
+    return SelectVLD(Node, 2, true, Opcodes);
+  }
+  case AArch64ISD::NEON_LD3_UPD: {
+    static const uint16_t Opcodes[] = {
+      AArch64::LD3WB_8B_fixed,  AArch64::LD3WB_4H_fixed,
+      AArch64::LD3WB_2S_fixed,  AArch64::LD1WB3V_1D_fixed,
+      AArch64::LD3WB_16B_fixed, AArch64::LD3WB_8H_fixed,
+      AArch64::LD3WB_4S_fixed,  AArch64::LD3WB_2D_fixed
+    };
+    return SelectVLD(Node, 3, true, Opcodes);
+  }
+  case AArch64ISD::NEON_LD4_UPD: {
+    static const uint16_t Opcodes[] = {
+      AArch64::LD4WB_8B_fixed,  AArch64::LD4WB_4H_fixed,
+      AArch64::LD4WB_2S_fixed,  AArch64::LD1WB4V_1D_fixed,
+      AArch64::LD4WB_16B_fixed, AArch64::LD4WB_8H_fixed,
+      AArch64::LD4WB_4S_fixed,  AArch64::LD4WB_2D_fixed
+    };
+    return SelectVLD(Node, 4, true, Opcodes);
+  }
+  case AArch64ISD::NEON_ST1_UPD: {
+    static const uint16_t Opcodes[] = {
+      AArch64::ST1WB_8B_fixed,  AArch64::ST1WB_4H_fixed,
+      AArch64::ST1WB_2S_fixed,  AArch64::ST1WB_1D_fixed,
+      AArch64::ST1WB_16B_fixed, AArch64::ST1WB_8H_fixed,
+      AArch64::ST1WB_4S_fixed,  AArch64::ST1WB_2D_fixed
+    };
+    return SelectVST(Node, 1, true, Opcodes);
+  }
+  case AArch64ISD::NEON_ST2_UPD: {
+    static const uint16_t Opcodes[] = {
+      AArch64::ST2WB_8B_fixed,  AArch64::ST2WB_4H_fixed,
+      AArch64::ST2WB_2S_fixed,  AArch64::ST1WB2V_1D_fixed,
+      AArch64::ST2WB_16B_fixed, AArch64::ST2WB_8H_fixed,
+      AArch64::ST2WB_4S_fixed,  AArch64::ST2WB_2D_fixed
+    };
+    return SelectVST(Node, 2, true, Opcodes);
+  }
+  case AArch64ISD::NEON_ST3_UPD: {
+    static const uint16_t Opcodes[] = {
+      AArch64::ST3WB_8B_fixed,  AArch64::ST3WB_4H_fixed,
+      AArch64::ST3WB_2S_fixed,  AArch64::ST1WB3V_1D_fixed,
+      AArch64::ST3WB_16B_fixed, AArch64::ST3WB_8H_fixed,
+      AArch64::ST3WB_4S_fixed,  AArch64::ST3WB_2D_fixed
+    };
+    return SelectVST(Node, 3, true, Opcodes);
+  }
+  case AArch64ISD::NEON_ST4_UPD: {
+    static const uint16_t Opcodes[] = {
+      AArch64::ST4WB_8B_fixed,  AArch64::ST4WB_4H_fixed,
+      AArch64::ST4WB_2S_fixed,  AArch64::ST1WB4V_1D_fixed,
+      AArch64::ST4WB_16B_fixed, AArch64::ST4WB_8H_fixed,
+      AArch64::ST4WB_4S_fixed,  AArch64::ST4WB_2D_fixed
+    };
+    return SelectVST(Node, 4, true, Opcodes);
+  }
   case ISD::INTRINSIC_VOID:
   case ISD::INTRINSIC_W_CHAIN: {
     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
@@ -780,56 +958,56 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
         AArch64::LD1_2S, AArch64::LD1_1D,
         AArch64::LD1_16B, AArch64::LD1_8H, AArch64::LD1_4S, AArch64::LD1_2D
       };
-      return SelectVLD(Node, 1, Opcodes);
+      return SelectVLD(Node, 1, false, Opcodes);
     }
     case Intrinsic::arm_neon_vld2: {
      static const uint16_t Opcodes[] = {
        AArch64::LD2_8B, AArch64::LD2_4H, AArch64::LD2_2S, AArch64::LD1_2V_1D,
        AArch64::LD2_16B, AArch64::LD2_8H, AArch64::LD2_4S, AArch64::LD2_2D
      };
-      return SelectVLD(Node, 2, Opcodes);
+      return SelectVLD(Node, 2, false, Opcodes);
     }
     case Intrinsic::arm_neon_vld3: {
      static const uint16_t Opcodes[] = {
        AArch64::LD3_8B, AArch64::LD3_4H, AArch64::LD3_2S, AArch64::LD1_3V_1D,
        AArch64::LD3_16B, AArch64::LD3_8H, AArch64::LD3_4S, AArch64::LD3_2D
      };
-      return SelectVLD(Node, 3, Opcodes);
+      return SelectVLD(Node, 3, false, Opcodes);
     }
     case Intrinsic::arm_neon_vld4: {
      static const uint16_t Opcodes[] = {
        AArch64::LD4_8B, AArch64::LD4_4H, AArch64::LD4_2S, AArch64::LD1_4V_1D,
        AArch64::LD4_16B, AArch64::LD4_8H, AArch64::LD4_4S, AArch64::LD4_2D
      };
-      return SelectVLD(Node, 4, Opcodes);
+      return SelectVLD(Node, 4, false, Opcodes);
     }
     case Intrinsic::arm_neon_vst1: {
      static const uint16_t Opcodes[] = {
        AArch64::ST1_8B, AArch64::ST1_4H, AArch64::ST1_2S, AArch64::ST1_1D,
        AArch64::ST1_16B, AArch64::ST1_8H, AArch64::ST1_4S, AArch64::ST1_2D
      };
-      return SelectVST(Node, 1, Opcodes);
+      return SelectVST(Node, 1, false, Opcodes);
     }
     case Intrinsic::arm_neon_vst2: {
      static const uint16_t Opcodes[] = {
        AArch64::ST2_8B, AArch64::ST2_4H, AArch64::ST2_2S, AArch64::ST1_2V_1D,
        AArch64::ST2_16B, AArch64::ST2_8H, AArch64::ST2_4S, AArch64::ST2_2D
      };
-      return SelectVST(Node, 2, Opcodes);
+      return SelectVST(Node, 2, false, Opcodes);
     }
     case Intrinsic::arm_neon_vst3: {
      static const uint16_t Opcodes[] = {
        AArch64::ST3_8B, AArch64::ST3_4H, AArch64::ST3_2S, AArch64::ST1_3V_1D,
        AArch64::ST3_16B, AArch64::ST3_8H, AArch64::ST3_4S, AArch64::ST3_2D
      };
-      return SelectVST(Node, 3, Opcodes);
+      return SelectVST(Node, 3, false, Opcodes);
     }
     case Intrinsic::arm_neon_vst4: {
      static const uint16_t Opcodes[] = {
        AArch64::ST4_8B, AArch64::ST4_4H, AArch64::ST4_2S, AArch64::ST1_4V_1D,
        AArch64::ST4_16B, AArch64::ST4_8H, AArch64::ST4_4S, AArch64::ST4_2D
      };
-      return SelectVST(Node, 4, Opcodes);
+      return SelectVST(Node, 4, false, Opcodes);
     }
     }
     break;
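
The reason AddrOpIdx is 1 for the updating nodes and 2 for the plain intrinsics is the operand layout: an ISD::INTRINSIC_W_CHAIN node carries the intrinsic ID as operand 1, while the NEON_*_UPD nodes (built by CombineBaseUpdate in the AArch64ISelLowering.cpp hunk below) drop it and carry the increment right after the address. A simplified standalone sketch of the two layouts (alignment and vector operands elided):

#include <cassert>
#include <string>
#include <vector>

int main() {
  // Operand lists of an intrinsic vld2 node vs. the NEON_LD2_UPD node.
  std::vector<std::string> intrinsicOps = {"chain", "intrinsic-id", "addr"};
  std::vector<std::string> updatingOps  = {"chain", "addr", "increment"};

  auto addrOpIdx = [](bool isUpdating) { return isUpdating ? 1u : 2u; };
  assert(intrinsicOps[addrOpIdx(false)] == "addr");
  assert(updatingOps[addrOpIdx(true)] == "addr");
}
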
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4fa7deb..6765c33 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -90,6 +90,8 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
 
   setTargetDAGCombine(ISD::SHL);
   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
+  setTargetDAGCombine(ISD::INTRINSIC_VOID);
+  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
 
   // AArch64 does not have i1 loads, or much of anything for i1 really.
   setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
@@ -889,6 +891,22 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
     return "AArch64ISD::NEON_VDUP";
   case AArch64ISD::NEON_VDUPLANE:
     return "AArch64ISD::NEON_VDUPLANE";
+  case AArch64ISD::NEON_LD1_UPD:
+    return "AArch64ISD::NEON_LD1_UPD";
+  case AArch64ISD::NEON_LD2_UPD:
+    return "AArch64ISD::NEON_LD2_UPD";
+  case AArch64ISD::NEON_LD3_UPD:
+    return "AArch64ISD::NEON_LD3_UPD";
+  case AArch64ISD::NEON_LD4_UPD:
+    return "AArch64ISD::NEON_LD4_UPD";
+  case AArch64ISD::NEON_ST1_UPD:
+    return "AArch64ISD::NEON_ST1_UPD";
+  case AArch64ISD::NEON_ST2_UPD:
+    return "AArch64ISD::NEON_ST2_UPD";
+  case AArch64ISD::NEON_ST3_UPD:
+    return "AArch64ISD::NEON_ST3_UPD";
+  case AArch64ISD::NEON_ST4_UPD:
+    return "AArch64ISD::NEON_ST4_UPD";
   default:
     return NULL;
   }
@@ -3448,6 +3466,108 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
   return SDValue();
 }
 
+/// Target-specific DAG combine function for NEON load/store intrinsics
+/// to merge base address updates.
+static SDValue CombineBaseUpdate(SDNode *N,
+                                 TargetLowering::DAGCombinerInfo &DCI) {
+  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
+    return SDValue();
+
+  SelectionDAG &DAG = DCI.DAG;
+  unsigned AddrOpIdx = 2;
+  SDValue Addr = N->getOperand(AddrOpIdx);
+
+  // Search for a use of the address operand that is an increment.
+  for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
+       UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
+    SDNode *User = *UI;
+    if (User->getOpcode() != ISD::ADD ||
+        UI.getUse().getResNo() != Addr.getResNo())
+      continue;
+
+    // Check that the add is independent of the load/store.  Otherwise, folding
+    // it would create a cycle.
+    if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
+      continue;
+
+    // Find the new opcode for the updating load/store.
+    bool isLoad = true;
+    unsigned NewOpc = 0;
+    unsigned NumVecs = 0;
+    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+    switch (IntNo) {
+    default: llvm_unreachable("unexpected intrinsic for Neon base update");
+    case Intrinsic::arm_neon_vld1: NewOpc = AArch64ISD::NEON_LD1_UPD;
+      NumVecs = 1; break;
+    case Intrinsic::arm_neon_vld2: NewOpc = AArch64ISD::NEON_LD2_UPD;
+      NumVecs = 2; break;
+    case Intrinsic::arm_neon_vld3: NewOpc = AArch64ISD::NEON_LD3_UPD;
+      NumVecs = 3; break;
+    case Intrinsic::arm_neon_vld4: NewOpc = AArch64ISD::NEON_LD4_UPD;
+      NumVecs = 4; break;
+    case Intrinsic::arm_neon_vst1: NewOpc = AArch64ISD::NEON_ST1_UPD;
+      NumVecs = 1; isLoad = false; break;
+    case Intrinsic::arm_neon_vst2: NewOpc = AArch64ISD::NEON_ST2_UPD;
+      NumVecs = 2; isLoad = false; break;
+    case Intrinsic::arm_neon_vst3: NewOpc = AArch64ISD::NEON_ST3_UPD;
+      NumVecs = 3; isLoad = false; break;
+    case Intrinsic::arm_neon_vst4: NewOpc = AArch64ISD::NEON_ST4_UPD;
+      NumVecs = 4; isLoad = false; break;
+    }
+
+    // Find the size of memory referenced by the load/store.
+    EVT VecTy;
+    if (isLoad)
+      VecTy = N->getValueType(0);
+    else
+      VecTy = N->getOperand(AddrOpIdx + 1).getValueType();
+    unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
+
+    // If the increment is a constant, it must match the memory ref size.
+    SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
+    if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
+      uint32_t IncVal = CInc->getZExtValue();
+      if (IncVal != NumBytes)
+        continue;
+      Inc = DAG.getTargetConstant(IncVal, MVT::i32);
+    }
+
+    // Create the new updating load/store node.
+    EVT Tys[6];
+    unsigned NumResultVecs = (isLoad ? NumVecs : 0);
+    unsigned n;
+    for (n = 0; n < NumResultVecs; ++n)
+      Tys[n] = VecTy;
+    Tys[n++] = MVT::i64;
+    Tys[n] = MVT::Other;
+    SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs + 2);
+    SmallVector<SDValue, 8> Ops;
+    Ops.push_back(N->getOperand(0)); // incoming chain
+    Ops.push_back(N->getOperand(AddrOpIdx));
+    Ops.push_back(Inc);
+    for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) {
+      Ops.push_back(N->getOperand(i));
+    }
+    MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
+    SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
                                           Ops.data(), Ops.size(),
                                           MemInt->getMemoryVT(),
                                           MemInt->getMemOperand());
+
+    // Update the uses.
+    std::vector<SDValue> NewResults;
+    for (unsigned i = 0; i < NumResultVecs; ++i) {
+      NewResults.push_back(SDValue(UpdN.getNode(), i));
+    }
+    NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
+    DCI.CombineTo(N, NewResults);
+    DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
+
+    break;
+  }
+  return SDValue();
+}
+
 SDValue
 AArch64TargetLowering::PerformDAGCombine(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
@@ -3461,6 +3581,21 @@ AArch64TargetLowering::PerformDAGCombine(SDNode *N,
     return PerformShiftCombine(N, DCI, getSubtarget());
   case ISD::INTRINSIC_WO_CHAIN:
     return PerformIntrinsicCombine(N, DCI.DAG);
+  case ISD::INTRINSIC_VOID:
+  case ISD::INTRINSIC_W_CHAIN:
+    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
+    case Intrinsic::arm_neon_vld1:
+    case Intrinsic::arm_neon_vld2:
+    case Intrinsic::arm_neon_vld3:
+    case Intrinsic::arm_neon_vld4:
+    case Intrinsic::arm_neon_vst1:
+    case Intrinsic::arm_neon_vst2:
+    case Intrinsic::arm_neon_vst3:
+    case Intrinsic::arm_neon_vst4:
+      return CombineBaseUpdate(N, DCI);
+    default:
+      break;
+    }
   }
   return SDValue();
 }
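
At the source level, the pattern CombineBaseUpdate looks for is the usual strided-pointer idiom: a NEON intrinsic access followed by an ADD of the base by exactly the bytes transferred. A hedged illustration (the function is made up for this note, and whether vld1q_u32/vst1q_u32 reach the backend as arm_neon intrinsics depends on the frontend; the DAG shape is the point):

#include <arm_neon.h>

void copy_blocks(const uint32_t *src, uint32_t *dst, int n) {
  for (int i = 0; i < n; ++i) {
    uint32x4_t v = vld1q_u32(src); // 16-byte NEON load
    vst1q_u32(dst, v);             // 16-byte NEON store
    src += 4;                      // ADD of the base by NumBytes (16) ...
    dst += 4;                      // ... which can fold into "ld1/st1 ..., [xN], #16"
  }
}
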
diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h
index 9836d40..7effbfd 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/lib/Target/AArch64/AArch64ISelLowering.h
@@ -142,7 +142,19 @@ namespace AArch64ISD {
     NEON_VDUP,
 
     // Vector dup by lane
-    NEON_VDUPLANE
+    NEON_VDUPLANE,
+
+    // NEON loads with post-increment base updates:
+    NEON_LD1_UPD = ISD::FIRST_TARGET_MEMORY_OPCODE,
+    NEON_LD2_UPD,
+    NEON_LD3_UPD,
+    NEON_LD4_UPD,
+
+    // NEON stores with post-increment base updates:
+    NEON_ST1_UPD,
+    NEON_ST2_UPD,
+    NEON_ST3_UPD,
+    NEON_ST4_UPD
   };
 }
diff --git a/lib/Target/AArch64/AArch64InstrFormats.td b/lib/Target/AArch64/AArch64InstrFormats.td
index 44dbc9d..4782b55 100644
--- a/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/lib/Target/AArch64/AArch64InstrFormats.td
@@ -120,6 +120,14 @@ class A64InstRdnm<dag outs, dag ins, string asmstr,
   let Inst{20-16} = Rm;
 }
 
+class A64InstRtnm<dag outs, dag ins, string asmstr,
+                  list<dag> patterns, InstrItinClass itin>
+  : A64InstRtn<outs, ins, asmstr, patterns, itin> {
+  bits<5> Rm;
+
+  let Inst{20-16} = Rm;
+}
+
 //===----------------------------------------------------------------------===//
 //
 // Actual A64 Instruction Formats
@@ -1216,6 +1224,24 @@ class NeonI_LdStMult<bit q, bit l, bits<4> opcode, bits<2> size,
   // Inherit Rt in 4-0
 }
 
+// Format AdvSIMD vector load/store multiple N-element structure (post-index)
+class NeonI_LdStMult_Post<bit q, bit l, bits<4> opcode, bits<2> size,
+                          dag outs, dag ins, string asmstr,
+                          list<dag> patterns, InstrItinClass itin>
+  : A64InstRtnm<outs, ins, asmstr, patterns, itin>
+{
+  let Inst{31} = 0b0;
+  let Inst{30} = q;
+  let Inst{29-23} = 0b0011001;
+  let Inst{22} = l;
+  let Inst{21} = 0b0;
+  // Inherit Rm in 20-16
+  let Inst{15-12} = opcode;
+  let Inst{11-10} = size;
+  // Inherit Rn in 9-5
+  // Inherit Rt in 4-0
+}
+
 // Format AdvSIMD 3 scalar registers with different type
 
 class NeonI_Scalar3Diff<bit u, bits<2> size, bits<4> opcode,
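
A quick way to sanity-check the field layout of NeonI_LdStMult_Post is to pack it by hand. A standalone C++ sketch (the helper is ad hoc, not LLVM code, and assumes the bit positions shown in the format class); for ld1 {v0.4s}, [x0], #16 the fields are q=1, l=1, opcode=0b0111, size=0b10, Rm=0b11111, Rn=Rt=0, which should come out to 0x4CDF7800:

#include <cassert>
#include <cstdint>

static uint32_t encodeLdStMultPost(unsigned q, unsigned l, unsigned opcode,
                                   unsigned size, unsigned rm, unsigned rn,
                                   unsigned rt) {
  uint32_t inst = 0;            // Inst{31} and Inst{21} stay 0
  inst |= (q & 1u) << 30;       // Inst{30} = q
  inst |= 0b0011001u << 23;     // Inst{29-23}, fixed
  inst |= (l & 1u) << 22;       // Inst{22} = l (1 = load, 0 = store)
  inst |= (rm & 0x1fu) << 16;   // Inst{20-16} = Rm
  inst |= (opcode & 0xfu) << 12; // Inst{15-12} = opcode
  inst |= (size & 0x3u) << 10;  // Inst{11-10} = size
  inst |= (rn & 0x1fu) << 5;    // Inst{9-5} = Rn
  inst |= rt & 0x1fu;           // Inst{4-0} = Rt
  return inst;
}

int main() {
  // ld1 {v0.4s}, [x0], #16 : the "_fixed" form, so Rm is 0b11111.
  assert(encodeLdStMultPost(1, 1, 0b0111, 0b10, 0b11111, 0, 0) == 0x4CDF7800);
}
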
diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td
index 4ecc0dc..dbae303 100644
--- a/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/lib/Target/AArch64/AArch64InstrNEON.td
@@ -3088,6 +3088,230 @@ def ST1_4V_1D : NeonI_STVList<0, 0b0010, 0b11, VQuad1D_operand, "st1">;
 
 // End of vector load/store multiple N-element structure(class SIMD lselem)
 
+// The following are post-index vector load/store multiple N-element
+// structure instructions (class SIMD lselem-post).
+def exact8_asmoperand : AsmOperandClass {
+  let Name = "Exact8";
+  let PredicateMethod = "isExactImm<8>";
+  let RenderMethod = "addImmOperands";
+}
+def uimm_exact8 : Operand<i32>, ImmLeaf<i32, [{return Imm == 8;}]> {
+  let ParserMatchClass = exact8_asmoperand;
+}
+
+def exact16_asmoperand : AsmOperandClass {
+  let Name = "Exact16";
+  let PredicateMethod = "isExactImm<16>";
+  let RenderMethod = "addImmOperands";
+}
+def uimm_exact16 : Operand<i32>, ImmLeaf<i32, [{return Imm == 16;}]> {
+  let ParserMatchClass = exact16_asmoperand;
+}
+
+def exact24_asmoperand : AsmOperandClass {
+  let Name = "Exact24";
+  let PredicateMethod = "isExactImm<24>";
+  let RenderMethod = "addImmOperands";
+}
+def uimm_exact24 : Operand<i32>, ImmLeaf<i32, [{return Imm == 24;}]> {
+  let ParserMatchClass = exact24_asmoperand;
+}
+
+def exact32_asmoperand : AsmOperandClass {
+  let Name = "Exact32";
+  let PredicateMethod = "isExactImm<32>";
+  let RenderMethod = "addImmOperands";
+}
+def uimm_exact32 : Operand<i32>, ImmLeaf<i32, [{return Imm == 32;}]> {
+  let ParserMatchClass = exact32_asmoperand;
+}
+
+def exact48_asmoperand : AsmOperandClass {
+  let Name = "Exact48";
+  let PredicateMethod = "isExactImm<48>";
+  let RenderMethod = "addImmOperands";
+}
+def uimm_exact48 : Operand<i32>, ImmLeaf<i32, [{return Imm == 48;}]> {
+  let ParserMatchClass = exact48_asmoperand;
+}
+
+def exact64_asmoperand : AsmOperandClass {
+  let Name = "Exact64";
+  let PredicateMethod = "isExactImm<64>";
+  let RenderMethod = "addImmOperands";
+}
+def uimm_exact64 : Operand<i32>, ImmLeaf<i32, [{return Imm == 64;}]> {
+  let ParserMatchClass = exact64_asmoperand;
+}
+
+multiclass NeonI_LDWB_VList<bit q, bits<4> opcode, bits<2> size,
+                            RegisterOperand VecList, Operand ImmTy,
+                            string asmop> {
+  let Constraints = "$Rn = $wb", mayLoad = 1, neverHasSideEffects = 1,
+      DecoderMethod = "DecodeVLDSTPostInstruction" in {
+    def _fixed : NeonI_LdStMult_Post<q, 1, opcode, size,
+                     (outs VecList:$Rt, GPR64xsp:$wb),
+                     (ins GPR64xsp:$Rn, ImmTy:$amt),
+                     asmop # "\t$Rt, [$Rn], $amt",
+                     [],
+                     NoItinerary> {
+      let Rm = 0b11111;
+    }
+
+    def _register : NeonI_LdStMult_Post<q, 1, opcode, size,
+                        (outs VecList:$Rt, GPR64xsp:$wb),
+                        (ins GPR64xsp:$Rn, GPR64noxzr:$Rm),
+                        asmop # "\t$Rt, [$Rn], $Rm",
+                        [],
+                        NoItinerary>;
+  }
+}
+
+multiclass LDWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
+                           Operand ImmTy2, string asmop> {
+  defm _8B : NeonI_LDWB_VList<0, opcode, 0b00,
+                              !cast<RegisterOperand>(List # "8B_operand"),
+                              ImmTy, asmop>;
+
+  defm _4H : NeonI_LDWB_VList<0, opcode, 0b01,
+                              !cast<RegisterOperand>(List # "4H_operand"),
+                              ImmTy, asmop>;
+
+  defm _2S : NeonI_LDWB_VList<0, opcode, 0b10,
+                              !cast<RegisterOperand>(List # "2S_operand"),
+                              ImmTy, asmop>;
+
+  defm _16B : NeonI_LDWB_VList<1, opcode, 0b00,
+                               !cast<RegisterOperand>(List # "16B_operand"),
+                               ImmTy2, asmop>;
+
+  defm _8H : NeonI_LDWB_VList<1, opcode, 0b01,
+                              !cast<RegisterOperand>(List # "8H_operand"),
+                              ImmTy2, asmop>;
+
+  defm _4S : NeonI_LDWB_VList<1, opcode, 0b10,
+                              !cast<RegisterOperand>(List # "4S_operand"),
+                              ImmTy2, asmop>;
+
+  defm _2D : NeonI_LDWB_VList<1, opcode, 0b11,
+                              !cast<RegisterOperand>(List # "2D_operand"),
+                              ImmTy2, asmop>;
+}
+
+// Post-index load multiple N-element structures into N registers (N = 1,2,3,4)
+defm LD1WB : LDWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "ld1">;
+defm LD1WB_1D : NeonI_LDWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
+                                 "ld1">;
+
+defm LD2WB : LDWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32,
+                             "ld2">;
+
+defm LD3WB : LDWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
+                             "ld3">;
+
+defm LD4WB : LDWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64,
+                             "ld4">;
+
+// Post-index load multiple 1-element structures into N consecutive registers
+// (N = 2,3,4)
+defm LD1WB2V : LDWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
+                               "ld1">;
+defm LD1WB2V_1D : NeonI_LDWB_VList<0, 0b1010, 0b11, VPair1D_operand,
+                                   uimm_exact16, "ld1">;
+
+defm LD1WB3V : LDWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
+                               "ld1">;
+defm LD1WB3V_1D : NeonI_LDWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
+                                   uimm_exact24, "ld1">;
+
+defm LD1WB_4V : LDWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
+                                "ld1">;
+defm LD1WB4V_1D : NeonI_LDWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
+                                   uimm_exact32, "ld1">;
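
The pairing of each vector-list flavour with one uimm_exactN operand above follows from the ISA rule that the fixed post-index amount is always the transfer size in bytes: one D or Q register per vector in the list. A small sketch of that arithmetic, which also matches the immediate the disassembler synthesizes further below:

#include <cassert>

static unsigned transferBytes(unsigned NumVecs, bool Is128Bit) {
  return NumVecs * (Is128Bit ? 16 : 8); // one D or Q register per vector
}

int main() {
  assert(transferBytes(1, false) == 8);  // ld1 {v0.8b}, [x0], #8           (uimm_exact8)
  assert(transferBytes(2, true)  == 32); // ld2 {v0.4s, v1.4s}, [x0], #32   (uimm_exact32)
  assert(transferBytes(3, false) == 24); // ld3 {v0.8b, v1.8b, v2.8b}, [x0], #24
  assert(transferBytes(4, true)  == 64); // ld4 {v0.2d, ..., v3.2d}, [x0], #64
}
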
+
+multiclass NeonI_STWB_VList<bit q, bits<4> opcode, bits<2> size,
+                            RegisterOperand VecList, Operand ImmTy,
+                            string asmop> {
+  let Constraints = "$Rn = $wb", mayStore = 1, neverHasSideEffects = 1,
+      DecoderMethod = "DecodeVLDSTPostInstruction" in {
+    def _fixed : NeonI_LdStMult_Post<q, 0, opcode, size,
+                     (outs GPR64xsp:$wb),
+                     (ins GPR64xsp:$Rn, ImmTy:$amt, VecList:$Rt),
+                     asmop # "\t$Rt, [$Rn], $amt",
+                     [],
+                     NoItinerary> {
+      let Rm = 0b11111;
+    }
+
+    def _register : NeonI_LdStMult_Post<q, 0, opcode, size,
+                        (outs GPR64xsp:$wb),
+                        (ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VecList:$Rt),
+                        asmop # "\t$Rt, [$Rn], $Rm",
+                        [],
+                        NoItinerary>;
+  }
+}
+
+multiclass STWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
+                           Operand ImmTy2, string asmop> {
+  defm _8B : NeonI_STWB_VList<0, opcode, 0b00,
+                              !cast<RegisterOperand>(List # "8B_operand"),
+                              ImmTy, asmop>;
+
+  defm _4H : NeonI_STWB_VList<0, opcode, 0b01,
+                              !cast<RegisterOperand>(List # "4H_operand"),
+                              ImmTy, asmop>;
+
+  defm _2S : NeonI_STWB_VList<0, opcode, 0b10,
+                              !cast<RegisterOperand>(List # "2S_operand"),
+                              ImmTy, asmop>;
+
+  defm _16B : NeonI_STWB_VList<1, opcode, 0b00,
+                               !cast<RegisterOperand>(List # "16B_operand"),
+                               ImmTy2, asmop>;
+
+  defm _8H : NeonI_STWB_VList<1, opcode, 0b01,
+                              !cast<RegisterOperand>(List # "8H_operand"),
+                              ImmTy2, asmop>;
+
+  defm _4S : NeonI_STWB_VList<1, opcode, 0b10,
+                              !cast<RegisterOperand>(List # "4S_operand"),
+                              ImmTy2, asmop>;
+
+  defm _2D : NeonI_STWB_VList<1, opcode, 0b11,
+                              !cast<RegisterOperand>(List # "2D_operand"),
+                              ImmTy2, asmop>;
+}
+
+// Post-index store multiple N-element structures from N registers (N = 1,2,3,4)
+defm ST1WB : STWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "st1">;
+defm ST1WB_1D : NeonI_STWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
+                                 "st1">;
+
+defm ST2WB : STWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32,
+                             "st2">;
+
+defm ST3WB : STWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
+                             "st3">;
+
+defm ST4WB : STWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64,
+                             "st4">;
+
+// Post-index store multiple 1-element structures from N consecutive registers
+// (N = 2,3,4)
+defm ST1WB2V : STWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
+                               "st1">;
+defm ST1WB2V_1D : NeonI_STWB_VList<0, 0b1010, 0b11, VPair1D_operand,
+                                   uimm_exact16, "st1">;
+
+defm ST1WB3V : STWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
+                               "st1">;
+defm ST1WB3V_1D : NeonI_STWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
+                                   uimm_exact24, "st1">;
+
+defm ST1WB4V : STWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
+                               "st1">;
+defm ST1WB4V_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
+                                   uimm_exact32, "st1">;
+
+// End of post-index vector load/store multiple N-element structure
+// (class SIMD lselem-post)
+
 // Scalar Three Same
 
 class NeonI_Scalar3Same_size<bit u, bits<2> size, bits<5> opcode, string asmop,
diff --git a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 1f7a7d8..da5dd93 100644
--- a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -815,6 +815,17 @@ public:
     return true;
   }
 
+  // Return true if the operand is an immediate whose value is exactly N.
+  template<int N>
+  bool isExactImm() const {
+    if (!isImm()) return false;
+
+    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+    if (!CE) return false;
+
+    return CE->getValue() == N;
+  }
+
   static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
                                           unsigned ShiftAmount,
                                           bool ImplicitAmount,
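
isExactImm<N> is what backs the Exact8/.../Exact64 asm operand classes from the .td changes above: the post-index immediate is not a free-form value, it must equal the bytes transferred. A standalone analogue of the template, with the accepted/rejected cases as comments:

#include <cassert>

// Analogue of AArch64Operand::isExactImm<N> (the real method also checks
// that the operand is a constant immediate expression first).
template <int N> bool isExactImm(long long Value) { return Value == N; }

int main() {
  assert(isExactImm<16>(16));  // "ld2 {v0.8b, v1.8b}, [x0], #16" parses
  assert(!isExactImm<16>(8));  // "#8" is rejected: not the transfer size
}
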
diff --git a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
index db1da49..38845b6 100644
--- a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
+++ b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
@@ -89,6 +89,11 @@ static DecodeStatus DecodeFPR128LoRegisterClass(llvm::MCInst &Inst,
                                                 unsigned RegNo, uint64_t Address,
                                                 const void *Decoder);
 
+static DecodeStatus DecodeGPR64noxzrRegisterClass(llvm::MCInst &Inst,
+                                                  unsigned RegNo,
+                                                  uint64_t Address,
+                                                  const void *Decoder);
+
 static DecodeStatus DecodeDPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
                                              uint64_t Address,
                                              const void *Decoder);
@@ -223,6 +228,9 @@ static DecodeStatus DecodeSingleIndexedInstruction(llvm::MCInst &Inst,
                                                    uint64_t Address,
                                                    const void *Decoder);
 
+static DecodeStatus DecodeVLDSTPostInstruction(MCInst &Inst, unsigned Val,
+                                               uint64_t Address,
+                                               const void *Decoder);
 
 static bool Check(DecodeStatus &Out, DecodeStatus In);
 
@@ -392,6 +400,18 @@ DecodeFPR128LoRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
   return DecodeFPR128RegisterClass(Inst, RegNo, Address, Decoder);
 }
 
+static DecodeStatus DecodeGPR64noxzrRegisterClass(llvm::MCInst &Inst,
+                                                  unsigned RegNo,
+                                                  uint64_t Address,
+                                                  const void *Decoder) {
+  if (RegNo >= 30)
+    return MCDisassembler::Fail;
+
+  uint16_t Register = getReg(Decoder, AArch64::GPR64noxzrRegClassID, RegNo);
+  Inst.addOperand(MCOperand::CreateReg(Register));
+  return MCDisassembler::Success;
+}
+
 static DecodeStatus DecodeRegisterClassByID(llvm::MCInst &Inst, unsigned RegNo,
                                             unsigned RegID,
                                             const void *Decoder) {
@@ -984,3 +1004,91 @@ DecodeNeonMovImmShiftOperand(llvm::MCInst &Inst, unsigned ShiftAmount,
   Inst.addOperand(MCOperand::CreateImm(ShiftAmount));
   return MCDisassembler::Success;
 }
+
+// Decode post-index vector load/store instructions.
+// This is necessary because we need to decode Rm: if Rm == 0b11111, the last
+// operand is an immediate equal to the length of the vector list in bytes;
+// otherwise Rm is decoded as a GPR64noxzr register.
+static DecodeStatus DecodeVLDSTPostInstruction(MCInst &Inst, unsigned Insn,
+                                               uint64_t Address,
+                                               const void *Decoder) {
+  unsigned Rt = fieldFromInstruction(Insn, 0, 5);
+  unsigned Rn = fieldFromInstruction(Insn, 5, 5);
+  unsigned Rm = fieldFromInstruction(Insn, 16, 5);
+  unsigned Opcode = fieldFromInstruction(Insn, 12, 4);
+  unsigned IsLoad = fieldFromInstruction(Insn, 22, 1);
+  // 0 for a 64-bit vector list, 1 for a 128-bit vector list
+  unsigned Is128BitVec = fieldFromInstruction(Insn, 30, 1);
+
+  unsigned NumVecs;
+  switch (Opcode) {
+  case 0:  // ld4/st4
+  case 2:  // ld1/st1 with 4 vectors
+    NumVecs = 4; break;
+  case 4:  // ld3/st3
+  case 6:  // ld1/st1 with 3 vectors
+    NumVecs = 3; break;
+  case 7:  // ld1/st1 with 1 vector
+    NumVecs = 1; break;
+  case 8:  // ld2/st2
+  case 10: // ld1/st1 with 2 vectors
+    NumVecs = 2; break;
+  default:
+    llvm_unreachable("Invalid opcode for post-index load/store instructions");
+  }
+
+  // Decode the vector list of 1/2/3/4 vectors for load instructions.
+  if (IsLoad) {
+    switch (NumVecs) {
+    case 1:
+      Is128BitVec ? DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder)
+                  : DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder);
+      break;
+    case 2:
+      Is128BitVec ? DecodeQPairRegisterClass(Inst, Rt, Address, Decoder)
+                  : DecodeDPairRegisterClass(Inst, Rt, Address, Decoder);
+      break;
+    case 3:
+      Is128BitVec ? DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder)
+                  : DecodeDTripleRegisterClass(Inst, Rt, Address, Decoder);
+      break;
+    case 4:
+      Is128BitVec ? DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder)
+                  : DecodeDQuadRegisterClass(Inst, Rt, Address, Decoder);
+      break;
+    }
+  }
+
+  // Decode the writeback and base registers; both are Rn ($Rn is tied to $wb).
+  DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
+  DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
+
+  if (Rm == 31) // If Rm is 0b11111, add the vector list length in bytes
+    Inst.addOperand(MCOperand::CreateImm(NumVecs * (Is128BitVec ? 16 : 8)));
+  else          // Decode Rm
+    DecodeGPR64noxzrRegisterClass(Inst, Rm, Address, Decoder);
+
+  // Decode the vector list of 1/2/3/4 vectors for store instructions.
+  if (!IsLoad) {
+    switch (NumVecs) {
+    case 1:
+      Is128BitVec ? DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder)
+                  : DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder);
+      break;
+    case 2:
+      Is128BitVec ? DecodeQPairRegisterClass(Inst, Rt, Address, Decoder)
+                  : DecodeDPairRegisterClass(Inst, Rt, Address, Decoder);
+      break;
+    case 3:
+      Is128BitVec ? DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder)
+                  : DecodeDTripleRegisterClass(Inst, Rt, Address, Decoder);
+      break;
+    case 4:
+      Is128BitVec ? DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder)
+                  : DecodeDQuadRegisterClass(Inst, Rt, Address, Decoder);
+      break;
+    }
+  }
+
+  return MCDisassembler::Success;
+}
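
To see the decoder's field extraction on a concrete word, take the same example encoding used above, ld1 {v0.4s}, [x0], #16 (0x4CDF7800 under the assumed layout). A standalone sketch with a helper that follows the same contract as fieldFromInstruction, showing why Rm == 31 selects the immediate form:

#include <cassert>
#include <cstdint>

static unsigned field(uint32_t insn, unsigned lo, unsigned width) {
  return (insn >> lo) & ((1u << width) - 1); // bits insn{lo+width-1 : lo}
}

int main() {
  const uint32_t insn = 0x4CDF7800; // ld1 {v0.4s}, [x0], #16
  assert(field(insn, 0, 5) == 0);   // Rt = v0
  assert(field(insn, 5, 5) == 0);   // Rn = x0
  assert(field(insn, 16, 5) == 31); // Rm = 0b11111 -> immediate form
  assert(field(insn, 12, 4) == 7);  // opcode 0b0111 -> ld1/st1, 1 vector
  assert(field(insn, 22, 1) == 1);  // L = 1 -> load
  assert(field(insn, 30, 1) == 1);  // Q = 1 -> 128-bit list -> 1 * 16 = #16
}
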