author    Stephen Hines <srhines@google.com> 2014-05-29 02:49:00 -0700
committer Stephen Hines <srhines@google.com> 2014-05-29 02:49:00 -0700
commit    dce4a407a24b04eebc6a376f8e62b41aaa7b071f (patch)
tree      dcebc53f2b182f145a2e659393bf9a0472cedf23 /lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
parent    220b921aed042f9e520c26cffd8282a94c66c3d5 (diff)
Update LLVM for 3.5 rebase (r209712).
Change-Id: I149556c940fb7dc92d075273c87ff584f400941f
Diffstat (limited to 'lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp')
-rw-r--r--  lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp  5249
1 file changed, 3310 insertions(+), 1939 deletions(-)
diff --git a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index e933ec1..65b77c5 100644
--- a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -6,34 +6,31 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file contains the (GNU-style) assembly parser for the AArch64
-// architecture.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetAsmParser.h"
-#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
+#include <cstdio>
using namespace llvm;
namespace {
@@ -41,21 +38,74 @@ namespace {
class AArch64Operand;
class AArch64AsmParser : public MCTargetAsmParser {
+public:
+ typedef SmallVectorImpl<MCParsedAsmOperand *> OperandVector;
+
+private:
+ StringRef Mnemonic; ///< Instruction mnemonic.
MCSubtargetInfo &STI;
MCAsmParser &Parser;
+ MCAsmParser &getParser() const { return Parser; }
+ MCAsmLexer &getLexer() const { return Parser.getLexer(); }
+
+ SMLoc getLoc() const { return Parser.getTok().getLoc(); }
+
+ bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
+ AArch64CC::CondCode parseCondCodeString(StringRef Cond);
+ bool parseCondCode(OperandVector &Operands, bool invertCondCode);
+ int tryParseRegister();
+ int tryMatchVectorRegister(StringRef &Kind, bool expected);
+ bool parseRegister(OperandVector &Operands);
+ bool parseSymbolicImmVal(const MCExpr *&ImmVal);
+ bool parseVectorList(OperandVector &Operands);
+ bool parseOperand(OperandVector &Operands, bool isCondCode,
+ bool invertCondCode);
+
+ void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
+ bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
+ bool showMatchError(SMLoc Loc, unsigned ErrCode);
+
+ bool parseDirectiveWord(unsigned Size, SMLoc L);
+ bool parseDirectiveTLSDescCall(SMLoc L);
+
+ bool parseDirectiveLOH(StringRef LOH, SMLoc L);
+
+ bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
+ bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands, MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) override;
+/// @name Auto-generated Match Functions
+/// {
+
#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"
+ /// }
+
+ OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
+ OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
+ OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
+ OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
+ OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
+ OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
+ OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
+ OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
+ OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
+ OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
+ OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
+ bool tryParseVectorRegister(OperandVector &Operands);
+
public:
enum AArch64MatchResultTy {
- Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
+ Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
};
-
AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
- const MCInstrInfo &MII)
+ const MCInstrInfo &MII,
+ const MCTargetOptions &Options)
: MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
MCAsmParserExtension::Initialize(_Parser);
@@ -63,191 +113,197 @@ public:
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
}
- // These are the public interface of the MCTargetAsmParser
- bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
- SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- bool ParseDirective(AsmToken DirectiveID);
- bool ParseDirectiveTLSDescCall(SMLoc L);
- bool ParseDirectiveWord(unsigned Size, SMLoc L);
-
- bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer&Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm);
-
- // The rest of the sub-parsers have more freedom over interface: they return
- // an OperandMatchResultTy because it's less ambiguous than true/false or
- // -1/0/1 even if it is more verbose
- OperandMatchResultTy
- ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- StringRef Mnemonic);
-
- OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);
-
- OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);
-
- OperandMatchResultTy
- ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- uint32_t NumLanes);
-
- OperandMatchResultTy
- ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- uint32_t &NumLanes);
-
- OperandMatchResultTy
- ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- OperandMatchResultTy
- ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- OperandMatchResultTy
- ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- OperandMatchResultTy
- ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- OperandMatchResultTy
- ParseFPImm0AndImm0Operand( SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- template<typename SomeNamedImmMapper> OperandMatchResultTy
- ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
- }
-
- OperandMatchResultTy
- ParseNamedImmOperand(const NamedImmMapper &Mapper,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- OperandMatchResultTy
- ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- OperandMatchResultTy
- ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- OperandMatchResultTy
- ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- bool TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc, StringRef &Layout,
- SMLoc &LayoutLoc);
-
- OperandMatchResultTy ParseVectorList(SmallVectorImpl<MCParsedAsmOperand *> &);
-
- bool validateInstruction(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- /// Scan the next token (which had better be an identifier) and determine
- /// whether it represents a general-purpose or vector register. It returns
- /// true if an identifier was found and populates its reference arguments. It
- /// does not consume the token.
- bool
- IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
- SMLoc &LayoutLoc) const;
-
+ SMLoc NameLoc, OperandVector &Operands) override;
+ bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
+ bool ParseDirective(AsmToken DirectiveID) override;
+ unsigned validateTargetOperandClass(MCParsedAsmOperand *Op,
+ unsigned Kind) override;
+
+ static bool classifySymbolRef(const MCExpr *Expr,
+ AArch64MCExpr::VariantKind &ELFRefKind,
+ MCSymbolRefExpr::VariantKind &DarwinRefKind,
+ int64_t &Addend);
};
-
-}
+} // end anonymous namespace
namespace {
-/// Instances of this class represent a parsed AArch64 machine instruction.
+/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
+/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
enum KindTy {
- k_ImmWithLSL, // #uimm {, LSL #amt }
- k_CondCode, // eq/ne/...
- k_FPImmediate, // Limited-precision floating-point imm
- k_Immediate, // Including expressions referencing symbols
+ k_Immediate,
+ k_ShiftedImm,
+ k_CondCode,
k_Register,
+ k_VectorList,
+ k_VectorIndex,
+ k_Token,
+ k_SysReg,
+ k_SysCR,
+ k_Prefetch,
k_ShiftExtend,
- k_VectorList, // A sequential list of 1 to 4 registers.
- k_SysReg, // The register operand of MRS and MSR instructions
- k_Token, // The mnemonic; other raw tokens the auto-generated
- k_WrappedRegister // Load/store exclusive permit a wrapped register.
+ k_FPImm,
+ k_Barrier
} Kind;
SMLoc StartLoc, EndLoc;
- struct ImmWithLSLOp {
- const MCExpr *Val;
- unsigned ShiftAmount;
- bool ImplicitAmount;
+ struct TokOp {
+ const char *Data;
+ unsigned Length;
+ bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
};
- struct CondCodeOp {
- A64CC::CondCodes Code;
+ struct RegOp {
+ unsigned RegNum;
+ bool isVector;
};
- struct FPImmOp {
- double Val;
+ struct VectorListOp {
+ unsigned RegNum;
+ unsigned Count;
+ unsigned NumElements;
+ unsigned ElementKind;
+ };
+
+ struct VectorIndexOp {
+ unsigned Val;
};
struct ImmOp {
const MCExpr *Val;
};
- struct RegOp {
- unsigned RegNum;
+ struct ShiftedImmOp {
+ const MCExpr *Val;
+ unsigned ShiftAmount;
};
- struct ShiftExtendOp {
- A64SE::ShiftExtSpecifiers ShiftType;
- unsigned Amount;
- bool ImplicitAmount;
+ struct CondCodeOp {
+ AArch64CC::CondCode Code;
};
- // A vector register list is a sequential list of 1 to 4 registers.
- struct VectorListOp {
- unsigned RegNum;
- unsigned Count;
- A64Layout::VectorLayout Layout;
+ struct FPImmOp {
+ unsigned Val; // Encoded 8-bit representation.
+ };
+
+ struct BarrierOp {
+ unsigned Val; // Not the enum since not all values have names.
};
struct SysRegOp {
const char *Data;
unsigned Length;
+ uint64_t FeatureBits; // We need to pass through information about which
+ // core we are compiling for so that the SysReg
+ // Mappers can appropriately conditionalize.
};
- struct TokOp {
- const char *Data;
- unsigned Length;
+ struct SysCRImmOp {
+ unsigned Val;
+ };
+
+ struct PrefetchOp {
+ unsigned Val;
+ };
+
+ struct ShiftExtendOp {
+ AArch64_AM::ShiftExtendType Type;
+ unsigned Amount;
+ bool HasExplicitAmount;
+ };
+
+ struct ExtendOp {
+ unsigned Val;
};
union {
- struct ImmWithLSLOp ImmWithLSL;
- struct CondCodeOp CondCode;
- struct FPImmOp FPImm;
- struct ImmOp Imm;
+ struct TokOp Tok;
struct RegOp Reg;
- struct ShiftExtendOp ShiftExtend;
struct VectorListOp VectorList;
+ struct VectorIndexOp VectorIndex;
+ struct ImmOp Imm;
+ struct ShiftedImmOp ShiftedImm;
+ struct CondCodeOp CondCode;
+ struct FPImmOp FPImm;
+ struct BarrierOp Barrier;
struct SysRegOp SysReg;
- struct TokOp Tok;
+ struct SysCRImmOp SysCRImm;
+ struct PrefetchOp Prefetch;
+ struct ShiftExtendOp ShiftExtend;
};
- AArch64Operand(KindTy K, SMLoc S, SMLoc E)
- : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}
+  // Keep the MCContext around as the MCExprs may need to be manipulated
+  // during the add<>Operands() calls.
+ MCContext &Ctx;
+
+ AArch64Operand(KindTy K, MCContext &_Ctx)
+ : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
public:
- AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
+ AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
+ Kind = o.Kind;
+ StartLoc = o.StartLoc;
+ EndLoc = o.EndLoc;
+ switch (Kind) {
+ case k_Token:
+ Tok = o.Tok;
+ break;
+ case k_Immediate:
+ Imm = o.Imm;
+ break;
+ case k_ShiftedImm:
+ ShiftedImm = o.ShiftedImm;
+ break;
+ case k_CondCode:
+ CondCode = o.CondCode;
+ break;
+ case k_FPImm:
+ FPImm = o.FPImm;
+ break;
+ case k_Barrier:
+ Barrier = o.Barrier;
+ break;
+ case k_Register:
+ Reg = o.Reg;
+ break;
+ case k_VectorList:
+ VectorList = o.VectorList;
+ break;
+ case k_VectorIndex:
+ VectorIndex = o.VectorIndex;
+ break;
+ case k_SysReg:
+ SysReg = o.SysReg;
+ break;
+ case k_SysCR:
+ SysCRImm = o.SysCRImm;
+ break;
+ case k_Prefetch:
+ Prefetch = o.Prefetch;
+ break;
+ case k_ShiftExtend:
+ ShiftExtend = o.ShiftExtend;
+ break;
+ }
}
- SMLoc getStartLoc() const { return StartLoc; }
- SMLoc getEndLoc() const { return EndLoc; }
- void print(raw_ostream&) const;
- void dump() const;
+ /// getStartLoc - Get the location of the first token of this operand.
+ SMLoc getStartLoc() const override { return StartLoc; }
+ /// getEndLoc - Get the location of the last token of this operand.
+ SMLoc getEndLoc() const override { return EndLoc; }
StringRef getToken() const {
assert(Kind == k_Token && "Invalid access!");
return StringRef(Tok.Data, Tok.Length);
}
- unsigned getReg() const {
- assert((Kind == k_Register || Kind == k_WrappedRegister)
- && "Invalid access!");
- return Reg.RegNum;
+ bool isTokenSuffix() const {
+ assert(Kind == k_Token && "Invalid access!");
+ return Tok.IsSuffix;
}
const MCExpr *getImm() const {
@@ -255,1234 +311,1778 @@ public:
return Imm.Val;
}
- A64CC::CondCodes getCondCode() const {
- assert(Kind == k_CondCode && "Invalid access!");
- return CondCode.Code;
+ const MCExpr *getShiftedImmVal() const {
+ assert(Kind == k_ShiftedImm && "Invalid access!");
+ return ShiftedImm.Val;
}
- static bool isNonConstantExpr(const MCExpr *E,
- AArch64MCExpr::VariantKind &Variant) {
- if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
- Variant = A64E->getKind();
- return true;
- } else if (!isa<MCConstantExpr>(E)) {
- Variant = AArch64MCExpr::VK_AARCH64_None;
- return true;
- }
-
- return false;
+ unsigned getShiftedImmShift() const {
+ assert(Kind == k_ShiftedImm && "Invalid access!");
+ return ShiftedImm.ShiftAmount;
}
- bool isCondCode() const { return Kind == k_CondCode; }
- bool isToken() const { return Kind == k_Token; }
- bool isReg() const { return Kind == k_Register; }
- bool isImm() const { return Kind == k_Immediate; }
- bool isMem() const { return false; }
- bool isFPImm() const { return Kind == k_FPImmediate; }
- bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
- bool isSysReg() const { return Kind == k_SysReg; }
- bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
- bool isWrappedReg() const { return Kind == k_WrappedRegister; }
-
- bool isAddSubImmLSL0() const {
- if (!isImmWithLSL()) return false;
- if (ImmWithLSL.ShiftAmount != 0) return false;
-
- AArch64MCExpr::VariantKind Variant;
- if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
- return Variant == AArch64MCExpr::VK_AARCH64_LO12
- || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
- || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
- || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
- || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
- || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
- }
-
- // Otherwise it should be a real immediate in range:
- const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
- return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
+ AArch64CC::CondCode getCondCode() const {
+ assert(Kind == k_CondCode && "Invalid access!");
+ return CondCode.Code;
}
- bool isAddSubImmLSL12() const {
- if (!isImmWithLSL()) return false;
- if (ImmWithLSL.ShiftAmount != 12) return false;
-
- AArch64MCExpr::VariantKind Variant;
- if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
- return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
- || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
- }
-
- // Otherwise it should be a real immediate in range:
- const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
- return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
+ unsigned getFPImm() const {
+ assert(Kind == k_FPImm && "Invalid access!");
+ return FPImm.Val;
}
- template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
- if (!isShiftOrExtend()) return false;
-
- A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
- if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
- return false;
-
- if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
- return false;
-
- return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
+ unsigned getBarrier() const {
+ assert(Kind == k_Barrier && "Invalid access!");
+ return Barrier.Val;
}
- bool isAdrpLabel() const {
- if (!isImm()) return false;
-
- AArch64MCExpr::VariantKind Variant;
- if (isNonConstantExpr(getImm(), Variant)) {
- return Variant == AArch64MCExpr::VK_AARCH64_None
- || Variant == AArch64MCExpr::VK_AARCH64_GOT
- || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
- || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
- }
-
- return isLabel<21, 4096>();
+ unsigned getReg() const override {
+ assert(Kind == k_Register && "Invalid access!");
+ return Reg.RegNum;
}
- template<unsigned RegWidth> bool isBitfieldWidth() const {
- if (!isImm()) return false;
-
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
-
- return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
+ unsigned getVectorListStart() const {
+ assert(Kind == k_VectorList && "Invalid access!");
+ return VectorList.RegNum;
}
- template<int RegWidth>
- bool isCVTFixedPos() const {
- if (!isImm()) return false;
-
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
-
- return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
+ unsigned getVectorListCount() const {
+ assert(Kind == k_VectorList && "Invalid access!");
+ return VectorList.Count;
}
- bool isFMOVImm() const {
- if (!isFPImm()) return false;
-
- APFloat RealVal(FPImm.Val);
- uint32_t ImmVal;
- return A64Imms::isFPImm(RealVal, ImmVal);
+ unsigned getVectorIndex() const {
+ assert(Kind == k_VectorIndex && "Invalid access!");
+ return VectorIndex.Val;
}
- bool isFPZero() const {
- if (!isFPImm()) return false;
+ StringRef getSysReg() const {
+ assert(Kind == k_SysReg && "Invalid access!");
+ return StringRef(SysReg.Data, SysReg.Length);
+ }
- APFloat RealVal(FPImm.Val);
- return RealVal.isPosZero();
+ uint64_t getSysRegFeatureBits() const {
+ assert(Kind == k_SysReg && "Invalid access!");
+ return SysReg.FeatureBits;
}
- template<unsigned field_width, unsigned scale>
- bool isLabel() const {
- if (!isImm()) return false;
+ unsigned getSysCR() const {
+ assert(Kind == k_SysCR && "Invalid access!");
+ return SysCRImm.Val;
+ }
- if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
- return true;
- } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
- int64_t Val = CE->getValue();
- int64_t Min = - (scale * (1LL << (field_width - 1)));
- int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
- return (Val % scale) == 0 && Val >= Min && Val <= Max;
- }
+ unsigned getPrefetch() const {
+ assert(Kind == k_Prefetch && "Invalid access!");
+ return Prefetch.Val;
+ }
- // N.b. this disallows explicit relocation specifications via an
- // AArch64MCExpr. Users needing that behaviour
- return false;
+ AArch64_AM::ShiftExtendType getShiftExtendType() const {
+ assert(Kind == k_ShiftExtend && "Invalid access!");
+ return ShiftExtend.Type;
}
- bool isLane1() const {
- if (!isImm()) return false;
+ unsigned getShiftExtendAmount() const {
+ assert(Kind == k_ShiftExtend && "Invalid access!");
+ return ShiftExtend.Amount;
+ }
- // Because it's come through custom assembly parsing, it must always be a
- // constant expression.
- return cast<MCConstantExpr>(getImm())->getValue() == 1;
+ bool hasShiftExtendAmount() const {
+ assert(Kind == k_ShiftExtend && "Invalid access!");
+ return ShiftExtend.HasExplicitAmount;
}
- bool isLoadLitLabel() const {
- if (!isImm()) return false;
+ bool isImm() const override { return Kind == k_Immediate; }
+ bool isMem() const override { return false; }
+ bool isSImm9() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= -256 && Val < 256);
+ }
+ bool isSImm7s4() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
+ }
+ bool isSImm7s8() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
+ }
+ bool isSImm7s16() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
+ }
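
// [Editor's sketch, not part of this patch] The four isSImm7s* predicates
// above instantiate one pattern: the value must be a multiple of the scale
// and, once scaled, fit the signed 7-bit field [-64, 63]. A minimal
// standalone restatement with a hypothetical helper name:
#include <cstdint>
static bool isScaledSImm7(int64_t Val, int64_t Scale) {
  if (Val % Scale != 0)
    return false;                        // must be Scale-aligned
  int64_t Scaled = Val / Scale;
  return Scaled >= -64 && Scaled < 64;   // signed 7-bit field
}
// e.g. isScaledSImm7(Val, 4) reproduces isSImm7s4's bounds:
// -256 <= Val <= 252 with Val % 4 == 0.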
+
+ bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
+ AArch64MCExpr::VariantKind ELFRefKind;
+ MCSymbolRefExpr::VariantKind DarwinRefKind;
+ int64_t Addend;
+ if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
+ Addend)) {
+ // If we don't understand the expression, assume the best and
+ // let the fixup and relocation code deal with it.
+ return true;
+ }
- AArch64MCExpr::VariantKind Variant;
- if (isNonConstantExpr(getImm(), Variant)) {
- return Variant == AArch64MCExpr::VK_AARCH64_None
- || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
+ if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
+ ELFRefKind == AArch64MCExpr::VK_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
+ ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
+ ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
+ ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
+ // Note that we don't range-check the addend. It's adjusted modulo page
+ // size when converted, so there is no "out of range" condition when using
+ // @pageoff.
+ return Addend >= 0 && (Addend % Scale) == 0;
+ } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
+ DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
+ // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
+ return Addend == 0;
}
- return isLabel<19, 4>();
+ return false;
}
- template<unsigned RegWidth> bool isLogicalImm() const {
- if (!isImm()) return false;
+ template <int Scale> bool isUImm12Offset() const {
+ if (!isImm())
+ return false;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
- if (!CE) return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return isSymbolicUImm12Offset(getImm(), Scale);
- uint32_t Bits;
- return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
+ int64_t Val = MCE->getValue();
+ return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
}
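
// [Editor's sketch, not part of this patch] For constant offsets the check
// above reduces to "non-negative, Scale-aligned, and an unsigned 12-bit
// value after scaling". Restated with a hypothetical helper:
#include <cstdint>
static bool isScaledUImm12(int64_t Val, int64_t Scale) {
  return Val >= 0 && (Val % Scale) == 0 && (Val / Scale) < 0x1000;
}
// e.g. an 8-byte LDR (Scale = 8) accepts byte offsets 0, 8, ..., 32760.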
- template<unsigned RegWidth> bool isLogicalImmMOV() const {
- if (!isLogicalImm<RegWidth>()) return false;
-
- const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
-
- // The move alias for ORR is only valid if the immediate cannot be
- // represented with a move (immediate) instruction; they take priority.
- int UImm16, Shift;
- return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
- && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
+ bool isImm0_7() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 0 && Val < 8);
+ }
+ bool isImm1_8() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val > 0 && Val < 9);
+ }
+ bool isImm0_15() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 0 && Val < 16);
+ }
+ bool isImm1_16() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val > 0 && Val < 17);
+ }
+ bool isImm0_31() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 0 && Val < 32);
+ }
+ bool isImm1_31() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 1 && Val < 32);
+ }
+ bool isImm1_32() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 1 && Val < 33);
+ }
+ bool isImm0_63() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 0 && Val < 64);
+ }
+ bool isImm1_63() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 1 && Val < 64);
+ }
+ bool isImm1_64() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 1 && Val < 65);
+ }
+ bool isImm0_127() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 0 && Val < 128);
+ }
+ bool isImm0_255() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 0 && Val < 256);
+ }
+ bool isImm0_65535() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 0 && Val < 65536);
+ }
+ bool isImm32_63() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 32 && Val < 64);
+ }
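
// [Editor's note] The isImm0_7 ... isImm32_63 family above is deliberately
// mechanical: each predicate is "constant expression whose value lies in
// [Lo, Hi]", one per operand class referenced by the generated matcher.
// A templated restatement (hypothetical, not in the patch):
#include <cstdint>
template <int64_t Lo, int64_t Hi>
static bool isImmInRange(int64_t Val) {
  return Val >= Lo && Val <= Hi;
}
// e.g. isImmInRange<0, 7> corresponds to isImm0_7.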
+ bool isLogicalImm32() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ return AArch64_AM::isLogicalImmediate(MCE->getValue(), 32);
}
+ bool isLogicalImm64() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
+ }
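
// [Editor's sketch, not part of this patch] AArch64_AM::isLogicalImmediate,
// used above, accepts exactly the values expressible as a contiguous run of
// ones rotated within a 2/4/8/16/32/64-bit element and replicated across
// the register. A slow but self-contained reference check under that
// definition (not the patch's implementation):
#include <cstdint>
static bool isLogicalImmRef(uint64_t Val, unsigned RegWidth) {
  if (RegWidth == 32)
    Val &= 0xffffffffULL;
  for (unsigned Size = 2; Size <= RegWidth; Size *= 2) {
    uint64_t ElemMask = (Size == 64) ? ~0ULL : ((1ULL << Size) - 1);
    for (unsigned Len = 1; Len < Size; ++Len) {   // all-zeros/ones excluded
      uint64_t Run = (1ULL << Len) - 1;
      for (unsigned Rot = 0; Rot < Size; ++Rot) {
        uint64_t Elem =
            Rot ? (((Run >> Rot) | (Run << (Size - Rot))) & ElemMask) : Run;
        uint64_t Pattern = Elem;
        for (unsigned I = Size; I < RegWidth; I *= 2) // replicate element
          Pattern |= Pattern << I;
        if (Pattern == Val)
          return true;
      }
    }
  }
  return false;
}
// e.g. 0x00ff00ff00ff00ff is encodable (8 ones in a 16-bit element);
// 0x1234 is not.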
+ bool isShiftedImm() const { return Kind == k_ShiftedImm; }
+ bool isAddSubImm() const {
+ if (!isShiftedImm() && !isImm())
+ return false;
- template<int MemSize>
- bool isOffsetUImm12() const {
- if (!isImm()) return false;
+ const MCExpr *Expr;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
+ if (isShiftedImm()) {
+ unsigned Shift = ShiftedImm.ShiftAmount;
+ Expr = ShiftedImm.Val;
+ if (Shift != 0 && Shift != 12)
+ return false;
+ } else {
+ Expr = getImm();
+ }
- // Assume they know what they're doing for now if they've given us a
- // non-constant expression. In principle we could check for ridiculous
- // things that can't possibly work or relocations that would almost
- // certainly break resulting code.
- if (!CE)
+ AArch64MCExpr::VariantKind ELFRefKind;
+ MCSymbolRefExpr::VariantKind DarwinRefKind;
+ int64_t Addend;
+ if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
+ DarwinRefKind, Addend)) {
+ return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
+ || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
+ || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
+ || ELFRefKind == AArch64MCExpr::VK_LO12
+ || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
+ || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
+ || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
+ || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
+ || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
+ || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
+ || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
+ }
+
+ // Otherwise it should be a real immediate in range:
+ const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
+ return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
+ }
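
// [Editor's note] Concretely, "add x0, x1, #15" parses as a plain
// immediate, while "add x0, x1, #15, lsl #12" parses as a k_ShiftedImm;
// both satisfy isAddSubImm because the constant fits in 12 bits and the
// shift is 0 or 12. Sketch of the constant-only case (helper name ours):
#include <cstdint>
static bool isAddSubConst(uint64_t Imm, unsigned Shift) {
  return (Shift == 0 || Shift == 12) && Imm <= 0xfff;
}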
+ bool isCondCode() const { return Kind == k_CondCode; }
+ bool isSIMDImmType10() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
+ }
+ bool isBranchTarget26() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return true;
+ int64_t Val = MCE->getValue();
+ if (Val & 0x3)
+ return false;
+ return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
+ }
+ bool isPCRelLabel19() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return true;
+ int64_t Val = MCE->getValue();
+ if (Val & 0x3)
+ return false;
+ return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
+ }
+ bool isBranchTarget14() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
return true;
+ int64_t Val = MCE->getValue();
+ if (Val & 0x3)
+ return false;
+ return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
+ }
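
// [Editor's sketch, not part of this patch] The three branch-target
// predicates above share one shape: the byte offset must be word-aligned
// and fit a signed field of the stated width once divided by 4:
#include <cstdint>
static bool fitsBranchField(int64_t ByteOff, unsigned Bits) {
  if (ByteOff & 0x3)
    return false;                                   // 4-byte aligned
  int64_t Lo = -(int64_t(1) << (Bits - 1)) * 4;
  int64_t Hi = ((int64_t(1) << (Bits - 1)) - 1) * 4;
  return ByteOff >= Lo && ByteOff <= Hi;
}
// fitsBranchField(Off, 26), (Off, 19), and (Off, 14) reproduce the constant
// checks of isBranchTarget26, isPCRelLabel19, and isBranchTarget14.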
- int64_t Val = CE->getValue();
+ bool
+ isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
+ if (!isImm())
+ return false;
- // Must be a multiple of the access size in bytes.
- if ((Val & (MemSize - 1)) != 0) return false;
+ AArch64MCExpr::VariantKind ELFRefKind;
+ MCSymbolRefExpr::VariantKind DarwinRefKind;
+ int64_t Addend;
+ if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
+ DarwinRefKind, Addend)) {
+ return false;
+ }
+ if (DarwinRefKind != MCSymbolRefExpr::VK_None)
+ return false;
- // Must be 12-bit unsigned
- return Val >= 0 && Val <= 0xfff * MemSize;
- }
+ for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
+ if (ELFRefKind == AllowedModifiers[i])
+ return Addend == 0;
+ }
- template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
- bool isShift() const {
- if (!isShiftOrExtend()) return false;
+ return false;
+ }
- if (ShiftExtend.ShiftType != SHKind)
- return false;
+ bool isMovZSymbolG3() const {
+ static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
+ return isMovWSymbol(Variants);
+ }
- return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
+ bool isMovZSymbolG2() const {
+ static AArch64MCExpr::VariantKind Variants[] = {
+ AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
+ AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
+ return isMovWSymbol(Variants);
}
- bool isMOVN32Imm() const {
- static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
- AArch64MCExpr::VK_AARCH64_SABS_G0,
- AArch64MCExpr::VK_AARCH64_SABS_G1,
- AArch64MCExpr::VK_AARCH64_DTPREL_G1,
- AArch64MCExpr::VK_AARCH64_DTPREL_G0,
- AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G0,
+ bool isMovZSymbolG1() const {
+ static AArch64MCExpr::VariantKind Variants[] = {
+ AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
+ AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
+ AArch64MCExpr::VK_DTPREL_G1,
};
- const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
- return isMoveWideImm(32, PermittedModifiers, NumModifiers);
- }
-
- bool isMOVN64Imm() const {
- static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
- AArch64MCExpr::VK_AARCH64_SABS_G0,
- AArch64MCExpr::VK_AARCH64_SABS_G1,
- AArch64MCExpr::VK_AARCH64_SABS_G2,
- AArch64MCExpr::VK_AARCH64_DTPREL_G2,
- AArch64MCExpr::VK_AARCH64_DTPREL_G1,
- AArch64MCExpr::VK_AARCH64_DTPREL_G0,
- AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G2,
- AArch64MCExpr::VK_AARCH64_TPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G0,
- };
- const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
- return isMoveWideImm(64, PermittedModifiers, NumModifiers);
+ return isMovWSymbol(Variants);
}
+ bool isMovZSymbolG0() const {
+ static AArch64MCExpr::VariantKind Variants[] = {
+ AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
+ AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
+ return isMovWSymbol(Variants);
+ }
- bool isMOVZ32Imm() const {
- static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
- AArch64MCExpr::VK_AARCH64_ABS_G0,
- AArch64MCExpr::VK_AARCH64_ABS_G1,
- AArch64MCExpr::VK_AARCH64_SABS_G0,
- AArch64MCExpr::VK_AARCH64_SABS_G1,
- AArch64MCExpr::VK_AARCH64_DTPREL_G1,
- AArch64MCExpr::VK_AARCH64_DTPREL_G0,
- AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G0,
- };
- const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
- return isMoveWideImm(32, PermittedModifiers, NumModifiers);
- }
-
- bool isMOVZ64Imm() const {
- static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
- AArch64MCExpr::VK_AARCH64_ABS_G0,
- AArch64MCExpr::VK_AARCH64_ABS_G1,
- AArch64MCExpr::VK_AARCH64_ABS_G2,
- AArch64MCExpr::VK_AARCH64_ABS_G3,
- AArch64MCExpr::VK_AARCH64_SABS_G0,
- AArch64MCExpr::VK_AARCH64_SABS_G1,
- AArch64MCExpr::VK_AARCH64_SABS_G2,
- AArch64MCExpr::VK_AARCH64_DTPREL_G2,
- AArch64MCExpr::VK_AARCH64_DTPREL_G1,
- AArch64MCExpr::VK_AARCH64_DTPREL_G0,
- AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G2,
- AArch64MCExpr::VK_AARCH64_TPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G0,
- };
- const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
+ bool isMovKSymbolG3() const {
+ static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
+ return isMovWSymbol(Variants);
+ }
- return isMoveWideImm(64, PermittedModifiers, NumModifiers);
+ bool isMovKSymbolG2() const {
+ static AArch64MCExpr::VariantKind Variants[] = {
+ AArch64MCExpr::VK_ABS_G2_NC};
+ return isMovWSymbol(Variants);
}
- bool isMOVK32Imm() const {
- static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
- AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
- AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
- AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
- AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
- AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
- AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
- AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
- };
- const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
- return isMoveWideImm(32, PermittedModifiers, NumModifiers);
- }
-
- bool isMOVK64Imm() const {
- static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
- AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
- AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
- AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
- AArch64MCExpr::VK_AARCH64_ABS_G3,
- AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
- AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
- AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
- AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
- AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
+ bool isMovKSymbolG1() const {
+ static AArch64MCExpr::VariantKind Variants[] = {
+ AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
+ AArch64MCExpr::VK_DTPREL_G1_NC
};
- const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
- return isMoveWideImm(64, PermittedModifiers, NumModifiers);
+ return isMovWSymbol(Variants);
}
- bool isMoveWideImm(unsigned RegWidth,
- const AArch64MCExpr::VariantKind *PermittedModifiers,
- unsigned NumModifiers) const {
- if (!isImmWithLSL()) return false;
+ bool isMovKSymbolG0() const {
+ static AArch64MCExpr::VariantKind Variants[] = {
+ AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
+ AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
+ };
+ return isMovWSymbol(Variants);
+ }
- if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
- if (ImmWithLSL.ShiftAmount >= RegWidth) return false;
+ template<int RegWidth, int Shift>
+ bool isMOVZMovAlias() const {
+ if (!isImm()) return false;
- AArch64MCExpr::VariantKind Modifier;
- if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
- // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
- if (!ImmWithLSL.ImplicitAmount) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ uint64_t Value = CE->getValue();
- for (unsigned i = 0; i < NumModifiers; ++i)
- if (PermittedModifiers[i] == Modifier) return true;
+ if (RegWidth == 32)
+ Value &= 0xffffffffULL;
+ // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
+ if (Value == 0 && Shift != 0)
return false;
- }
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
- return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
+ return (Value & ~(0xffffULL << Shift)) == 0;
}
- template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
- bool isMoveWideMovAlias() const {
+ template<int RegWidth, int Shift>
+ bool isMOVNMovAlias() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
-
- int UImm16, Shift;
uint64_t Value = CE->getValue();
- // If this is a 32-bit instruction then all bits above 32 should be the
- // same: either of these is fine because signed/unsigned values should be
- // permitted.
- if (RegWidth == 32) {
- if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
+ // MOVZ takes precedence over MOVN.
+ for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
+ if ((Value & ~(0xffffULL << MOVZShift)) == 0)
return false;
+ Value = ~Value;
+ if (RegWidth == 32)
Value &= 0xffffffffULL;
- }
- return isValidImm(RegWidth, Value, UImm16, Shift);
+ return (Value & ~(0xffffULL << Shift)) == 0;
}
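
// [Editor's sketch, not part of this patch] The two mov-alias predicates
// encode a precedence rule: a width-truncated constant that MOVZ can
// materialize directly (one 16-bit chunk at an aligned shift) never matches
// the MOVN alias. Restated with hypothetical helpers:
#include <cstdint>
static bool movzCanEncode(uint64_t V) {
  for (int S = 0; S <= 48; S += 16)
    if ((V & ~(0xffffULL << S)) == 0)
      return true;
  return false;
}
static bool movAliasPrefersMOVN(uint64_t V, bool Is32Bit) {
  if (Is32Bit)
    V &= 0xffffffffULL;
  if (movzCanEncode(V))
    return false;                 // MOVZ takes precedence
  uint64_t Inv = Is32Bit ? (~V & 0xffffffffULL) : ~V;
  return movzCanEncode(Inv);
}
// e.g. "mov w0, #0xffff0000" uses MOVZ (one chunk at lsl #16), while
// "mov w0, #0xffff1234" uses MOVN (~value is the single chunk 0x0000edcb).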
- bool isMSRWithReg() const {
+ bool isFPImm() const { return Kind == k_FPImm; }
+ bool isBarrier() const { return Kind == k_Barrier; }
+ bool isSysReg() const { return Kind == k_SysReg; }
+ bool isMRSSystemRegister() const {
if (!isSysReg()) return false;
bool IsKnownRegister;
- StringRef Name(SysReg.Data, SysReg.Length);
- A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);
+ auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
+ Mapper.fromString(getSysReg(), IsKnownRegister);
return IsKnownRegister;
}
-
- bool isMSRPState() const {
+ bool isMSRSystemRegister() const {
if (!isSysReg()) return false;
bool IsKnownRegister;
- StringRef Name(SysReg.Data, SysReg.Length);
- A64PState::PStateMapper().fromString(Name, IsKnownRegister);
+ auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
+ Mapper.fromString(getSysReg(), IsKnownRegister);
return IsKnownRegister;
}
-
- bool isMRS() const {
+ bool isSystemPStateField() const {
if (!isSysReg()) return false;
- // First check against specific MSR-only (write-only) registers
bool IsKnownRegister;
- StringRef Name(SysReg.Data, SysReg.Length);
- A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);
+ AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
return IsKnownRegister;
}
+ bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
+ bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
+ bool isVectorRegLo() const {
+ return Kind == k_Register && Reg.isVector &&
+ AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
+ Reg.RegNum);
+ }
+ bool isGPR32as64() const {
+ return Kind == k_Register && !Reg.isVector &&
+ AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
+ }
- bool isPRFM() const {
- if (!isImm()) return false;
+ bool isGPR64sp0() const {
+ return Kind == k_Register && !Reg.isVector &&
+ AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
+ }
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ /// Is this a vector list with the type implicit (presumably attached to the
+ /// instruction itself)?
+ template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
+ return Kind == k_VectorList && VectorList.Count == NumRegs &&
+ !VectorList.ElementKind;
+ }
- if (!CE)
+ template <unsigned NumRegs, unsigned NumElements, char ElementKind>
+ bool isTypedVectorList() const {
+ if (Kind != k_VectorList)
return false;
-
- return CE->getValue() >= 0 && CE->getValue() <= 31;
+ if (VectorList.Count != NumRegs)
+ return false;
+ if (VectorList.ElementKind != ElementKind)
+ return false;
+ return VectorList.NumElements == NumElements;
}
- template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
- if (!isShiftOrExtend()) return false;
-
- if (ShiftExtend.ShiftType != SHKind)
+ bool isVectorIndex1() const {
+ return Kind == k_VectorIndex && VectorIndex.Val == 1;
+ }
+ bool isVectorIndexB() const {
+ return Kind == k_VectorIndex && VectorIndex.Val < 16;
+ }
+ bool isVectorIndexH() const {
+ return Kind == k_VectorIndex && VectorIndex.Val < 8;
+ }
+ bool isVectorIndexS() const {
+ return Kind == k_VectorIndex && VectorIndex.Val < 4;
+ }
+ bool isVectorIndexD() const {
+ return Kind == k_VectorIndex && VectorIndex.Val < 2;
+ }
+ bool isToken() const override { return Kind == k_Token; }
+ bool isTokenEqual(StringRef Str) const {
+ return Kind == k_Token && getToken() == Str;
+ }
+ bool isSysCR() const { return Kind == k_SysCR; }
+ bool isPrefetch() const { return Kind == k_Prefetch; }
+ bool isShiftExtend() const { return Kind == k_ShiftExtend; }
+ bool isShifter() const {
+ if (!isShiftExtend())
return false;
- return ShiftExtend.Amount <= 4;
+ AArch64_AM::ShiftExtendType ST = getShiftExtendType();
+ return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
+ ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
+ ST == AArch64_AM::MSL);
}
-
- bool isRegExtendLSL() const {
- if (!isShiftOrExtend()) return false;
-
- if (ShiftExtend.ShiftType != A64SE::LSL)
+ bool isExtend() const {
+ if (!isShiftExtend())
return false;
- return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
+ ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
+ ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
+ ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
+ ET == AArch64_AM::LSL) &&
+ getShiftExtendAmount() <= 4;
}
- // if 0 < value <= w, return true
- bool isShrFixedWidth(int w) const {
- if (!isImm())
+ bool isExtend64() const {
+ if (!isExtend())
return false;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE)
+ // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
+ }
+ bool isExtendLSL64() const {
+ if (!isExtend())
return false;
- int64_t Value = CE->getValue();
- return Value > 0 && Value <= w;
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
+ ET == AArch64_AM::LSL) &&
+ getShiftExtendAmount() <= 4;
}
- bool isShrImm8() const { return isShrFixedWidth(8); }
-
- bool isShrImm16() const { return isShrFixedWidth(16); }
-
- bool isShrImm32() const { return isShrFixedWidth(32); }
-
- bool isShrImm64() const { return isShrFixedWidth(64); }
-
- // if 0 <= value < w, return true
- bool isShlFixedWidth(int w) const {
- if (!isImm())
- return false;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE)
+ template<int Width> bool isMemXExtend() const {
+ if (!isExtend())
return false;
- int64_t Value = CE->getValue();
- return Value >= 0 && Value < w;
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
+ (getShiftExtendAmount() == Log2_32(Width / 8) ||
+ getShiftExtendAmount() == 0);
}
- bool isShlImm8() const { return isShlFixedWidth(8); }
-
- bool isShlImm16() const { return isShlFixedWidth(16); }
-
- bool isShlImm32() const { return isShlFixedWidth(32); }
-
- bool isShlImm64() const { return isShlFixedWidth(64); }
-
- bool isNeonMovImmShiftLSL() const {
- if (!isShiftOrExtend())
+ template<int Width> bool isMemWExtend() const {
+ if (!isExtend())
return false;
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
+ (getShiftExtendAmount() == Log2_32(Width / 8) ||
+ getShiftExtendAmount() == 0);
+ }
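
// [Editor's note] For register-offset loads/stores the extend amount must
// be 0 (unscaled index) or log2 of the access size in bytes, e.g.
// "ldr x0, [x1, x2, lsl #3]" scales the index by 8 for a 64-bit load.
// Sketch of the amount check, avoiding the LLVM Log2_32 helper:
static bool isValidMemExtendAmount(unsigned Amount, unsigned WidthBits) {
  unsigned ByteLog = 0;
  for (unsigned W = WidthBits / 8; W > 1; W >>= 1)
    ++ByteLog;                          // Log2_32(Width / 8)
  return Amount == 0 || Amount == ByteLog;
}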
- if (ShiftExtend.ShiftType != A64SE::LSL)
+ template <unsigned width>
+ bool isArithmeticShifter() const {
+ if (!isShifter())
return false;
- // Valid shift amount is 0, 8, 16 and 24.
- return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24;
+ // An arithmetic shifter is LSL, LSR, or ASR.
+ AArch64_AM::ShiftExtendType ST = getShiftExtendType();
+ return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
+ ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
}
- bool isNeonMovImmShiftLSLH() const {
- if (!isShiftOrExtend())
+ template <unsigned width>
+ bool isLogicalShifter() const {
+ if (!isShifter())
return false;
- if (ShiftExtend.ShiftType != A64SE::LSL)
+ // A logical shifter is LSL, LSR, ASR or ROR.
+ AArch64_AM::ShiftExtendType ST = getShiftExtendType();
+ return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
+ ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
+ getShiftExtendAmount() < width;
+ }
+
+ bool isMovImm32Shifter() const {
+ if (!isShifter())
return false;
- // Valid shift amount is 0 and 8.
- return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8;
+    // A 32-bit MOVi shifter is LSL of 0 or 16.
+ AArch64_AM::ShiftExtendType ST = getShiftExtendType();
+ if (ST != AArch64_AM::LSL)
+ return false;
+ uint64_t Val = getShiftExtendAmount();
+ return (Val == 0 || Val == 16);
}
- bool isNeonMovImmShiftMSL() const {
- if (!isShiftOrExtend())
+ bool isMovImm64Shifter() const {
+ if (!isShifter())
return false;
- if (ShiftExtend.ShiftType != A64SE::MSL)
+    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
+ AArch64_AM::ShiftExtendType ST = getShiftExtendType();
+ if (ST != AArch64_AM::LSL)
return false;
-
- // Valid shift amount is 8 and 16.
- return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16;
+ uint64_t Val = getShiftExtendAmount();
+ return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
}
- template <A64Layout::VectorLayout Layout, unsigned Count>
- bool isVectorList() const {
- return Kind == k_VectorList && VectorList.Layout == Layout &&
- VectorList.Count == Count;
+ bool isLogicalVecShifter() const {
+ if (!isShifter())
+ return false;
+
+ // A logical vector shifter is a left shift by 0, 8, 16, or 24.
+ unsigned Shift = getShiftExtendAmount();
+ return getShiftExtendType() == AArch64_AM::LSL &&
+ (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
}
- template <int MemSize> bool isSImm7Scaled() const {
- if (!isImm())
+ bool isLogicalVecHalfWordShifter() const {
+ if (!isLogicalVecShifter())
return false;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
+    // A logical vector half-word shifter is a left shift by 0 or 8.
+ unsigned Shift = getShiftExtendAmount();
+ return getShiftExtendType() == AArch64_AM::LSL &&
+ (Shift == 0 || Shift == 8);
+ }
- int64_t Val = CE->getValue();
- if (Val % MemSize != 0) return false;
+ bool isMoveVecShifter() const {
+ if (!isShiftExtend())
+ return false;
- Val /= MemSize;
+    // A move vector shifter is an MSL left shift by 8 or 16.
+ unsigned Shift = getShiftExtendAmount();
+ return getShiftExtendType() == AArch64_AM::MSL &&
+ (Shift == 8 || Shift == 16);
+ }
- return Val >= -64 && Val < 64;
+ // Fallback unscaled operands are for aliases of LDR/STR that fall back
+ // to LDUR/STUR when the offset is not legal for the former but is for
+ // the latter. As such, in addition to checking for being a legal unscaled
+ // address, also check that it is not a legal scaled address. This avoids
+ // ambiguity in the matcher.
+ template<int Width>
+ bool isSImm9OffsetFB() const {
+ return isSImm9() && !isUImm12Offset<Width / 8>();
}
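
// [Editor's note] The fallback predicate above is what lets
// "ldr x0, [x1, #1]" select the unscaled LDUR form (1 is in [-256, 256)
// but not a multiple of 8), while "ldr x0, [x1, #8]" stays on the scaled
// LDR encoding and so fails isSImm9OffsetFB. Restated for constants:
#include <cstdint>
static bool usesUnscaledFallback(int64_t Off, int64_t AccessBytes) {
  bool UnscaledOK = Off >= -256 && Off < 256;              // isSImm9
  bool ScaledOK = Off >= 0 && Off % AccessBytes == 0 &&
                  Off / AccessBytes < 0x1000;              // isUImm12Offset
  return UnscaledOK && !ScaledOK;
}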
- template<int BitWidth>
- bool isSImm() const {
- if (!isImm()) return false;
+ bool isAdrpLabel() const {
+ // Validation was handled during parsing, so we just sanity check that
+ // something didn't go haywire.
+ if (!isImm())
+ return false;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
+ int64_t Val = CE->getValue();
+ int64_t Min = - (4096 * (1LL << (21 - 1)));
+ int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
+ return (Val % 4096) == 0 && Val >= Min && Val <= Max;
+ }
- return CE->getValue() >= -(1LL << (BitWidth - 1))
- && CE->getValue() < (1LL << (BitWidth - 1));
+ return true;
}
- template<int bitWidth>
- bool isUImm() const {
- if (!isImm()) return false;
+ bool isAdrLabel() const {
+ // Validation was handled during parsing, so we just sanity check that
+ // something didn't go haywire.
+ if (!isImm())
+ return false;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
+ int64_t Val = CE->getValue();
+ int64_t Min = - (1LL << (21 - 1));
+ int64_t Max = ((1LL << (21 - 1)) - 1);
+ return Val >= Min && Val <= Max;
+ }
- return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
+ return true;
}
- bool isUImm() const {
- if (!isImm()) return false;
+ void addExpr(MCInst &Inst, const MCExpr *Expr) const {
+ // Add as immediates when possible. Null MCExpr = 0.
+ if (!Expr)
+ Inst.addOperand(MCOperand::CreateImm(0));
+ else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+ else
+ Inst.addOperand(MCOperand::CreateExpr(Expr));
+ }
- return isa<MCConstantExpr>(getImm());
+ void addRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getReg()));
}
- bool isNeonUImm64Mask() const {
- if (!isImm())
- return false;
+ void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ assert(
+ AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE)
- return false;
+ const MCRegisterInfo *RI = Ctx.getRegisterInfo();
+ uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
+ RI->getEncodingValue(getReg()));
- uint64_t Value = CE->getValue();
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ }
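
// [Editor's note] GPR32as64 remaps a 64-bit register operand to its 32-bit
// alias with the same encoding value (x5 -> w5): the MCRegisterInfo lookup
// above is table-driven, but conceptually it is "same index, sibling class".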
- // i64 value with each byte being either 0x00 or 0xff.
- for (unsigned i = 0; i < 8; ++i, Value >>= 8)
- if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
- return false;
- return true;
+ void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ assert(
+ AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
+ Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
}
- // if value == N, return true
- template<int N>
- bool isExactImm() const {
- if (!isImm()) return false;
+ void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ assert(
+ AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
+ Inst.addOperand(MCOperand::CreateReg(getReg()));
+ }
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
+ void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getReg()));
+ }
- return CE->getValue() == N;
+ template <unsigned NumRegs>
+ void addVectorList64Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
+ AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
+ unsigned FirstReg = FirstRegs[NumRegs - 1];
+
+ Inst.addOperand(
+ MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
}
- bool isFPZeroIZero() const {
- return isFPZero();
+ template <unsigned NumRegs>
+ void addVectorList128Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
+ AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
+ unsigned FirstReg = FirstRegs[NumRegs - 1];
+
+ Inst.addOperand(
+ MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
}
- static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
- unsigned ShiftAmount,
- bool ImplicitAmount,
- SMLoc S,SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
- Op->ImmWithLSL.Val = Val;
- Op->ImmWithLSL.ShiftAmount = ShiftAmount;
- Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
- return Op;
+ void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
}
- static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
- SMLoc S, SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
- Op->CondCode.Code = Code;
- return Op;
+ void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
}
- static AArch64Operand *CreateFPImm(double Val,
- SMLoc S, SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
- Op->FPImm.Val = Val;
- return Op;
+ void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
}
- static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
- Op->Imm.Val = Val;
- return Op;
+ void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
}
- static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
- Op->Reg.RegNum = RegNum;
- return Op;
+ void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
}
- static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
- Op->Reg.RegNum = RegNum;
- return Op;
+ void addImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // If this is a pageoff symrefexpr with an addend, adjust the addend
+ // to be only the page-offset portion. Otherwise, just add the expr
+ // as-is.
+ addExpr(Inst, getImm());
}
- static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
- unsigned Amount,
- bool ImplicitAmount,
- SMLoc S, SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
- Op->ShiftExtend.ShiftType = ShiftTyp;
- Op->ShiftExtend.Amount = Amount;
- Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
- return Op;
+ void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ if (isShiftedImm()) {
+ addExpr(Inst, getShiftedImmVal());
+ Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
+ } else {
+ addExpr(Inst, getImm());
+ Inst.addOperand(MCOperand::CreateImm(0));
+ }
}
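+  // Illustrative example: "add x0, x1, #0x3000" reaches here as a shifted
+  // immediate (tryParseAddSubImm folds it to #3, lsl #12), so this method
+  // emits the two operands:
+  //   Inst.addOperand(MCOperand::CreateImm(3));   // value >> 12
+  //   Inst.addOperand(MCOperand::CreateImm(12));  // shift amount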
- static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
- AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
- Op->Tok.Data = Str.data();
- Op->Tok.Length = Str.size();
- return Op;
+ void addCondCodeOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getCondCode()));
}
- static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
- A64Layout::VectorLayout Layout,
- SMLoc S, SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_VectorList, S, E);
- Op->VectorList.RegNum = RegNum;
- Op->VectorList.Count = Count;
- Op->VectorList.Layout = Layout;
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
+ void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ addExpr(Inst, getImm());
+ else
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
}
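+  // Illustrative example: a constant like #0x12345000 is emitted as page
+  // number 0x12345 (the value shifted right by 12), while a bare symbol
+  // reference is added as an expression for later relocation.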
- static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
- AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
- Op->Tok.Data = Str.data();
- Op->Tok.Length = Str.size();
- return Op;
+ void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
+ addImmOperands(Inst, N);
}
+ template<int Scale>
+ void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
- void addExpr(MCInst &Inst, const MCExpr *Expr) const {
- // Add as immediates when possible.
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
- Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
- else
- Inst.addOperand(MCOperand::CreateExpr(Expr));
+ if (!MCE) {
+ Inst.addOperand(MCOperand::CreateExpr(getImm()));
+ return;
+ }
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
}
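+  // Illustrative example, assuming the 64-bit load/store variants
+  // instantiate this with Scale == 8: "ldr x0, [x1, #32]" emits the scaled
+  // offset 32 / 8 == 4.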
- template<unsigned RegWidth>
- void addBFILSBOperands(MCInst &Inst, unsigned N) const {
+ void addSImm9Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
- unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
- Inst.addOperand(MCOperand::CreateImm(EncodedVal));
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
+ void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
- Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
}
- void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
+ void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
-
- uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
-
- Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
}
- void addCondCodeOperands(MCInst &Inst, unsigned N) const {
+ void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(getCondCode()));
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
}
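+  // Illustrative example for the s4/s8/s16 family above: the byte offset is
+  // divided by the access size, so "ldp x0, x1, [sp, #16]" (8-byte
+  // registers) goes through addSImm7s8Operands and emits 16 / 8 == 2.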
- void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
+ void addImm0_7Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
-
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
- Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
+ void addImm1_8Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
+ }
- APFloat RealVal(FPImm.Val);
- uint32_t ImmVal;
- A64Imms::isFPImm(RealVal, ImmVal);
+ void addImm0_15Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
+ }
- Inst.addOperand(MCOperand::CreateImm(ImmVal));
+ void addImm1_16Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addFPZeroOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands");
- Inst.addOperand(MCOperand::CreateImm(0));
+ void addImm0_31Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addFPZeroIZeroOperands(MCInst &Inst, unsigned N) const {
- addFPZeroOperands(Inst, N);
+ void addImm1_31Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
+ void addImm1_32Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- unsigned Encoded = A64InvertCondCode(getCondCode());
- Inst.addOperand(MCOperand::CreateImm(Encoded));
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addRegOperands(MCInst &Inst, unsigned N) const {
+ void addImm0_63Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(getReg()));
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addImmOperands(MCInst &Inst, unsigned N) const {
+ void addImm1_63Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- template<int MemSize>
- void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
+ void addImm1_64Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
+ }
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
- uint64_t Val = CE->getValue() / MemSize;
- Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
+ void addImm0_127Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- template<int BitWidth>
- void addSImmOperands(MCInst &Inst, unsigned N) const {
+ void addImm0_255Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
+ }
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
- uint64_t Val = CE->getValue();
- Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
+ void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
- assert (N == 1 && "Invalid number of operands!");
+ void addImm32_63Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
+ }
- addExpr(Inst, ImmWithLSL.Val);
+ void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid logical immediate operand!");
+ uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
+ Inst.addOperand(MCOperand::CreateImm(encoding));
}
- template<unsigned field_width, unsigned scale>
- void addLabelOperands(MCInst &Inst, unsigned N) const {
+ void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid logical immediate operand!");
+ uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
+ Inst.addOperand(MCOperand::CreateImm(encoding));
+ }
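+  // Illustrative sketch of the encoding: encodeLogicalImmediate packs the
+  // bitmask-immediate fields (N, immr, imms). For example, in
+  //   and w0, w1, #0xff
+  // the run of eight ones with no rotation should encode as N = 0,
+  // immr = 0, imms = 0b000111.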
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
+ void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid immediate operand!");
+ uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
+ Inst.addOperand(MCOperand::CreateImm(encoding));
+ }
- if (!CE) {
- addExpr(Inst, Imm.Val);
+ void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
+ // Branch operands don't encode the low bits, so shift them off
+ // here. If it's a label, however, just put it on directly as there's
+ // not enough information now to do anything.
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE) {
+ addExpr(Inst, getImm());
return;
}
-
- int64_t Val = CE->getValue();
- assert(Val % scale == 0 && "Unaligned immediate in instruction");
- Val /= scale;
-
- Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
}
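+  // Illustrative example: a branch whose target is already the constant
+  // offset 8 is emitted as 8 >> 2 == 2, since the low two bits of a branch
+  // target are always zero and are not encoded.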
- template<int MemSize>
- void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
+ void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
+ // Branch operands don't encode the low bits, so shift them off
+ // here. If it's a label, however, just put it on directly as there's
+ // not enough information now to do anything.
assert(N == 1 && "Invalid number of operands!");
-
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
- Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
- } else {
- Inst.addOperand(MCOperand::CreateExpr(getImm()));
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE) {
+ addExpr(Inst, getImm());
+ return;
}
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
}
- template<unsigned RegWidth>
- void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands");
- const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
+ void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
+ // Branch operands don't encode the low bits, so shift them off
+ // here. If it's a label, however, just put it on directly as there's
+ // not enough information now to do anything.
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE) {
+ addExpr(Inst, getImm());
+ return;
+ }
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
+ }
- uint32_t Bits;
- A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
+ void addFPImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getFPImm()));
+ }
- Inst.addOperand(MCOperand::CreateImm(Bits));
+ void addBarrierOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getBarrier()));
}
- void addMRSOperands(MCInst &Inst, unsigned N) const {
+ void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
bool Valid;
- StringRef Name(SysReg.Data, SysReg.Length);
- uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);
+ auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
+ uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
Inst.addOperand(MCOperand::CreateImm(Bits));
}
- void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
+ void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
bool Valid;
- StringRef Name(SysReg.Data, SysReg.Length);
- uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);
+ auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
+ uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
Inst.addOperand(MCOperand::CreateImm(Bits));
}
- void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
+ void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
bool Valid;
- StringRef Name(SysReg.Data, SysReg.Length);
- uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);
+ uint32_t Bits =
+ AArch64PState::PStateMapper().fromString(getSysReg(), Valid);
Inst.addOperand(MCOperand::CreateImm(Bits));
}
- void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 2 && "Invalid number of operands!");
-
- addExpr(Inst, ImmWithLSL.Val);
-
- AArch64MCExpr::VariantKind Variant;
- if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
- Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
- return;
- }
-
- // We know it's relocated
- switch (Variant) {
- case AArch64MCExpr::VK_AARCH64_ABS_G0:
- case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
- case AArch64MCExpr::VK_AARCH64_SABS_G0:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
- case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
- case AArch64MCExpr::VK_AARCH64_TPREL_G0:
- case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
- Inst.addOperand(MCOperand::CreateImm(0));
- break;
- case AArch64MCExpr::VK_AARCH64_ABS_G1:
- case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
- case AArch64MCExpr::VK_AARCH64_SABS_G1:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
- case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
- case AArch64MCExpr::VK_AARCH64_TPREL_G1:
- case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
- Inst.addOperand(MCOperand::CreateImm(1));
- break;
- case AArch64MCExpr::VK_AARCH64_ABS_G2:
- case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
- case AArch64MCExpr::VK_AARCH64_SABS_G2:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
- case AArch64MCExpr::VK_AARCH64_TPREL_G2:
- Inst.addOperand(MCOperand::CreateImm(2));
- break;
- case AArch64MCExpr::VK_AARCH64_ABS_G3:
- Inst.addOperand(MCOperand::CreateImm(3));
- break;
- default: llvm_unreachable("Inappropriate move wide relocation");
- }
+ void addSysCROperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getSysCR()));
}
- template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
- void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
- assert(N == 2 && "Invalid number of operands!");
- int UImm16, Shift;
-
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
- uint64_t Value = CE->getValue();
-
- if (RegWidth == 32) {
- Value &= 0xffffffffULL;
- }
+ void addPrefetchOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
+ }
- bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
- (void)Valid;
- assert(Valid && "Invalid immediates should have been weeded out by now");
+ void addShifterOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ unsigned Imm =
+ AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ }
- Inst.addOperand(MCOperand::CreateImm(UImm16));
- Inst.addOperand(MCOperand::CreateImm(Shift));
+ void addExtendOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
+ unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
+ Inst.addOperand(MCOperand::CreateImm(Imm));
}
- void addPRFMOperands(MCInst &Inst, unsigned N) const {
+ void addExtend64Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
+ unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ }
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
- assert(CE->getValue() >= 0 && CE->getValue() <= 31
- && "PRFM operand should be 5-bits");
+ void addMemExtendOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
+ Inst.addOperand(MCOperand::CreateImm(IsSigned));
+ Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
+ }
- Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+ // For 8-bit load/store instructions with a register offset, both the
+ // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
+  // they're disambiguated by whether the shift was explicit or implicit,
+  // rather than by its size.
+ void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
+ Inst.addOperand(MCOperand::CreateImm(IsSigned));
+ Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
}
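+  // Illustrative example: "ldrb w0, [x1, x2, lsl #0]" has an explicit
+  // amount, so hasShiftExtendAmount() is true and the "DoShift" variant
+  // matches; plain "ldrb w0, [x1, x2]" selects the "NoShift" variant even
+  // though both describe a shift of zero.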
- // For Add-sub (extended register) operands.
- void addRegExtendOperands(MCInst &Inst, unsigned N) const {
+ template<int Shift>
+ void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
+ const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
+ uint64_t Value = CE->getValue();
+ Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
}
- // For Vector Immediates shifted imm operands.
- void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const {
+ template<int Shift>
+ void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24)
- llvm_unreachable("Invalid shift amount for vector immediate inst.");
-
- // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3.
- int64_t Imm = ShiftExtend.Amount / 8;
- Inst.addOperand(MCOperand::CreateImm(Imm));
+ const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
+ uint64_t Value = CE->getValue();
+ Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
}
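+  // Illustrative example: "mov w0, #0xfffffffe" can match the MOVN alias
+  // with Shift == 0; (~0xfffffffe >> 0) & 0xffff == 1, so this emits #1 and
+  // the instruction is encoded as "movn w0, #1".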
- void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
+ void print(raw_ostream &OS) const override;
- if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8)
- llvm_unreachable("Invalid shift amount for vector immediate inst.");
-
- // Encode LSLH shift amount 0, 8 as 0, 1.
- int64_t Imm = ShiftExtend.Amount / 8;
- Inst.addOperand(MCOperand::CreateImm(Imm));
+ static AArch64Operand *CreateToken(StringRef Str, bool IsSuffix, SMLoc S,
+ MCContext &Ctx) {
+ AArch64Operand *Op = new AArch64Operand(k_Token, Ctx);
+ Op->Tok.Data = Str.data();
+ Op->Tok.Length = Str.size();
+ Op->Tok.IsSuffix = IsSuffix;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
}
- void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
-
- if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16)
- llvm_unreachable("Invalid shift amount for vector immediate inst.");
+ static AArch64Operand *CreateReg(unsigned RegNum, bool isVector, SMLoc S,
+ SMLoc E, MCContext &Ctx) {
+ AArch64Operand *Op = new AArch64Operand(k_Register, Ctx);
+ Op->Reg.RegNum = RegNum;
+ Op->Reg.isVector = isVector;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
- // Encode MSL shift amount 8, 16 as 0, 1.
- int64_t Imm = ShiftExtend.Amount / 8 - 1;
- Inst.addOperand(MCOperand::CreateImm(Imm));
+ static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
+ unsigned NumElements, char ElementKind,
+ SMLoc S, SMLoc E, MCContext &Ctx) {
+ AArch64Operand *Op = new AArch64Operand(k_VectorList, Ctx);
+ Op->VectorList.RegNum = RegNum;
+ Op->VectorList.Count = Count;
+ Op->VectorList.NumElements = NumElements;
+ Op->VectorList.ElementKind = ElementKind;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
}
- // For the extend in load-store (register offset) instructions.
- template<unsigned MemSize>
- void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
- addAddrRegExtendOperands(Inst, N, MemSize);
+ static AArch64Operand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
+ MCContext &Ctx) {
+ AArch64Operand *Op = new AArch64Operand(k_VectorIndex, Ctx);
+ Op->VectorIndex.Val = Idx;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
}
- void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
- unsigned MemSize) const {
- assert(N == 1 && "Invalid number of operands!");
+ static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E,
+ MCContext &Ctx) {
+ AArch64Operand *Op = new AArch64Operand(k_Immediate, Ctx);
+ Op->Imm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
- // First bit of Option is set in instruction classes, the high two bits are
- // as follows:
- unsigned OptionHi = 0;
- switch (ShiftExtend.ShiftType) {
- case A64SE::UXTW:
- case A64SE::LSL:
- OptionHi = 1;
- break;
- case A64SE::SXTW:
- case A64SE::SXTX:
- OptionHi = 3;
- break;
- default:
- llvm_unreachable("Invalid extend type for register offset");
- }
+ static AArch64Operand *CreateShiftedImm(const MCExpr *Val,
+ unsigned ShiftAmount, SMLoc S,
+ SMLoc E, MCContext &Ctx) {
+ AArch64Operand *Op = new AArch64Operand(k_ShiftedImm, Ctx);
+    Op->ShiftedImm.Val = Val;
+ Op->ShiftedImm.ShiftAmount = ShiftAmount;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
- unsigned S = 0;
- if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
- S = 1;
- else if (MemSize != 1 && ShiftExtend.Amount != 0)
- S = 1;
+ static AArch64Operand *CreateCondCode(AArch64CC::CondCode Code, SMLoc S,
+ SMLoc E, MCContext &Ctx) {
+ AArch64Operand *Op = new AArch64Operand(k_CondCode, Ctx);
+ Op->CondCode.Code = Code;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
- Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
+ static AArch64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
+ AArch64Operand *Op = new AArch64Operand(k_FPImm, Ctx);
+ Op->FPImm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
}
- void addShiftOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
+ static AArch64Operand *CreateBarrier(unsigned Val, SMLoc S, MCContext &Ctx) {
+ AArch64Operand *Op = new AArch64Operand(k_Barrier, Ctx);
+ Op->Barrier.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
}
- void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
+ static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S,
+ uint64_t FeatureBits, MCContext &Ctx) {
+ AArch64Operand *Op = new AArch64Operand(k_SysReg, Ctx);
+ Op->SysReg.Data = Str.data();
+ Op->SysReg.Length = Str.size();
+ Op->SysReg.FeatureBits = FeatureBits;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
+ }
- // A bit from each byte in the constant forms the encoded immediate
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- uint64_t Value = CE->getValue();
+ static AArch64Operand *CreateSysCR(unsigned Val, SMLoc S, SMLoc E,
+ MCContext &Ctx) {
+ AArch64Operand *Op = new AArch64Operand(k_SysCR, Ctx);
+ Op->SysCRImm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
- unsigned Imm = 0;
- for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
- Imm |= (Value & 1) << i;
- }
- Inst.addOperand(MCOperand::CreateImm(Imm));
+ static AArch64Operand *CreatePrefetch(unsigned Val, SMLoc S, MCContext &Ctx) {
+ AArch64Operand *Op = new AArch64Operand(k_Prefetch, Ctx);
+ Op->Prefetch.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
}
- void addVectorListOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
+ static AArch64Operand *CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp,
+ unsigned Val, bool HasExplicitAmount,
+ SMLoc S, SMLoc E, MCContext &Ctx) {
+ AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, Ctx);
+ Op->ShiftExtend.Type = ShOp;
+ Op->ShiftExtend.Amount = Val;
+ Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
}
};
} // end anonymous namespace.
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- StringRef Mnemonic) {
+void AArch64Operand::print(raw_ostream &OS) const {
+ switch (Kind) {
+ case k_FPImm:
+ OS << "<fpimm " << getFPImm() << "("
+ << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
+ break;
+ case k_Barrier: {
+ bool Valid;
+ StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
+ if (Valid)
+ OS << "<barrier " << Name << ">";
+ else
+ OS << "<barrier invalid #" << getBarrier() << ">";
+ break;
+ }
+ case k_Immediate:
+ getImm()->print(OS);
+ break;
+ case k_ShiftedImm: {
+ unsigned Shift = getShiftedImmShift();
+ OS << "<shiftedimm ";
+ getShiftedImmVal()->print(OS);
+ OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
+ break;
+ }
+ case k_CondCode:
+ OS << "<condcode " << getCondCode() << ">";
+ break;
+ case k_Register:
+ OS << "<register " << getReg() << ">";
+ break;
+ case k_VectorList: {
+ OS << "<vectorlist ";
+ unsigned Reg = getVectorListStart();
+ for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
+ OS << Reg + i << " ";
+ OS << ">";
+ break;
+ }
+ case k_VectorIndex:
+ OS << "<vectorindex " << getVectorIndex() << ">";
+ break;
+ case k_SysReg:
+ OS << "<sysreg: " << getSysReg() << '>';
+ break;
+ case k_Token:
+ OS << "'" << getToken() << "'";
+ break;
+ case k_SysCR:
+ OS << "c" << getSysCR();
+ break;
+ case k_Prefetch: {
+ bool Valid;
+ StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
+ if (Valid)
+ OS << "<prfop " << Name << ">";
+ else
+ OS << "<prfop invalid #" << getPrefetch() << ">";
+ break;
+ }
+ case k_ShiftExtend: {
+ OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
+ << getShiftExtendAmount();
+ if (!hasShiftExtendAmount())
+ OS << "<imp>";
+ OS << '>';
+ break;
+ }
+ }
+}
- // See if the operand has a custom parser
- OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
+/// @name Auto-generated Match Functions
+/// {
- // It could either succeed, fail or just not care.
- if (ResTy != MatchOperand_NoMatch)
- return ResTy;
+static unsigned MatchRegisterName(StringRef Name);
- switch (getLexer().getKind()) {
- default:
- Error(Parser.getTok().getLoc(), "unexpected token in operand");
- return MatchOperand_ParseFail;
- case AsmToken::Identifier: {
- // It might be in the LSL/UXTB family ...
- OperandMatchResultTy GotShift = ParseShiftExtend(Operands);
+/// }
+
+static unsigned matchVectorRegName(StringRef Name) {
+ return StringSwitch<unsigned>(Name)
+ .Case("v0", AArch64::Q0)
+ .Case("v1", AArch64::Q1)
+ .Case("v2", AArch64::Q2)
+ .Case("v3", AArch64::Q3)
+ .Case("v4", AArch64::Q4)
+ .Case("v5", AArch64::Q5)
+ .Case("v6", AArch64::Q6)
+ .Case("v7", AArch64::Q7)
+ .Case("v8", AArch64::Q8)
+ .Case("v9", AArch64::Q9)
+ .Case("v10", AArch64::Q10)
+ .Case("v11", AArch64::Q11)
+ .Case("v12", AArch64::Q12)
+ .Case("v13", AArch64::Q13)
+ .Case("v14", AArch64::Q14)
+ .Case("v15", AArch64::Q15)
+ .Case("v16", AArch64::Q16)
+ .Case("v17", AArch64::Q17)
+ .Case("v18", AArch64::Q18)
+ .Case("v19", AArch64::Q19)
+ .Case("v20", AArch64::Q20)
+ .Case("v21", AArch64::Q21)
+ .Case("v22", AArch64::Q22)
+ .Case("v23", AArch64::Q23)
+ .Case("v24", AArch64::Q24)
+ .Case("v25", AArch64::Q25)
+ .Case("v26", AArch64::Q26)
+ .Case("v27", AArch64::Q27)
+ .Case("v28", AArch64::Q28)
+ .Case("v29", AArch64::Q29)
+ .Case("v30", AArch64::Q30)
+ .Case("v31", AArch64::Q31)
+ .Default(0);
+}
- // We can only continue if no tokens were eaten.
- if (GotShift != MatchOperand_NoMatch)
- return GotShift;
+static bool isValidVectorKind(StringRef Name) {
+ return StringSwitch<bool>(Name.lower())
+ .Case(".8b", true)
+ .Case(".16b", true)
+ .Case(".4h", true)
+ .Case(".8h", true)
+ .Case(".2s", true)
+ .Case(".4s", true)
+ .Case(".1d", true)
+ .Case(".2d", true)
+ .Case(".1q", true)
+  // Accept the width-neutral ones, too, for verbose syntax. If those
+ // aren't used in the right places, the token operand won't match so
+ // all will work out.
+ .Case(".b", true)
+ .Case(".h", true)
+ .Case(".s", true)
+ .Case(".d", true)
+ .Default(false);
+}
- // ... or it might be a register ...
- uint32_t NumLanes = 0;
- OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
- assert(GotReg != MatchOperand_ParseFail
- && "register parsing shouldn't partially succeed");
-
- if (GotReg == MatchOperand_Success) {
- if (Parser.getTok().is(AsmToken::LBrac))
- return ParseNEONLane(Operands, NumLanes);
- else
- return MatchOperand_Success;
- }
- // ... or it might be a symbolish thing
- }
- // Fall through
- case AsmToken::LParen: // E.g. (strcmp-4)
- case AsmToken::Integer: // 1f, 2b labels
- case AsmToken::String: // quoted labels
- case AsmToken::Dot: // . is Current location
- case AsmToken::Dollar: // $ is PC
- case AsmToken::Colon: {
- SMLoc StartLoc = Parser.getTok().getLoc();
- SMLoc EndLoc;
- const MCExpr *ImmVal = 0;
-
- if (ParseImmediate(ImmVal) != MatchOperand_Success)
- return MatchOperand_ParseFail;
+static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
+ char &ElementKind) {
+ assert(isValidVectorKind(Name));
- EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
- return MatchOperand_Success;
- }
- case AsmToken::Hash: { // Immediates
- SMLoc StartLoc = Parser.getTok().getLoc();
- SMLoc EndLoc;
- const MCExpr *ImmVal = 0;
- Parser.Lex();
+ ElementKind = Name.lower()[Name.size() - 1];
+ NumElements = 0;
- if (ParseImmediate(ImmVal) != MatchOperand_Success)
- return MatchOperand_ParseFail;
+ if (Name.size() == 2)
+ return;
- EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
- return MatchOperand_Success;
+ // Parse the lane count
+ Name = Name.drop_front();
+ while (isdigit(Name.front())) {
+ NumElements = 10 * NumElements + (Name.front() - '0');
+ Name = Name.drop_front();
}
- case AsmToken::LBrac: {
- SMLoc Loc = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateToken("[", Loc));
- Parser.Lex(); // Eat '['
+}
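+// Illustrative example: parseValidVectorKind(".16b", N, K) yields
+// NumElements == 16 and ElementKind == 'b'; a width-neutral kind such as
+// ".b" yields NumElements == 0 because the early return skips the
+// lane-count loop.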
- // There's no comma after a '[', so we can parse the next operand
- // immediately.
- return ParseOperand(Operands, Mnemonic);
- }
- // The following will likely be useful later, but not in very early cases
- case AsmToken::LCurly: // SIMD vector list is not parsed here
- llvm_unreachable("Don't know how to deal with '{' in operand");
- return MatchOperand_ParseFail;
+bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
+ SMLoc &EndLoc) {
+ StartLoc = getLoc();
+ RegNo = tryParseRegister();
+ EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ return (RegNo == (unsigned)-1);
+}
+
+/// tryParseRegister - Try to parse a register name. The token must be an
+/// Identifier when called. If it is a register name, the token is eaten and
+/// the register number is returned; otherwise -1 is returned.
+int AArch64AsmParser::tryParseRegister() {
+ const AsmToken &Tok = Parser.getTok();
+ assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
+
+ std::string lowerCase = Tok.getString().lower();
+ unsigned RegNum = MatchRegisterName(lowerCase);
+ // Also handle a few aliases of registers.
+ if (RegNum == 0)
+ RegNum = StringSwitch<unsigned>(lowerCase)
+ .Case("fp", AArch64::FP)
+ .Case("lr", AArch64::LR)
+ .Case("x31", AArch64::XZR)
+ .Case("w31", AArch64::WZR)
+ .Default(0);
+
+ if (RegNum == 0)
+ return -1;
+
+ Parser.Lex(); // Eat identifier token.
+ return RegNum;
+}
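+// Illustrative examples: "x3" resolves through MatchRegisterName; "fp" and
+// "lr" map to AArch64::FP and AArch64::LR via the alias switch; "x31" and
+// "w31" are accepted as the zero registers XZR and WZR; anything else
+// returns -1 and leaves the token unconsumed.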
+
+/// tryMatchVectorRegister - Try to parse a vector register name with optional
+/// kind specifier. If it is a register specifier, eat the token and return it.
+int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
+ if (Parser.getTok().isNot(AsmToken::Identifier)) {
+ TokError("vector register expected");
+ return -1;
+ }
+
+ StringRef Name = Parser.getTok().getString();
+ // If there is a kind specifier, it's separated from the register name by
+ // a '.'.
+ size_t Start = 0, Next = Name.find('.');
+ StringRef Head = Name.slice(Start, Next);
+ unsigned RegNum = matchVectorRegName(Head);
+ if (RegNum) {
+ if (Next != StringRef::npos) {
+ Kind = Name.slice(Next, StringRef::npos);
+ if (!isValidVectorKind(Kind)) {
+ TokError("invalid vector kind qualifier");
+ return -1;
+ }
+ }
+ Parser.Lex(); // Eat the register token.
+ return RegNum;
}
+
+ if (expected)
+ TokError("vector register expected");
+ return -1;
}
+/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
- if (getLexer().is(AsmToken::Colon)) {
- AArch64MCExpr::VariantKind RefKind;
+AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
+ SMLoc S = getLoc();
- OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
- if (ResTy != MatchOperand_Success)
- return ResTy;
+ if (Parser.getTok().isNot(AsmToken::Identifier)) {
+ Error(S, "Expected cN operand where 0 <= N <= 15");
+ return MatchOperand_ParseFail;
+ }
- const MCExpr *SubExprVal;
- if (getParser().parseExpression(SubExprVal))
- return MatchOperand_ParseFail;
+ StringRef Tok = Parser.getTok().getIdentifier();
+ if (Tok[0] != 'c' && Tok[0] != 'C') {
+ Error(S, "Expected cN operand where 0 <= N <= 15");
+ return MatchOperand_ParseFail;
+ }
- ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
- return MatchOperand_Success;
+ uint32_t CRNum;
+ bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
+ if (BadNum || CRNum > 15) {
+ Error(S, "Expected cN operand where 0 <= N <= 15");
+ return MatchOperand_ParseFail;
}
- // No weird AArch64MCExpr prefix
- return getParser().parseExpression(ExprVal)
- ? MatchOperand_ParseFail : MatchOperand_Success;
+ Parser.Lex(); // Eat identifier token.
+ Operands.push_back(
+ AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
+ return MatchOperand_Success;
}
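+// Illustrative examples: "c7" and "C12" parse to CR numbers 7 and 12, while
+// "c16" and "d3" are rejected with the range diagnostic above.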
-// A lane attached to a NEON register. "[N]", which should yield three tokens:
-// '[', N, ']'. A hash is not allowed to precede the immediate here.
+/// tryParsePrefetch - Try to parse a prefetch operand.
AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- uint32_t NumLanes) {
- SMLoc Loc = Parser.getTok().getLoc();
+AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ const AsmToken &Tok = Parser.getTok();
+ // Either an identifier for named values or a 5-bit immediate.
+ bool Hash = Tok.is(AsmToken::Hash);
+ if (Hash || Tok.is(AsmToken::Integer)) {
+ if (Hash)
+ Parser.Lex(); // Eat hash token.
+ const MCExpr *ImmVal;
+ if (getParser().parseExpression(ImmVal))
+ return MatchOperand_ParseFail;
- assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
- Operands.push_back(AArch64Operand::CreateToken("[", Loc));
- Parser.Lex(); // Eat '['
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+ if (!MCE) {
+ TokError("immediate value expected for prefetch operand");
+ return MatchOperand_ParseFail;
+ }
+ unsigned prfop = MCE->getValue();
+ if (prfop > 31) {
+ TokError("prefetch operand out of range, [0,31] expected");
+ return MatchOperand_ParseFail;
+ }
- if (Parser.getTok().isNot(AsmToken::Integer)) {
- Error(Parser.getTok().getLoc(), "expected lane number");
+ Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
+ return MatchOperand_Success;
+ }
+
+ if (Tok.isNot(AsmToken::Identifier)) {
+ TokError("pre-fetch hint expected");
return MatchOperand_ParseFail;
}
- if (Parser.getTok().getIntVal() >= NumLanes) {
- Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
+ bool Valid;
+ unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
+ if (!Valid) {
+ TokError("pre-fetch hint expected");
return MatchOperand_ParseFail;
}
- const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
- getContext());
- SMLoc S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat actual lane
- SMLoc E = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));
+ Parser.Lex(); // Eat identifier token.
+ Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
+ return MatchOperand_Success;
+}
+/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
+/// instruction.
+AArch64AsmParser::OperandMatchResultTy
+AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ const MCExpr *Expr;
- if (Parser.getTok().isNot(AsmToken::RBrac)) {
- Error(Parser.getTok().getLoc(), "expected ']' after lane");
+ if (Parser.getTok().is(AsmToken::Hash)) {
+ Parser.Lex(); // Eat hash token.
+ }
+
+ if (parseSymbolicImmVal(Expr))
return MatchOperand_ParseFail;
+
+ AArch64MCExpr::VariantKind ELFRefKind;
+ MCSymbolRefExpr::VariantKind DarwinRefKind;
+ int64_t Addend;
+ if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
+ if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
+ ELFRefKind == AArch64MCExpr::VK_INVALID) {
+ // No modifier was specified at all; this is the syntax for an ELF basic
+ // ADRP relocation (unfortunately).
+ Expr =
+ AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
+ } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
+ DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
+ Addend != 0) {
+      Error(S, "gotpage label reference not allowed to have an addend");
+ return MatchOperand_ParseFail;
+ } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
+ DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
+ DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
+ ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
+ ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
+ ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
+ // The operand must be an @page or @gotpage qualified symbolref.
+ Error(S, "page or gotpage label reference expected");
+ return MatchOperand_ParseFail;
+ }
}
- Operands.push_back(AArch64Operand::CreateToken("]", Loc));
- Parser.Lex(); // Eat ']'
+  // We have either a label reference, possibly with an addend, or an
+  // immediate. The addend is a raw value here; the linker will adjust it to
+  // reference only the page.
+ SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
return MatchOperand_Success;
}
+/// tryParseAdrLabel - Parse and validate a source label for the ADR
+/// instruction.
AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
- assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
- Parser.Lex();
+AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ const MCExpr *Expr;
- if (getLexer().isNot(AsmToken::Identifier)) {
- Error(Parser.getTok().getLoc(),
- "expected relocation specifier in operand after ':'");
- return MatchOperand_ParseFail;
+ if (Parser.getTok().is(AsmToken::Hash)) {
+ Parser.Lex(); // Eat hash token.
}
- std::string LowerCase = Parser.getTok().getIdentifier().lower();
- RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
- .Case("got", AArch64MCExpr::VK_AARCH64_GOT)
- .Case("got_lo12", AArch64MCExpr::VK_AARCH64_GOT_LO12)
- .Case("lo12", AArch64MCExpr::VK_AARCH64_LO12)
- .Case("abs_g0", AArch64MCExpr::VK_AARCH64_ABS_G0)
- .Case("abs_g0_nc", AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
- .Case("abs_g1", AArch64MCExpr::VK_AARCH64_ABS_G1)
- .Case("abs_g1_nc", AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
- .Case("abs_g2", AArch64MCExpr::VK_AARCH64_ABS_G2)
- .Case("abs_g2_nc", AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
- .Case("abs_g3", AArch64MCExpr::VK_AARCH64_ABS_G3)
- .Case("abs_g0_s", AArch64MCExpr::VK_AARCH64_SABS_G0)
- .Case("abs_g1_s", AArch64MCExpr::VK_AARCH64_SABS_G1)
- .Case("abs_g2_s", AArch64MCExpr::VK_AARCH64_SABS_G2)
- .Case("dtprel_g2", AArch64MCExpr::VK_AARCH64_DTPREL_G2)
- .Case("dtprel_g1", AArch64MCExpr::VK_AARCH64_DTPREL_G1)
- .Case("dtprel_g1_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
- .Case("dtprel_g0", AArch64MCExpr::VK_AARCH64_DTPREL_G0)
- .Case("dtprel_g0_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
- .Case("dtprel_hi12", AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
- .Case("dtprel_lo12", AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
- .Case("dtprel_lo12_nc", AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
- .Case("gottprel_g1", AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
- .Case("gottprel_g0_nc", AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
- .Case("gottprel", AArch64MCExpr::VK_AARCH64_GOTTPREL)
- .Case("gottprel_lo12", AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
- .Case("tprel_g2", AArch64MCExpr::VK_AARCH64_TPREL_G2)
- .Case("tprel_g1", AArch64MCExpr::VK_AARCH64_TPREL_G1)
- .Case("tprel_g1_nc", AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
- .Case("tprel_g0", AArch64MCExpr::VK_AARCH64_TPREL_G0)
- .Case("tprel_g0_nc", AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
- .Case("tprel_hi12", AArch64MCExpr::VK_AARCH64_TPREL_HI12)
- .Case("tprel_lo12", AArch64MCExpr::VK_AARCH64_TPREL_LO12)
- .Case("tprel_lo12_nc", AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
- .Case("tlsdesc", AArch64MCExpr::VK_AARCH64_TLSDESC)
- .Case("tlsdesc_lo12", AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
- .Default(AArch64MCExpr::VK_AARCH64_None);
-
- if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
- Error(Parser.getTok().getLoc(),
- "expected relocation specifier in operand after ':'");
+ if (getParser().parseExpression(Expr))
return MatchOperand_ParseFail;
- }
- Parser.Lex(); // Eat identifier
- if (getLexer().isNot(AsmToken::Colon)) {
- Error(Parser.getTok().getLoc(),
- "expected ':' after relocation specifier");
- return MatchOperand_ParseFail;
- }
- Parser.Lex();
+ SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
+
return MatchOperand_Success;
}
+/// tryParseFPImm - A floating point immediate expression operand.
AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseImmWithLSLOperand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
+ SMLoc S = getLoc();
- SMLoc S = Parser.getTok().getLoc();
+ bool Hash = false;
+ if (Parser.getTok().is(AsmToken::Hash)) {
+ Parser.Lex(); // Eat '#'
+ Hash = true;
+ }
+
+ // Handle negation, as that still comes through as a separate token.
+ bool isNegative = false;
+ if (Parser.getTok().is(AsmToken::Minus)) {
+ isNegative = true;
+ Parser.Lex();
+ }
+ const AsmToken &Tok = Parser.getTok();
+ if (Tok.is(AsmToken::Real)) {
+ APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
+ uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
+ // If we had a '-' in front, toggle the sign bit.
+ IntVal ^= (uint64_t)isNegative << 63;
+ int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
+ Parser.Lex(); // Eat the token.
+    // Check for out-of-range values. As an exception, we let zero through,
+ // as we handle that special case in post-processing before matching in
+ // order to use the zero register for it.
+ if (Val == -1 && !RealVal.isZero()) {
+ TokError("expected compatible register or floating-point constant");
+ return MatchOperand_ParseFail;
+ }
+ Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
+ return MatchOperand_Success;
+ }
+ if (Tok.is(AsmToken::Integer)) {
+ int64_t Val;
+ if (!isNegative && Tok.getString().startswith("0x")) {
+ Val = Tok.getIntVal();
+ if (Val > 255 || Val < 0) {
+ TokError("encoded floating point value out of range");
+ return MatchOperand_ParseFail;
+ }
+ } else {
+ APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
+ uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
+ // If we had a '-' in front, toggle the sign bit.
+ IntVal ^= (uint64_t)isNegative << 63;
+ Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
+ }
+ Parser.Lex(); // Eat the token.
+ Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
+ return MatchOperand_Success;
+ }
+
+ if (!Hash)
+ return MatchOperand_NoMatch;
+
+ TokError("invalid floating point immediate");
+ return MatchOperand_ParseFail;
+}
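+// Illustrative example: "fmov d0, #1.0" should map through getFP64Imm to
+// the 8-bit encoding 0x70, while a value like #3.0e9 is not representable
+// and is rejected; #0.0 is deliberately let through with Val == -1 so the
+// zero-register rewrite mentioned above can handle it.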
+
+/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
+AArch64AsmParser::OperandMatchResultTy
+AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
+ SMLoc S = getLoc();
if (Parser.getTok().is(AsmToken::Hash))
Parser.Lex(); // Eat '#'
@@ -1491,11 +2091,21 @@ AArch64AsmParser::ParseImmWithLSLOperand(
return MatchOperand_NoMatch;
const MCExpr *Imm;
- if (ParseImmediate(Imm) != MatchOperand_Success)
+ if (parseSymbolicImmVal(Imm))
return MatchOperand_ParseFail;
else if (Parser.getTok().isNot(AsmToken::Comma)) {
+ uint64_t ShiftAmount = 0;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
+ if (MCE) {
+ int64_t Val = MCE->getValue();
+ if (Val > 0xfff && (Val & 0xfff) == 0) {
+ Imm = MCConstantExpr::Create(Val >> 12, getContext());
+ ShiftAmount = 12;
+ }
+ }
SMLoc E = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
+ Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
+ getContext()));
return MatchOperand_Success;
}
@@ -1503,18 +2113,22 @@ AArch64AsmParser::ParseImmWithLSLOperand(
Parser.Lex();
// The optional operand must be "lsl #N" where N is non-negative.
- if (Parser.getTok().is(AsmToken::Identifier)
- && Parser.getTok().getIdentifier().equals_lower("lsl")) {
- Parser.Lex();
+ if (!Parser.getTok().is(AsmToken::Identifier) ||
+ !Parser.getTok().getIdentifier().equals_lower("lsl")) {
+ Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
+ return MatchOperand_ParseFail;
+ }
- if (Parser.getTok().is(AsmToken::Hash)) {
- Parser.Lex();
+ // Eat 'lsl'
+ Parser.Lex();
- if (Parser.getTok().isNot(AsmToken::Integer)) {
- Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
- return MatchOperand_ParseFail;
- }
- }
+ if (Parser.getTok().is(AsmToken::Hash)) {
+ Parser.Lex();
+ }
+
+ if (Parser.getTok().isNot(AsmToken::Integer)) {
+ Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
+ return MatchOperand_ParseFail;
}
int64_t ShiftAmount = Parser.getTok().getIntVal();
@@ -1526,791 +2140,977 @@ AArch64AsmParser::ParseImmWithLSLOperand(
Parser.Lex(); // Eat the number
SMLoc E = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
- false, S, E));
+ Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
+ S, E, getContext()));
return MatchOperand_Success;
}
+/// parseCondCodeString - Parse a Condition Code string.
+AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
+ AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
+ .Case("eq", AArch64CC::EQ)
+ .Case("ne", AArch64CC::NE)
+ .Case("cs", AArch64CC::HS)
+ .Case("hs", AArch64CC::HS)
+ .Case("cc", AArch64CC::LO)
+ .Case("lo", AArch64CC::LO)
+ .Case("mi", AArch64CC::MI)
+ .Case("pl", AArch64CC::PL)
+ .Case("vs", AArch64CC::VS)
+ .Case("vc", AArch64CC::VC)
+ .Case("hi", AArch64CC::HI)
+ .Case("ls", AArch64CC::LS)
+ .Case("ge", AArch64CC::GE)
+ .Case("lt", AArch64CC::LT)
+ .Case("gt", AArch64CC::GT)
+ .Case("le", AArch64CC::LE)
+ .Case("al", AArch64CC::AL)
+ .Case("nv", AArch64CC::NV)
+ .Default(AArch64CC::Invalid);
+ return CC;
+}
+
+/// parseCondCode - Parse a Condition Code operand.
+bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
+ bool invertCondCode) {
+ SMLoc S = getLoc();
+ const AsmToken &Tok = Parser.getTok();
+ assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
+
+ StringRef Cond = Tok.getString();
+ AArch64CC::CondCode CC = parseCondCodeString(Cond);
+ if (CC == AArch64CC::Invalid)
+ return TokError("invalid condition code");
+ Parser.Lex(); // Eat identifier token.
+
+ if (invertCondCode)
+ CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
+
+ Operands.push_back(
+ AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
+ return false;
+}
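+// Illustrative example: for an alias such as "cinc w0, w1, eq" the caller
+// passes invertCondCode == true, so the operand pushed here is NE and the
+// instruction matches as "csinc w0, w1, w1, ne".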
+/// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
+/// argument. Parse it if present.
AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseCondCodeOperand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- if (Parser.getTok().isNot(AsmToken::Identifier))
+AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
+ const AsmToken &Tok = Parser.getTok();
+ std::string LowerID = Tok.getString().lower();
+ AArch64_AM::ShiftExtendType ShOp =
+ StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
+ .Case("lsl", AArch64_AM::LSL)
+ .Case("lsr", AArch64_AM::LSR)
+ .Case("asr", AArch64_AM::ASR)
+ .Case("ror", AArch64_AM::ROR)
+ .Case("msl", AArch64_AM::MSL)
+ .Case("uxtb", AArch64_AM::UXTB)
+ .Case("uxth", AArch64_AM::UXTH)
+ .Case("uxtw", AArch64_AM::UXTW)
+ .Case("uxtx", AArch64_AM::UXTX)
+ .Case("sxtb", AArch64_AM::SXTB)
+ .Case("sxth", AArch64_AM::SXTH)
+ .Case("sxtw", AArch64_AM::SXTW)
+ .Case("sxtx", AArch64_AM::SXTX)
+ .Default(AArch64_AM::InvalidShiftExtend);
+
+ if (ShOp == AArch64_AM::InvalidShiftExtend)
return MatchOperand_NoMatch;
- StringRef Tok = Parser.getTok().getIdentifier();
- A64CC::CondCodes CondCode = A64StringToCondCode(Tok);
+ SMLoc S = Tok.getLoc();
+ Parser.Lex();
- if (CondCode == A64CC::Invalid)
- return MatchOperand_NoMatch;
+ bool Hash = getLexer().is(AsmToken::Hash);
+ if (!Hash && getLexer().isNot(AsmToken::Integer)) {
+ if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
+ ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
+ ShOp == AArch64_AM::MSL) {
+ // We expect a number here.
+ TokError("expected #imm after shift specifier");
+ return MatchOperand_ParseFail;
+ }
- SMLoc S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat condition code
- SMLoc E = Parser.getTok().getLoc();
+    // "extend" type operations don't need an immediate, #0 is implicit.
+ SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(
+ AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
+ return MatchOperand_Success;
+ }
- Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
- return MatchOperand_Success;
-}
+ if (Hash)
+ Parser.Lex(); // Eat the '#'.
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseCRxOperand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- SMLoc S = Parser.getTok().getLoc();
- if (Parser.getTok().isNot(AsmToken::Identifier)) {
- Error(S, "Expected cN operand where 0 <= N <= 15");
+  // Make sure we actually have a number.
+ if (!Parser.getTok().is(AsmToken::Integer)) {
+ Error(Parser.getTok().getLoc(),
+ "expected integer shift amount");
return MatchOperand_ParseFail;
}
- StringRef Tok = Parser.getTok().getIdentifier();
- if (Tok[0] != 'c' && Tok[0] != 'C') {
- Error(S, "Expected cN operand where 0 <= N <= 15");
+ const MCExpr *ImmVal;
+ if (getParser().parseExpression(ImmVal))
return MatchOperand_ParseFail;
- }
- uint32_t CRNum;
- bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
- if (BadNum || CRNum > 15) {
- Error(S, "Expected cN operand where 0 <= N <= 15");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+ if (!MCE) {
+ TokError("expected #imm after shift specifier");
return MatchOperand_ParseFail;
}
- const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());
-
- Parser.Lex();
- SMLoc E = Parser.getTok().getLoc();
-
- Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
+ SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(AArch64Operand::CreateShiftExtend(
+ ShOp, MCE->getValue(), true, S, E, getContext()));
return MatchOperand_Success;
}
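+// Illustrative examples: "add x0, x1, w2, uxtw" takes the implicit-#0 path
+// above and records Amount == 0 with HasExplicitAmount == false;
+// "add x0, x1, w2, uxtw #2" records an explicit amount of 2; a bare "lsl"
+// with no immediate is a parse error.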
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseFPImmOperand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
+/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
+bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
+ OperandVector &Operands) {
+ if (Name.find('.') != StringRef::npos)
+ return TokError("invalid operand");
- SMLoc S = Parser.getTok().getLoc();
+ Mnemonic = Name;
+ Operands.push_back(
+ AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
- bool Hash = false;
- if (Parser.getTok().is(AsmToken::Hash)) {
- Parser.Lex(); // Eat '#'
- Hash = true;
- }
+ const AsmToken &Tok = Parser.getTok();
+ StringRef Op = Tok.getString();
+ SMLoc S = Tok.getLoc();
- bool Negative = false;
- if (Parser.getTok().is(AsmToken::Minus)) {
- Negative = true;
- Parser.Lex(); // Eat '-'
- } else if (Parser.getTok().is(AsmToken::Plus)) {
- Parser.Lex(); // Eat '+'
+ const MCExpr *Expr = nullptr;
+
+#define SYS_ALIAS(op1, Cn, Cm, op2) \
+ do { \
+ Expr = MCConstantExpr::Create(op1, getContext()); \
+ Operands.push_back( \
+ AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
+ Operands.push_back( \
+ AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
+ Operands.push_back( \
+ AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
+ Expr = MCConstantExpr::Create(op2, getContext()); \
+ Operands.push_back( \
+ AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
+ } while (0)
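+// Illustrative expansion: "ic ivau, x0" selects SYS_ALIAS(3, 7, 5, 1)
+// below, pushing #3, c7, c5 and #1, so the instruction is matched exactly
+// as if "sys #3, c7, c5, #1, x0" had been written.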
+
+ if (Mnemonic == "ic") {
+ if (!Op.compare_lower("ialluis")) {
+ // SYS #0, C7, C1, #0
+ SYS_ALIAS(0, 7, 1, 0);
+ } else if (!Op.compare_lower("iallu")) {
+ // SYS #0, C7, C5, #0
+ SYS_ALIAS(0, 7, 5, 0);
+ } else if (!Op.compare_lower("ivau")) {
+ // SYS #3, C7, C5, #1
+ SYS_ALIAS(3, 7, 5, 1);
+ } else {
+ return TokError("invalid operand for IC instruction");
+ }
+ } else if (Mnemonic == "dc") {
+ if (!Op.compare_lower("zva")) {
+ // SYS #3, C7, C4, #1
+ SYS_ALIAS(3, 7, 4, 1);
+ } else if (!Op.compare_lower("ivac")) {
+      // SYS #0, C7, C6, #1
+ SYS_ALIAS(0, 7, 6, 1);
+ } else if (!Op.compare_lower("isw")) {
+ // SYS #0, C7, C6, #2
+ SYS_ALIAS(0, 7, 6, 2);
+ } else if (!Op.compare_lower("cvac")) {
+ // SYS #3, C7, C10, #1
+ SYS_ALIAS(3, 7, 10, 1);
+ } else if (!Op.compare_lower("csw")) {
+ // SYS #0, C7, C10, #2
+ SYS_ALIAS(0, 7, 10, 2);
+ } else if (!Op.compare_lower("cvau")) {
+ // SYS #3, C7, C11, #1
+ SYS_ALIAS(3, 7, 11, 1);
+ } else if (!Op.compare_lower("civac")) {
+ // SYS #3, C7, C14, #1
+ SYS_ALIAS(3, 7, 14, 1);
+ } else if (!Op.compare_lower("cisw")) {
+ // SYS #0, C7, C14, #2
+ SYS_ALIAS(0, 7, 14, 2);
+ } else {
+ return TokError("invalid operand for DC instruction");
+ }
+ } else if (Mnemonic == "at") {
+ if (!Op.compare_lower("s1e1r")) {
+ // SYS #0, C7, C8, #0
+ SYS_ALIAS(0, 7, 8, 0);
+ } else if (!Op.compare_lower("s1e2r")) {
+ // SYS #4, C7, C8, #0
+ SYS_ALIAS(4, 7, 8, 0);
+ } else if (!Op.compare_lower("s1e3r")) {
+ // SYS #6, C7, C8, #0
+ SYS_ALIAS(6, 7, 8, 0);
+ } else if (!Op.compare_lower("s1e1w")) {
+ // SYS #0, C7, C8, #1
+ SYS_ALIAS(0, 7, 8, 1);
+ } else if (!Op.compare_lower("s1e2w")) {
+ // SYS #4, C7, C8, #1
+ SYS_ALIAS(4, 7, 8, 1);
+ } else if (!Op.compare_lower("s1e3w")) {
+ // SYS #6, C7, C8, #1
+ SYS_ALIAS(6, 7, 8, 1);
+ } else if (!Op.compare_lower("s1e0r")) {
+      // SYS #0, C7, C8, #2
+ SYS_ALIAS(0, 7, 8, 2);
+ } else if (!Op.compare_lower("s1e0w")) {
+ // SYS #0, C7, C8, #3
+ SYS_ALIAS(0, 7, 8, 3);
+ } else if (!Op.compare_lower("s12e1r")) {
+ // SYS #4, C7, C8, #4
+ SYS_ALIAS(4, 7, 8, 4);
+ } else if (!Op.compare_lower("s12e1w")) {
+ // SYS #4, C7, C8, #5
+ SYS_ALIAS(4, 7, 8, 5);
+ } else if (!Op.compare_lower("s12e0r")) {
+ // SYS #4, C7, C8, #6
+ SYS_ALIAS(4, 7, 8, 6);
+ } else if (!Op.compare_lower("s12e0w")) {
+ // SYS #4, C7, C8, #7
+ SYS_ALIAS(4, 7, 8, 7);
+ } else {
+ return TokError("invalid operand for AT instruction");
+ }
+ } else if (Mnemonic == "tlbi") {
+ if (!Op.compare_lower("vmalle1is")) {
+ // SYS #0, C8, C3, #0
+ SYS_ALIAS(0, 8, 3, 0);
+ } else if (!Op.compare_lower("alle2is")) {
+ // SYS #4, C8, C3, #0
+ SYS_ALIAS(4, 8, 3, 0);
+ } else if (!Op.compare_lower("alle3is")) {
+ // SYS #6, C8, C3, #0
+ SYS_ALIAS(6, 8, 3, 0);
+ } else if (!Op.compare_lower("vae1is")) {
+ // SYS #0, C8, C3, #1
+ SYS_ALIAS(0, 8, 3, 1);
+ } else if (!Op.compare_lower("vae2is")) {
+ // SYS #4, C8, C3, #1
+ SYS_ALIAS(4, 8, 3, 1);
+ } else if (!Op.compare_lower("vae3is")) {
+ // SYS #6, C8, C3, #1
+ SYS_ALIAS(6, 8, 3, 1);
+ } else if (!Op.compare_lower("aside1is")) {
+ // SYS #0, C8, C3, #2
+ SYS_ALIAS(0, 8, 3, 2);
+ } else if (!Op.compare_lower("vaae1is")) {
+ // SYS #0, C8, C3, #3
+ SYS_ALIAS(0, 8, 3, 3);
+ } else if (!Op.compare_lower("alle1is")) {
+ // SYS #4, C8, C3, #4
+ SYS_ALIAS(4, 8, 3, 4);
+ } else if (!Op.compare_lower("vale1is")) {
+ // SYS #0, C8, C3, #5
+ SYS_ALIAS(0, 8, 3, 5);
+ } else if (!Op.compare_lower("vaale1is")) {
+ // SYS #0, C8, C3, #7
+ SYS_ALIAS(0, 8, 3, 7);
+ } else if (!Op.compare_lower("vmalle1")) {
+ // SYS #0, C8, C7, #0
+ SYS_ALIAS(0, 8, 7, 0);
+ } else if (!Op.compare_lower("alle2")) {
+ // SYS #4, C8, C7, #0
+ SYS_ALIAS(4, 8, 7, 0);
+ } else if (!Op.compare_lower("vale2is")) {
+ // SYS #4, C8, C3, #5
+ SYS_ALIAS(4, 8, 3, 5);
+ } else if (!Op.compare_lower("vale3is")) {
+ // SYS #6, C8, C3, #5
+ SYS_ALIAS(6, 8, 3, 5);
+ } else if (!Op.compare_lower("alle3")) {
+ // SYS #6, C8, C7, #0
+ SYS_ALIAS(6, 8, 7, 0);
+ } else if (!Op.compare_lower("vae1")) {
+ // SYS #0, C8, C7, #1
+ SYS_ALIAS(0, 8, 7, 1);
+ } else if (!Op.compare_lower("vae2")) {
+ // SYS #4, C8, C7, #1
+ SYS_ALIAS(4, 8, 7, 1);
+ } else if (!Op.compare_lower("vae3")) {
+ // SYS #6, C8, C7, #1
+ SYS_ALIAS(6, 8, 7, 1);
+ } else if (!Op.compare_lower("aside1")) {
+ // SYS #0, C8, C7, #2
+ SYS_ALIAS(0, 8, 7, 2);
+ } else if (!Op.compare_lower("vaae1")) {
+ // SYS #0, C8, C7, #3
+ SYS_ALIAS(0, 8, 7, 3);
+ } else if (!Op.compare_lower("alle1")) {
+ // SYS #4, C8, C7, #4
+ SYS_ALIAS(4, 8, 7, 4);
+ } else if (!Op.compare_lower("vale1")) {
+ // SYS #0, C8, C7, #5
+ SYS_ALIAS(0, 8, 7, 5);
+ } else if (!Op.compare_lower("vale2")) {
+ // SYS #4, C8, C7, #5
+ SYS_ALIAS(4, 8, 7, 5);
+ } else if (!Op.compare_lower("vale3")) {
+ // SYS #6, C8, C7, #5
+ SYS_ALIAS(6, 8, 7, 5);
+ } else if (!Op.compare_lower("vaale1")) {
+ // SYS #0, C8, C7, #7
+ SYS_ALIAS(0, 8, 7, 7);
+ } else if (!Op.compare_lower("ipas2e1")) {
+ // SYS #4, C8, C4, #1
+ SYS_ALIAS(4, 8, 4, 1);
+ } else if (!Op.compare_lower("ipas2le1")) {
+ // SYS #4, C8, C4, #5
+ SYS_ALIAS(4, 8, 4, 5);
+ } else if (!Op.compare_lower("ipas2e1is")) {
+      // SYS #4, C8, C0, #1
+ SYS_ALIAS(4, 8, 0, 1);
+ } else if (!Op.compare_lower("ipas2le1is")) {
+      // SYS #4, C8, C0, #5
+ SYS_ALIAS(4, 8, 0, 5);
+ } else if (!Op.compare_lower("vmalls12e1")) {
+ // SYS #4, C8, C7, #6
+ SYS_ALIAS(4, 8, 7, 6);
+ } else if (!Op.compare_lower("vmalls12e1is")) {
+ // SYS #4, C8, C3, #6
+ SYS_ALIAS(4, 8, 3, 6);
+ } else {
+ return TokError("invalid operand for TLBI instruction");
+ }
}
- if (Parser.getTok().isNot(AsmToken::Real)) {
- if (!Hash)
- return MatchOperand_NoMatch;
- Error(S, "Expected floating-point immediate");
- return MatchOperand_ParseFail;
- }
+#undef SYS_ALIAS
- APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
- if (Negative) RealVal.changeSign();
- double DblVal = RealVal.convertToDouble();
+ Parser.Lex(); // Eat operand.
- Parser.Lex(); // Eat real number
- SMLoc E = Parser.getTok().getLoc();
+ bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
+ bool HasRegister = false;
- Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
- return MatchOperand_Success;
-}
+ // Check for the optional register operand.
+ if (getLexer().is(AsmToken::Comma)) {
+ Parser.Lex(); // Eat comma.
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseFPImm0AndImm0Operand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
+ return TokError("expected register operand");
- SMLoc S = Parser.getTok().getLoc();
+ HasRegister = true;
+ }
- bool Hash = false;
- if (Parser.getTok().is(AsmToken::Hash)) {
- Parser.Lex(); // Eat '#'
- Hash = true;
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Parser.eatToEndOfStatement();
+ return TokError("unexpected token in argument list");
+ }
+
+  if (ExpectRegister && !HasRegister) {
+    return TokError("specified " + Mnemonic + " op requires a register");
+  } else if (!ExpectRegister && HasRegister) {
+    return TokError("specified " + Mnemonic + " op does not use a register");
}
- APFloat RealVal(0.0);
- if (Parser.getTok().is(AsmToken::Real)) {
- if(Parser.getTok().getString() != "0.0") {
- Error(S, "only #0.0 is acceptable as immediate");
+ Parser.Lex(); // Consume the EndOfStatement
+ return false;
+}
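+// A note on the register rule above: alias names containing "all" (e.g.
+// "ic iallu", "tlbi alle1is") act on everything and take no register, while
+// the remaining ops (e.g. "ic ivau, x0", "tlbi vae1, x2") require one; a
+// mismatch in either direction hits one of the two TokError paths above.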
+
+AArch64AsmParser::OperandMatchResultTy
+AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
+ const AsmToken &Tok = Parser.getTok();
+
+ // Can be either a #imm style literal or an option name
+ bool Hash = Tok.is(AsmToken::Hash);
+ if (Hash || Tok.is(AsmToken::Integer)) {
+ // Immediate operand.
+ if (Hash)
+ Parser.Lex(); // Eat the '#'
+ const MCExpr *ImmVal;
+ SMLoc ExprLoc = getLoc();
+ if (getParser().parseExpression(ImmVal))
+ return MatchOperand_ParseFail;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+ if (!MCE) {
+ Error(ExprLoc, "immediate value expected for barrier operand");
return MatchOperand_ParseFail;
}
- }
- else if (Parser.getTok().is(AsmToken::Integer)) {
- if(Parser.getTok().getIntVal() != 0) {
- Error(S, "only #0.0 is acceptable as immediate");
+ if (MCE->getValue() < 0 || MCE->getValue() > 15) {
+ Error(ExprLoc, "barrier operand out of range");
return MatchOperand_ParseFail;
}
+ Operands.push_back(
+ AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
+ return MatchOperand_Success;
}
- else {
- if (!Hash)
- return MatchOperand_NoMatch;
- Error(S, "only #0.0 is acceptable as immediate");
+
+ if (Tok.isNot(AsmToken::Identifier)) {
+ TokError("invalid operand for instruction");
return MatchOperand_ParseFail;
}
- Parser.Lex(); // Eat real number
- SMLoc E = Parser.getTok().getLoc();
+ bool Valid;
+ unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
+ if (!Valid) {
+ TokError("invalid barrier option name");
+ return MatchOperand_ParseFail;
+ }
+
+ // The only valid named option for ISB is 'sy'
+ if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
+ TokError("'sy' or #imm operand expected");
+ return MatchOperand_ParseFail;
+ }
+
+ Operands.push_back(
+ AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
+ Parser.Lex(); // Consume the option
- Operands.push_back(AArch64Operand::CreateFPImm(0.0, S, E));
return MatchOperand_Success;
}
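+// Example forms accepted above: "dmb sy", "dmb #15" and the bare-integer
+// "dmb 15", with immediates limited to [0, 15]; "isb" additionally rejects
+// every named option except "sy".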
-// Automatically generated
-static unsigned MatchRegisterName(StringRef Name);
-
-bool
-AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
- StringRef &Layout,
- SMLoc &LayoutLoc) const {
+AArch64AsmParser::OperandMatchResultTy
+AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier))
- return false;
+ return MatchOperand_NoMatch;
- std::string LowerReg = Tok.getString().lower();
- size_t DotPos = LowerReg.find('.');
+ Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
+ STI.getFeatureBits(), getContext()));
+ Parser.Lex(); // Eat identifier
- bool IsVec128 = false;
- SMLoc S = Tok.getLoc();
- RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);
+ return MatchOperand_Success;
+}
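+// The system register name (e.g. "spsel" in "msr spsel, #1") is kept as a
+// raw string here; whether it names a register that is actually readable or
+// writable on this target is checked later during operand matching.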
- if (DotPos == std::string::npos) {
- Layout = StringRef();
- } else {
- // Everything afterwards needs to be a literal token, expected to be
- // '.2d','.b' etc for vector registers.
-
- // This StringSwitch validates the input and (perhaps more importantly)
- // gives us a permanent string to use in the token (a pointer into LowerReg
- // would go out of scope when we return).
- LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
- StringRef LayoutText = StringRef(LowerReg).substr(DotPos);
-
- // See if it's a 128-bit layout first.
- Layout = StringSwitch<const char *>(LayoutText)
- .Case(".q", ".q").Case(".1q", ".1q")
- .Case(".d", ".d").Case(".2d", ".2d")
- .Case(".s", ".s").Case(".4s", ".4s")
- .Case(".h", ".h").Case(".8h", ".8h")
- .Case(".b", ".b").Case(".16b", ".16b")
- .Default("");
-
- if (Layout.size() != 0)
- IsVec128 = true;
- else {
- Layout = StringSwitch<const char *>(LayoutText)
- .Case(".1d", ".1d")
- .Case(".2s", ".2s")
- .Case(".4h", ".4h")
- .Case(".8b", ".8b")
- .Default("");
+/// tryParseVectorRegister - Parse a vector register operand.
+bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
+ if (Parser.getTok().isNot(AsmToken::Identifier))
+ return true;
+
+ SMLoc S = getLoc();
+ // Check for a vector register specifier first.
+ StringRef Kind;
+ int64_t Reg = tryMatchVectorRegister(Kind, false);
+ if (Reg == -1)
+ return true;
+ Operands.push_back(
+ AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
+ // If there was an explicit qualifier, that goes on as a literal text
+ // operand.
+ if (!Kind.empty())
+ Operands.push_back(
+ AArch64Operand::CreateToken(Kind, false, S, getContext()));
+
+ // If there is an index specifier following the register, parse that too.
+ if (Parser.getTok().is(AsmToken::LBrac)) {
+ SMLoc SIdx = getLoc();
+ Parser.Lex(); // Eat left bracket token.
+
+ const MCExpr *ImmVal;
+ if (getParser().parseExpression(ImmVal))
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+ if (!MCE) {
+ TokError("immediate value expected for vector index");
+ return false;
}
- if (Layout.size() == 0) {
- // If we've still not pinned it down the register is malformed.
+ SMLoc E = getLoc();
+ if (Parser.getTok().isNot(AsmToken::RBrac)) {
+ Error(E, "']' expected");
return false;
}
- }
- RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
- if (RegNum == AArch64::NoRegister) {
- RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
- .Case("ip0", AArch64::X16)
- .Case("ip1", AArch64::X17)
- .Case("fp", AArch64::X29)
- .Case("lr", AArch64::X30)
- .Case("v0", IsVec128 ? AArch64::Q0 : AArch64::D0)
- .Case("v1", IsVec128 ? AArch64::Q1 : AArch64::D1)
- .Case("v2", IsVec128 ? AArch64::Q2 : AArch64::D2)
- .Case("v3", IsVec128 ? AArch64::Q3 : AArch64::D3)
- .Case("v4", IsVec128 ? AArch64::Q4 : AArch64::D4)
- .Case("v5", IsVec128 ? AArch64::Q5 : AArch64::D5)
- .Case("v6", IsVec128 ? AArch64::Q6 : AArch64::D6)
- .Case("v7", IsVec128 ? AArch64::Q7 : AArch64::D7)
- .Case("v8", IsVec128 ? AArch64::Q8 : AArch64::D8)
- .Case("v9", IsVec128 ? AArch64::Q9 : AArch64::D9)
- .Case("v10", IsVec128 ? AArch64::Q10 : AArch64::D10)
- .Case("v11", IsVec128 ? AArch64::Q11 : AArch64::D11)
- .Case("v12", IsVec128 ? AArch64::Q12 : AArch64::D12)
- .Case("v13", IsVec128 ? AArch64::Q13 : AArch64::D13)
- .Case("v14", IsVec128 ? AArch64::Q14 : AArch64::D14)
- .Case("v15", IsVec128 ? AArch64::Q15 : AArch64::D15)
- .Case("v16", IsVec128 ? AArch64::Q16 : AArch64::D16)
- .Case("v17", IsVec128 ? AArch64::Q17 : AArch64::D17)
- .Case("v18", IsVec128 ? AArch64::Q18 : AArch64::D18)
- .Case("v19", IsVec128 ? AArch64::Q19 : AArch64::D19)
- .Case("v20", IsVec128 ? AArch64::Q20 : AArch64::D20)
- .Case("v21", IsVec128 ? AArch64::Q21 : AArch64::D21)
- .Case("v22", IsVec128 ? AArch64::Q22 : AArch64::D22)
- .Case("v23", IsVec128 ? AArch64::Q23 : AArch64::D23)
- .Case("v24", IsVec128 ? AArch64::Q24 : AArch64::D24)
- .Case("v25", IsVec128 ? AArch64::Q25 : AArch64::D25)
- .Case("v26", IsVec128 ? AArch64::Q26 : AArch64::D26)
- .Case("v27", IsVec128 ? AArch64::Q27 : AArch64::D27)
- .Case("v28", IsVec128 ? AArch64::Q28 : AArch64::D28)
- .Case("v29", IsVec128 ? AArch64::Q29 : AArch64::D29)
- .Case("v30", IsVec128 ? AArch64::Q30 : AArch64::D30)
- .Case("v31", IsVec128 ? AArch64::Q31 : AArch64::D31)
- .Default(AArch64::NoRegister);
- }
- if (RegNum == AArch64::NoRegister)
- return false;
+ Parser.Lex(); // Eat right bracket token.
- return true;
+ Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
+ E, getContext()));
+ }
+
+ return false;
}
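+// So "v2.4s" yields a vector register operand plus a ".4s" suffix token,
+// and "v2.s[1]" gains an additional vector-index operand for lane 1.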
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- uint32_t &NumLanes) {
- unsigned RegNum;
- StringRef Layout;
- SMLoc RegEndLoc, LayoutLoc;
- SMLoc S = Parser.getTok().getLoc();
-
- if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
- return MatchOperand_NoMatch;
+/// parseRegister - Parse a non-vector register operand.
+bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ // Try for a vector register.
+ if (!tryParseVectorRegister(Operands))
+ return false;
- Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));
+ // Try for a scalar register.
+ int64_t Reg = tryParseRegister();
+ if (Reg == -1)
+ return true;
+ Operands.push_back(
+ AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
- if (Layout.size() != 0) {
- unsigned long long TmpLanes = 0;
- llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
- if (TmpLanes != 0) {
- NumLanes = TmpLanes;
- } else {
- // If the number of lanes isn't specified explicitly, a valid instruction
- // will have an element specifier and be capable of acting on the entire
- // vector register.
- switch (Layout.back()) {
- default: llvm_unreachable("Invalid layout specifier");
- case 'b': NumLanes = 16; break;
- case 'h': NumLanes = 8; break;
- case 's': NumLanes = 4; break;
- case 'd': NumLanes = 2; break;
- case 'q': NumLanes = 1; break;
+ // A small number of instructions (FMOVXDhighr, for example) have "[1]"
+ // as a string token in the instruction itself.
+ if (getLexer().getKind() == AsmToken::LBrac) {
+ SMLoc LBracS = getLoc();
+ Parser.Lex();
+ const AsmToken &Tok = Parser.getTok();
+ if (Tok.is(AsmToken::Integer)) {
+ SMLoc IntS = getLoc();
+ int64_t Val = Tok.getIntVal();
+ if (Val == 1) {
+ Parser.Lex();
+ if (getLexer().getKind() == AsmToken::RBrac) {
+ SMLoc RBracS = getLoc();
+ Parser.Lex();
+ Operands.push_back(
+ AArch64Operand::CreateToken("[", false, LBracS, getContext()));
+ Operands.push_back(
+ AArch64Operand::CreateToken("1", false, IntS, getContext()));
+ Operands.push_back(
+ AArch64Operand::CreateToken("]", false, RBracS, getContext()));
+ return false;
+ }
}
}
-
- Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
}
- Parser.Lex();
- return MatchOperand_Success;
-}
-
-bool
-AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
- SMLoc &EndLoc) {
- // This callback is used for things like DWARF frame directives in
- // assembly. They don't care about things like NEON layouts or lanes, they
- // just want to be able to produce the DWARF register number.
- StringRef LayoutSpec;
- SMLoc RegEndLoc, LayoutLoc;
- StartLoc = Parser.getTok().getLoc();
-
- if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
- return true;
-
- Parser.Lex();
- EndLoc = Parser.getTok().getLoc();
-
return false;
}
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Since these operands occur in very limited circumstances, without
- // alternatives, we actually signal an error if there is no match. If relaxing
- // this, beware of unintended consequences: an immediate will be accepted
- // during matching, no matter how it gets into the AArch64Operand.
- const AsmToken &Tok = Parser.getTok();
- SMLoc S = Tok.getLoc();
+bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
+ bool HasELFModifier = false;
+ AArch64MCExpr::VariantKind RefKind;
- if (Tok.is(AsmToken::Identifier)) {
- bool ValidName;
- uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);
+ if (Parser.getTok().is(AsmToken::Colon)) {
+    Parser.Lex(); // Eat ':'
+ HasELFModifier = true;
- if (!ValidName) {
- Error(S, "operand specifier not recognised");
- return MatchOperand_ParseFail;
+ if (Parser.getTok().isNot(AsmToken::Identifier)) {
+ Error(Parser.getTok().getLoc(),
+ "expect relocation specifier in operand after ':'");
+ return true;
}
- Parser.Lex(); // We're done with the identifier. Eat it
-
- SMLoc E = Parser.getTok().getLoc();
- const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
- Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
- return MatchOperand_Success;
- } else if (Tok.is(AsmToken::Hash)) {
- Parser.Lex();
+ std::string LowerCase = Parser.getTok().getIdentifier().lower();
+ RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
+ .Case("lo12", AArch64MCExpr::VK_LO12)
+ .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
+ .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
+ .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
+ .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
+ .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
+ .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
+ .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
+ .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
+ .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
+ .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
+ .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
+ .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
+ .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
+ .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
+ .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
+ .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
+ .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
+ .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
+ .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
+ .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
+ .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
+ .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
+ .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
+ .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
+ .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
+ .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
+ .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
+ .Case("got", AArch64MCExpr::VK_GOT_PAGE)
+ .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
+ .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
+ .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
+ .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
+ .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
+ .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
+ .Default(AArch64MCExpr::VK_INVALID);
+
+ if (RefKind == AArch64MCExpr::VK_INVALID) {
+ Error(Parser.getTok().getLoc(),
+ "expect relocation specifier in operand after ':'");
+ return true;
+ }
- const MCExpr *ImmVal;
- if (ParseImmediate(ImmVal) != MatchOperand_Success)
- return MatchOperand_ParseFail;
+ Parser.Lex(); // Eat identifier
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
- if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
- Error(S, "Invalid immediate for instruction");
- return MatchOperand_ParseFail;
+ if (Parser.getTok().isNot(AsmToken::Colon)) {
+ Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
+ return true;
}
-
- SMLoc E = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
- return MatchOperand_Success;
+ Parser.Lex(); // Eat ':'
}
- Error(S, "unexpected operand for instruction");
- return MatchOperand_ParseFail;
-}
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseSysRegOperand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- const AsmToken &Tok = Parser.getTok();
-
- // Any MSR/MRS operand will be an identifier, and we want to store it as some
- // kind of string: SPSel is valid for two different forms of MSR with two
- // different encodings. There's no collision at the moment, but the potential
- // is there.
- if (!Tok.is(AsmToken::Identifier)) {
- return MatchOperand_NoMatch;
- }
+ if (getParser().parseExpression(ImmVal))
+ return true;
- SMLoc S = Tok.getLoc();
- Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
- Parser.Lex(); // Eat identifier
+ if (HasELFModifier)
+ ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
- return MatchOperand_Success;
+ return false;
}
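+// For instance, in "add x0, x1, :lo12:sym" the ":lo12:" modifier wraps the
+// parsed expression in an AArch64MCExpr with VK_LO12, preserving the
+// intended relocation kind through to fixup evaluation.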
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseLSXAddressOperand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- SMLoc S = Parser.getTok().getLoc();
-
- unsigned RegNum;
- SMLoc RegEndLoc, LayoutLoc;
- StringRef Layout;
- if(!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
- || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
- || Layout.size() != 0) {
- // Check Layout.size because we don't want to let "x3.4s" or similar
- // through.
- return MatchOperand_NoMatch;
- }
- Parser.Lex(); // Eat register
+/// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
+bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
+  assert(Parser.getTok().is(AsmToken::LCurly) &&
+         "Token is not a Left Curly Brace");
+ SMLoc S = getLoc();
+ Parser.Lex(); // Eat left bracket token.
+ StringRef Kind;
+ int64_t FirstReg = tryMatchVectorRegister(Kind, true);
+ if (FirstReg == -1)
+ return true;
+ int64_t PrevReg = FirstReg;
+ unsigned Count = 1;
- if (Parser.getTok().is(AsmToken::RBrac)) {
- // We're done
- SMLoc E = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
- return MatchOperand_Success;
- }
+ if (Parser.getTok().is(AsmToken::Minus)) {
+ Parser.Lex(); // Eat the minus.
- // Otherwise, only ", #0" is valid
+ SMLoc Loc = getLoc();
+ StringRef NextKind;
+ int64_t Reg = tryMatchVectorRegister(NextKind, true);
+ if (Reg == -1)
+ return true;
+    // Any Kind suffixes must match on all regs in the list.
+ if (Kind != NextKind)
+ return Error(Loc, "mismatched register size suffix");
- if (Parser.getTok().isNot(AsmToken::Comma)) {
- Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
- return MatchOperand_ParseFail;
- }
- Parser.Lex(); // Eat ','
+ unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
- if (Parser.getTok().isNot(AsmToken::Hash)) {
- Error(Parser.getTok().getLoc(), "expected '#0'");
- return MatchOperand_ParseFail;
- }
- Parser.Lex(); // Eat '#'
+ if (Space == 0 || Space > 3) {
+ return Error(Loc, "invalid number of vectors");
+ }
- if (Parser.getTok().isNot(AsmToken::Integer)
- || Parser.getTok().getIntVal() != 0 ) {
- Error(Parser.getTok().getLoc(), "expected '#0'");
- return MatchOperand_ParseFail;
+ Count += Space;
}
- Parser.Lex(); // Eat '0'
-
- SMLoc E = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
- return MatchOperand_Success;
-}
+ while (Parser.getTok().is(AsmToken::Comma)) {
+ Parser.Lex(); // Eat the comma token.
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseShiftExtend(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- StringRef IDVal = Parser.getTok().getIdentifier();
- std::string LowerID = IDVal.lower();
-
- A64SE::ShiftExtSpecifiers Spec =
- StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
- .Case("lsl", A64SE::LSL)
- .Case("msl", A64SE::MSL)
- .Case("lsr", A64SE::LSR)
- .Case("asr", A64SE::ASR)
- .Case("ror", A64SE::ROR)
- .Case("uxtb", A64SE::UXTB)
- .Case("uxth", A64SE::UXTH)
- .Case("uxtw", A64SE::UXTW)
- .Case("uxtx", A64SE::UXTX)
- .Case("sxtb", A64SE::SXTB)
- .Case("sxth", A64SE::SXTH)
- .Case("sxtw", A64SE::SXTW)
- .Case("sxtx", A64SE::SXTX)
- .Default(A64SE::Invalid);
-
- if (Spec == A64SE::Invalid)
- return MatchOperand_NoMatch;
+ SMLoc Loc = getLoc();
+ StringRef NextKind;
+ int64_t Reg = tryMatchVectorRegister(NextKind, true);
+ if (Reg == -1)
+ return true;
+      // Any Kind suffixes must match on all regs in the list.
+ if (Kind != NextKind)
+ return Error(Loc, "mismatched register size suffix");
- // Eat the shift
- SMLoc S, E;
- S = Parser.getTok().getLoc();
- Parser.Lex();
+ // Registers must be incremental (with wraparound at 31)
+ if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
+ (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
+ return Error(Loc, "registers must be sequential");
- if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR &&
- Spec != A64SE::ROR && Spec != A64SE::MSL) {
- // The shift amount can be omitted for the extending versions, but not real
- // shifts:
- // add x0, x0, x0, uxtb
- // is valid, and equivalent to
- // add x0, x0, x0, uxtb #0
-
- if (Parser.getTok().is(AsmToken::Comma) ||
- Parser.getTok().is(AsmToken::EndOfStatement) ||
- Parser.getTok().is(AsmToken::RBrac)) {
- Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
- S, E));
- return MatchOperand_Success;
+ PrevReg = Reg;
+ ++Count;
}
}
- // Eat # at beginning of immediate
- if (!Parser.getTok().is(AsmToken::Hash)) {
- Error(Parser.getTok().getLoc(),
- "expected #imm after shift specifier");
- return MatchOperand_ParseFail;
- }
- Parser.Lex();
-
- // Make sure we do actually have a number
- if (!Parser.getTok().is(AsmToken::Integer)) {
- Error(Parser.getTok().getLoc(),
- "expected integer shift amount");
- return MatchOperand_ParseFail;
- }
- unsigned Amount = Parser.getTok().getIntVal();
- Parser.Lex();
- E = Parser.getTok().getLoc();
+ if (Parser.getTok().isNot(AsmToken::RCurly))
+ return Error(getLoc(), "'}' expected");
+ Parser.Lex(); // Eat the '}' token.
- Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
- S, E));
+ if (Count > 4)
+ return Error(S, "invalid number of vectors");
- return MatchOperand_Success;
-}
+ unsigned NumElements = 0;
+ char ElementKind = 0;
+ if (!Kind.empty())
+ parseValidVectorKind(Kind, NumElements, ElementKind);
-/// Try to parse a vector register token, If it is a vector register,
-/// the token is eaten and return true. Otherwise return false.
-bool AArch64AsmParser::TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc,
- StringRef &Layout, SMLoc &LayoutLoc) {
- bool IsVector = true;
-
- if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
- IsVector = false;
- else if (!AArch64MCRegisterClasses[AArch64::FPR64RegClassID]
- .contains(RegNum) &&
- !AArch64MCRegisterClasses[AArch64::FPR128RegClassID]
- .contains(RegNum))
- IsVector = false;
- else if (Layout.size() == 0)
- IsVector = false;
-
- if (!IsVector)
- Error(Parser.getTok().getLoc(), "expected vector type register");
-
- Parser.Lex(); // Eat this token.
- return IsVector;
-}
+ Operands.push_back(AArch64Operand::CreateVectorList(
+ FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
+ // If there is an index specifier following the list, parse that too.
+ if (Parser.getTok().is(AsmToken::LBrac)) {
+ SMLoc SIdx = getLoc();
+ Parser.Lex(); // Eat left bracket token.
-// A vector list contains 1-4 consecutive registers.
-// Now there are two kinds of vector list when number of vector > 1:
-// (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
-// (2) {Vn.layout - Vm.layout}
-// If the layout is like .b/.h/.s/.d, also parse the lane.
-AArch64AsmParser::OperandMatchResultTy AArch64AsmParser::ParseVectorList(
- SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- if (Parser.getTok().isNot(AsmToken::LCurly)) {
- Error(Parser.getTok().getLoc(), "'{' expected");
- return MatchOperand_ParseFail;
- }
- SMLoc SLoc = Parser.getTok().getLoc();
- Parser.Lex(); // Eat '{' token.
+ const MCExpr *ImmVal;
+ if (getParser().parseExpression(ImmVal))
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+ if (!MCE) {
+ TokError("immediate value expected for vector index");
+ return false;
+ }
- unsigned Reg, Count = 1;
- StringRef LayoutStr;
- SMLoc RegEndLoc, LayoutLoc;
- if (!TryParseVector(Reg, RegEndLoc, LayoutStr, LayoutLoc))
- return MatchOperand_ParseFail;
+ SMLoc E = getLoc();
+ if (Parser.getTok().isNot(AsmToken::RBrac)) {
+ Error(E, "']' expected");
+ return false;
+ }
- if (Parser.getTok().is(AsmToken::Minus)) {
- Parser.Lex(); // Eat the minus.
+ Parser.Lex(); // Eat right bracket token.
- unsigned Reg2;
- StringRef LayoutStr2;
- SMLoc RegEndLoc2, LayoutLoc2;
- SMLoc RegLoc2 = Parser.getTok().getLoc();
+ Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
+ E, getContext()));
+ }
+ return false;
+}
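+// Both list syntaxes collapse to one operand: "{ v0.4s, v1.4s, v2.4s }" and
+// the range form "{ v0.4s - v2.4s }" each give FirstReg = V0 and Count = 3,
+// and a trailing "[2]" adds a vector-index operand for the lane.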
- if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
- return MatchOperand_ParseFail;
- unsigned Space = (Reg < Reg2) ? (Reg2 - Reg) : (Reg2 + 32 - Reg);
+AArch64AsmParser::OperandMatchResultTy
+AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
+ const AsmToken &Tok = Parser.getTok();
+ if (!Tok.is(AsmToken::Identifier))
+ return MatchOperand_NoMatch;
- if (LayoutStr != LayoutStr2) {
- Error(LayoutLoc2, "expected the same vector layout");
- return MatchOperand_ParseFail;
- }
- if (Space == 0 || Space > 3) {
- Error(RegLoc2, "invalid number of vectors");
- return MatchOperand_ParseFail;
- }
+ unsigned RegNum = MatchRegisterName(Tok.getString().lower());
- Count += Space;
- } else {
- unsigned LastReg = Reg;
- while (Parser.getTok().is(AsmToken::Comma)) {
- Parser.Lex(); // Eat the comma.
- unsigned Reg2;
- StringRef LayoutStr2;
- SMLoc RegEndLoc2, LayoutLoc2;
- SMLoc RegLoc2 = Parser.getTok().getLoc();
+ MCContext &Ctx = getContext();
+ const MCRegisterInfo *RI = Ctx.getRegisterInfo();
+ if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
+ return MatchOperand_NoMatch;
- if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
- return MatchOperand_ParseFail;
- unsigned Space = (LastReg < Reg2) ? (Reg2 - LastReg)
- : (Reg2 + 32 - LastReg);
- Count++;
-
- // The space between two vectors should be 1. And they should have the same layout.
- // Total count shouldn't be great than 4
- if (Space != 1) {
- Error(RegLoc2, "invalid space between two vectors");
- return MatchOperand_ParseFail;
- }
- if (LayoutStr != LayoutStr2) {
- Error(LayoutLoc2, "expected the same vector layout");
- return MatchOperand_ParseFail;
- }
- if (Count > 4) {
- Error(RegLoc2, "invalid number of vectors");
- return MatchOperand_ParseFail;
- }
+ SMLoc S = getLoc();
+ Parser.Lex(); // Eat register
- LastReg = Reg2;
- }
+ if (Parser.getTok().isNot(AsmToken::Comma)) {
+ Operands.push_back(
+ AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
+ return MatchOperand_Success;
}
+ Parser.Lex(); // Eat comma.
- if (Parser.getTok().isNot(AsmToken::RCurly)) {
- Error(Parser.getTok().getLoc(), "'}' expected");
+ if (Parser.getTok().is(AsmToken::Hash))
+ Parser.Lex(); // Eat hash
+
+ if (Parser.getTok().isNot(AsmToken::Integer)) {
+ Error(getLoc(), "index must be absent or #0");
return MatchOperand_ParseFail;
}
- SMLoc ELoc = Parser.getTok().getLoc();
- Parser.Lex(); // Eat '}' token.
- A64Layout::VectorLayout Layout = A64StringToVectorLayout(LayoutStr);
- if (Count > 1) { // If count > 1, create vector list using super register.
- bool IsVec64 = (Layout < A64Layout::VL_16B);
- static unsigned SupRegIDs[3][2] = {
- { AArch64::QPairRegClassID, AArch64::DPairRegClassID },
- { AArch64::QTripleRegClassID, AArch64::DTripleRegClassID },
- { AArch64::QQuadRegClassID, AArch64::DQuadRegClassID }
- };
- unsigned SupRegID = SupRegIDs[Count - 2][static_cast<int>(IsVec64)];
- unsigned Sub0 = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0;
- const MCRegisterInfo *MRI = getContext().getRegisterInfo();
- Reg = MRI->getMatchingSuperReg(Reg, Sub0,
- &AArch64MCRegisterClasses[SupRegID]);
+ const MCExpr *ImmVal;
+ if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
+ cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
+ Error(getLoc(), "index must be absent or #0");
+ return MatchOperand_ParseFail;
}
- Operands.push_back(
- AArch64Operand::CreateVectorList(Reg, Count, Layout, SLoc, ELoc));
- if (Parser.getTok().is(AsmToken::LBrac)) {
- uint32_t NumLanes = 0;
- switch(Layout) {
- case A64Layout::VL_B : NumLanes = 16; break;
- case A64Layout::VL_H : NumLanes = 8; break;
- case A64Layout::VL_S : NumLanes = 4; break;
- case A64Layout::VL_D : NumLanes = 2; break;
- default:
- SMLoc Loc = getLexer().getLoc();
- Error(Loc, "expected comma before next operand");
- return MatchOperand_ParseFail;
- }
- return ParseNEONLane(Operands, NumLanes);
- } else {
- return MatchOperand_Success;
- }
+ Operands.push_back(
+ AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
+ return MatchOperand_Success;
}
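+// This accepts a GPR64sp register optionally followed by ", #0" (or ", 0"),
+// so "x3", "x3, #0" and "sp, #0" all produce the same single register
+// operand.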
-// FIXME: We would really like to be able to tablegen'erate this.
-bool AArch64AsmParser::
-validateInstruction(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- switch (Inst.getOpcode()) {
- case AArch64::BFIwwii:
- case AArch64::BFIxxii:
- case AArch64::SBFIZwwii:
- case AArch64::SBFIZxxii:
- case AArch64::UBFIZwwii:
- case AArch64::UBFIZxxii: {
- unsigned ImmOps = Inst.getNumOperands() - 2;
- int64_t ImmR = Inst.getOperand(ImmOps).getImm();
- int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
-
- if (ImmR != 0 && ImmS >= ImmR) {
- return Error(Operands[4]->getStartLoc(),
- "requested insert overflows register");
- }
+/// parseOperand - Parse an AArch64 instruction operand. For now this parses
+/// the operand regardless of the mnemonic.
+bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
+ bool invertCondCode) {
+ // Check if the current operand has a custom associated parser, if so, try to
+ // custom parse the operand, or fallback to the general approach.
+ OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
+ if (ResTy == MatchOperand_Success)
return false;
- }
- case AArch64::BFXILwwii:
- case AArch64::BFXILxxii:
- case AArch64::SBFXwwii:
- case AArch64::SBFXxxii:
- case AArch64::UBFXwwii:
- case AArch64::UBFXxxii: {
- unsigned ImmOps = Inst.getNumOperands() - 2;
- int64_t ImmR = Inst.getOperand(ImmOps).getImm();
- int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
- int64_t RegWidth = 0;
- switch (Inst.getOpcode()) {
- case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
- RegWidth = 64;
- break;
- case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
- RegWidth = 32;
- break;
- }
+ // If there wasn't a custom match, try the generic matcher below. Otherwise,
+ // there was a match, but an error occurred, in which case, just return that
+ // the operand parsing failed.
+ if (ResTy == MatchOperand_ParseFail)
+ return true;
- if (ImmS >= RegWidth || ImmS < ImmR) {
- return Error(Operands[4]->getStartLoc(),
- "requested extract overflows register");
- }
+ // Nothing custom, so do general case parsing.
+ SMLoc S, E;
+ switch (getLexer().getKind()) {
+ default: {
+ SMLoc S = getLoc();
+ const MCExpr *Expr;
+ if (parseSymbolicImmVal(Expr))
+ return Error(S, "invalid operand");
+
+ SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
return false;
}
- case AArch64::ICix: {
- int64_t ImmVal = Inst.getOperand(0).getImm();
- A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
- if (!A64IC::NeedsRegister(ICOp)) {
- return Error(Operands[1]->getStartLoc(),
- "specified IC op does not use a register");
- }
- return false;
+ case AsmToken::LBrac: {
+ SMLoc Loc = Parser.getTok().getLoc();
+ Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
+ getContext()));
+ Parser.Lex(); // Eat '['
+
+ // There's no comma after a '[', so we can parse the next operand
+ // immediately.
+ return parseOperand(Operands, false, false);
}
- case AArch64::ICi: {
- int64_t ImmVal = Inst.getOperand(0).getImm();
- A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
- if (A64IC::NeedsRegister(ICOp)) {
- return Error(Operands[1]->getStartLoc(),
- "specified IC op requires a register");
- }
+ case AsmToken::LCurly:
+ return parseVectorList(Operands);
+ case AsmToken::Identifier: {
+ // If we're expecting a Condition Code operand, then just parse that.
+ if (isCondCode)
+ return parseCondCode(Operands, invertCondCode);
+
+ // If it's a register name, parse it.
+ if (!parseRegister(Operands))
+ return false;
+
+ // This could be an optional "shift" or "extend" operand.
+ OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
+ // We can only continue if no tokens were eaten.
+ if (GotShift != MatchOperand_NoMatch)
+ return GotShift;
+
+ // This was not a register so parse other operands that start with an
+ // identifier (like labels) as expressions and create them as immediates.
+ const MCExpr *IdVal;
+ S = getLoc();
+ if (getParser().parseExpression(IdVal))
+ return true;
+
+ E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
return false;
}
- case AArch64::TLBIix: {
- int64_t ImmVal = Inst.getOperand(0).getImm();
- A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
- if (!A64TLBI::NeedsRegister(TLBIOp)) {
- return Error(Operands[1]->getStartLoc(),
- "specified TLBI op does not use a register");
+ case AsmToken::Integer:
+ case AsmToken::Real:
+ case AsmToken::Hash: {
+ // #42 -> immediate.
+ S = getLoc();
+ if (getLexer().is(AsmToken::Hash))
+ Parser.Lex();
+
+ // Parse a negative sign
+ bool isNegative = false;
+ if (Parser.getTok().is(AsmToken::Minus)) {
+ isNegative = true;
+ // We need to consume this token only when we have a Real, otherwise
+ // we let parseSymbolicImmVal take care of it
+ if (Parser.getLexer().peekTok().is(AsmToken::Real))
+ Parser.Lex();
}
- return false;
- }
- case AArch64::TLBIi: {
- int64_t ImmVal = Inst.getOperand(0).getImm();
- A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
- if (A64TLBI::NeedsRegister(TLBIOp)) {
- return Error(Operands[1]->getStartLoc(),
- "specified TLBI op requires a register");
+
+ // The only Real that should come through here is a literal #0.0 for
+ // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
+ // so convert the value.
+ const AsmToken &Tok = Parser.getTok();
+ if (Tok.is(AsmToken::Real)) {
+ APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
+ uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
+ if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
+ Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
+ Mnemonic != "fcmlt")
+ return TokError("unexpected floating point literal");
+ else if (IntVal != 0 || isNegative)
+ return TokError("expected floating-point constant #0.0");
+ Parser.Lex(); // Eat the token.
+
+ Operands.push_back(
+ AArch64Operand::CreateToken("#0", false, S, getContext()));
+ Operands.push_back(
+ AArch64Operand::CreateToken(".0", false, S, getContext()));
+ return false;
}
+
+ const MCExpr *ImmVal;
+ if (parseSymbolicImmVal(ImmVal))
+ return true;
+
+ E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
return false;
}
}
-
- return false;
}
-
-// Parses the instruction *together with* all operands, appending each parsed
-// operand to the "Operands" list
+/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
+/// operands.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- StringRef PatchedName = StringSwitch<StringRef>(Name.lower())
- .Case("beq", "b.eq")
- .Case("bne", "b.ne")
- .Case("bhs", "b.hs")
- .Case("bcs", "b.cs")
- .Case("blo", "b.lo")
- .Case("bcc", "b.cc")
- .Case("bmi", "b.mi")
- .Case("bpl", "b.pl")
- .Case("bvs", "b.vs")
- .Case("bvc", "b.vc")
- .Case("bhi", "b.hi")
- .Case("bls", "b.ls")
- .Case("bge", "b.ge")
- .Case("blt", "b.lt")
- .Case("bgt", "b.gt")
- .Case("ble", "b.le")
- .Case("bal", "b.al")
- .Case("bnv", "b.nv")
- .Default(Name);
-
- size_t CondCodePos = PatchedName.find('.');
-
- StringRef Mnemonic = PatchedName.substr(0, CondCodePos);
- Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));
-
- if (CondCodePos != StringRef::npos) {
- // We have a condition code
- SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
- StringRef CondStr = PatchedName.substr(CondCodePos + 1, StringRef::npos);
- A64CC::CondCodes Code;
-
- Code = A64StringToCondCode(CondStr);
-
- if (Code == A64CC::Invalid) {
- Error(S, "invalid condition code");
+ OperandVector &Operands) {
+ Name = StringSwitch<StringRef>(Name.lower())
+ .Case("beq", "b.eq")
+ .Case("bne", "b.ne")
+ .Case("bhs", "b.hs")
+ .Case("bcs", "b.cs")
+ .Case("blo", "b.lo")
+ .Case("bcc", "b.cc")
+ .Case("bmi", "b.mi")
+ .Case("bpl", "b.pl")
+ .Case("bvs", "b.vs")
+ .Case("bvc", "b.vc")
+ .Case("bhi", "b.hi")
+ .Case("bls", "b.ls")
+ .Case("bge", "b.ge")
+ .Case("blt", "b.lt")
+ .Case("bgt", "b.gt")
+ .Case("ble", "b.le")
+ .Case("bal", "b.al")
+ .Case("bnv", "b.nv")
+ .Default(Name);
+
+ // Create the leading tokens for the mnemonic, split by '.' characters.
+ size_t Start = 0, Next = Name.find('.');
+ StringRef Head = Name.slice(Start, Next);
+
+ // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
+ if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
+ bool IsError = parseSysAlias(Head, NameLoc, Operands);
+ if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
Parser.eatToEndOfStatement();
- return true;
- }
-
- SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);
-
- Operands.push_back(AArch64Operand::CreateToken(".", DotL));
- SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
- Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
+ return IsError;
}
- // Now we parse the operands of this instruction
+ Operands.push_back(
+ AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
+ Mnemonic = Head;
+
+ // Handle condition codes for a branch mnemonic
+ if (Head == "b" && Next != StringRef::npos) {
+ Start = Next;
+ Next = Name.find('.', Start + 1);
+ Head = Name.slice(Start + 1, Next);
+
+ SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
+ (Head.data() - Name.data()));
+ AArch64CC::CondCode CC = parseCondCodeString(Head);
+ if (CC == AArch64CC::Invalid)
+ return Error(SuffixLoc, "invalid condition code");
+ Operands.push_back(
+ AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
+ Operands.push_back(
+ AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
+ }
+
+ // Add the remaining tokens in the mnemonic.
+ while (Next != StringRef::npos) {
+ Start = Next;
+ Next = Name.find('.', Start + 1);
+ Head = Name.slice(Start, Next);
+ SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
+ (Head.data() - Name.data()) + 1);
+ Operands.push_back(
+ AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
+ }
+
+ // Conditional compare instructions have a Condition Code operand, which needs
+ // to be parsed and an immediate operand created.
+ bool condCodeFourthOperand =
+ (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
+ Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
+ Head == "csinc" || Head == "csinv" || Head == "csneg");
+
+ // These instructions are aliases to some of the conditional select
+ // instructions. However, the condition code is inverted in the aliased
+ // instruction.
+ //
+ // FIXME: Is this the correct way to handle these? Or should the parser
+ // generate the aliased instructions directly?
+ bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
+ bool condCodeThirdOperand =
+ (Head == "cinc" || Head == "cinv" || Head == "cneg");
+
+ // Read the remaining operands.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
// Read the first operand.
- if (ParseOperand(Operands, Mnemonic)) {
+ if (parseOperand(Operands, false, false)) {
Parser.eatToEndOfStatement();
return true;
}
+ unsigned N = 2;
while (getLexer().is(AsmToken::Comma)) {
- Parser.Lex(); // Eat the comma.
+ Parser.Lex(); // Eat the comma.
// Parse and remember the operand.
- if (ParseOperand(Operands, Mnemonic)) {
+ if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
+ (N == 3 && condCodeThirdOperand) ||
+ (N == 2 && condCodeSecondOperand),
+ condCodeSecondOperand || condCodeThirdOperand)) {
Parser.eatToEndOfStatement();
return true;
}
-
// After successfully parsing some operands there are two special cases to
// consider (i.e. notional operands not separated by commas). Both are due
// to memory specifiers:
@@ -2321,52 +3121,716 @@ bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
// in the given context!
if (Parser.getTok().is(AsmToken::RBrac)) {
SMLoc Loc = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateToken("]", Loc));
+ Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
+ getContext()));
Parser.Lex();
}
if (Parser.getTok().is(AsmToken::Exclaim)) {
SMLoc Loc = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateToken("!", Loc));
+ Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
+ getContext()));
Parser.Lex();
}
+
+ ++N;
}
}
if (getLexer().isNot(AsmToken::EndOfStatement)) {
- SMLoc Loc = getLexer().getLoc();
+ SMLoc Loc = Parser.getTok().getLoc();
Parser.eatToEndOfStatement();
- return Error(Loc, "expected comma before next operand");
+ return Error(Loc, "unexpected token in argument list");
}
- // Eat the EndOfStatement
- Parser.Lex();
-
+ Parser.Lex(); // Consume the EndOfStatement
return false;
}
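+// For example, "b.ne lbl" is split into a "b" token, a "." token and a
+// condition-code operand, and "ccmp x0, x1, #0, eq" parses its fourth
+// operand as a condition code via condCodeFourthOperand above.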
+// FIXME: This entire function is a giant hack to provide us with decent
+// operand range validation/diagnostics until TableGen/MC can be extended
+// to support autogeneration of this kind of validation.
+bool AArch64AsmParser::validateInstruction(MCInst &Inst,
+ SmallVectorImpl<SMLoc> &Loc) {
+ const MCRegisterInfo *RI = getContext().getRegisterInfo();
+ // Check for indexed addressing modes w/ the base register being the
+ // same as a destination/source register or pair load where
+ // the Rt == Rt2. All of those are undefined behaviour.
+ switch (Inst.getOpcode()) {
+ case AArch64::LDPSWpre:
+ case AArch64::LDPWpost:
+ case AArch64::LDPWpre:
+ case AArch64::LDPXpost:
+ case AArch64::LDPXpre: {
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rt2 = Inst.getOperand(2).getReg();
+ unsigned Rn = Inst.getOperand(3).getReg();
+ if (RI->isSubRegisterEq(Rn, Rt))
+ return Error(Loc[0], "unpredictable LDP instruction, writeback base "
+ "is also a destination");
+ if (RI->isSubRegisterEq(Rn, Rt2))
+ return Error(Loc[1], "unpredictable LDP instruction, writeback base "
+ "is also a destination");
+ // FALLTHROUGH
+ }
+ case AArch64::LDPDi:
+ case AArch64::LDPQi:
+ case AArch64::LDPSi:
+ case AArch64::LDPSWi:
+ case AArch64::LDPWi:
+ case AArch64::LDPXi: {
+ unsigned Rt = Inst.getOperand(0).getReg();
+ unsigned Rt2 = Inst.getOperand(1).getReg();
+ if (Rt == Rt2)
+ return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
+ break;
+ }
+ case AArch64::LDPDpost:
+ case AArch64::LDPDpre:
+ case AArch64::LDPQpost:
+ case AArch64::LDPQpre:
+ case AArch64::LDPSpost:
+ case AArch64::LDPSpre:
+ case AArch64::LDPSWpost: {
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rt2 = Inst.getOperand(2).getReg();
+ if (Rt == Rt2)
+ return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
+ break;
+ }
+ case AArch64::STPDpost:
+ case AArch64::STPDpre:
+ case AArch64::STPQpost:
+ case AArch64::STPQpre:
+ case AArch64::STPSpost:
+ case AArch64::STPSpre:
+ case AArch64::STPWpost:
+ case AArch64::STPWpre:
+ case AArch64::STPXpost:
+ case AArch64::STPXpre: {
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rt2 = Inst.getOperand(2).getReg();
+ unsigned Rn = Inst.getOperand(3).getReg();
+ if (RI->isSubRegisterEq(Rn, Rt))
+ return Error(Loc[0], "unpredictable STP instruction, writeback base "
+ "is also a source");
+ if (RI->isSubRegisterEq(Rn, Rt2))
+ return Error(Loc[1], "unpredictable STP instruction, writeback base "
+ "is also a source");
+ break;
+ }
+ case AArch64::LDRBBpre:
+ case AArch64::LDRBpre:
+ case AArch64::LDRHHpre:
+ case AArch64::LDRHpre:
+ case AArch64::LDRSBWpre:
+ case AArch64::LDRSBXpre:
+ case AArch64::LDRSHWpre:
+ case AArch64::LDRSHXpre:
+ case AArch64::LDRSWpre:
+ case AArch64::LDRWpre:
+ case AArch64::LDRXpre:
+ case AArch64::LDRBBpost:
+ case AArch64::LDRBpost:
+ case AArch64::LDRHHpost:
+ case AArch64::LDRHpost:
+ case AArch64::LDRSBWpost:
+ case AArch64::LDRSBXpost:
+ case AArch64::LDRSHWpost:
+ case AArch64::LDRSHXpost:
+ case AArch64::LDRSWpost:
+ case AArch64::LDRWpost:
+ case AArch64::LDRXpost: {
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rn = Inst.getOperand(2).getReg();
+ if (RI->isSubRegisterEq(Rn, Rt))
+ return Error(Loc[0], "unpredictable LDR instruction, writeback base "
+ "is also a source");
+ break;
+ }
+ case AArch64::STRBBpost:
+ case AArch64::STRBpost:
+ case AArch64::STRHHpost:
+ case AArch64::STRHpost:
+ case AArch64::STRWpost:
+ case AArch64::STRXpost:
+ case AArch64::STRBBpre:
+ case AArch64::STRBpre:
+ case AArch64::STRHHpre:
+ case AArch64::STRHpre:
+ case AArch64::STRWpre:
+ case AArch64::STRXpre: {
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rn = Inst.getOperand(2).getReg();
+ if (RI->isSubRegisterEq(Rn, Rt))
+ return Error(Loc[0], "unpredictable STR instruction, writeback base "
+ "is also a source");
+ break;
+ }
+ }
+
+ // Now check immediate ranges. Separate from the above as there is overlap
+ // in the instructions being checked and this keeps the nested conditionals
+ // to a minimum.
+ switch (Inst.getOpcode()) {
+ case AArch64::ADDSWri:
+ case AArch64::ADDSXri:
+ case AArch64::ADDWri:
+ case AArch64::ADDXri:
+ case AArch64::SUBSWri:
+ case AArch64::SUBSXri:
+ case AArch64::SUBWri:
+ case AArch64::SUBXri: {
+ // Annoyingly we can't do this in the isAddSubImm predicate, so there is
+ // some slight duplication here.
+ if (Inst.getOperand(2).isExpr()) {
+ const MCExpr *Expr = Inst.getOperand(2).getExpr();
+ AArch64MCExpr::VariantKind ELFRefKind;
+ MCSymbolRefExpr::VariantKind DarwinRefKind;
+ int64_t Addend;
+ if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
+ return Error(Loc[2], "invalid immediate expression");
+ }
+
+ // Only allow these with ADDXri.
+ if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
+ DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
+ Inst.getOpcode() == AArch64::ADDXri)
+ return false;
+
+ // Only allow these with ADDXri/ADDWri
+ if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
+ ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
+ ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
+ ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
+ ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
+ (Inst.getOpcode() == AArch64::ADDXri ||
+ Inst.getOpcode() == AArch64::ADDWri))
+ return false;
+
+ // Don't allow expressions in the immediate field otherwise
+ return Error(Loc[2], "invalid immediate expression");
+ }
+ return false;
+ }
+ default:
+ return false;
+ }
+}
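+// The checks above reject unpredictable encodings, e.g. "ldp x0, x0, [x1]"
+// (Rt2 == Rt) and writeback forms such as "ldr x2, [x2], #8" where the base
+// overlaps Rt, and they restrict expression immediates on ADD/SUB to the
+// page-offset/lo12 variants listed.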
+
+bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
+ switch (ErrCode) {
+ case Match_MissingFeature:
+ return Error(Loc,
+ "instruction requires a CPU feature not currently enabled");
+ case Match_InvalidOperand:
+ return Error(Loc, "invalid operand for instruction");
+ case Match_InvalidSuffix:
+ return Error(Loc, "invalid type suffix for instruction");
+ case Match_InvalidCondCode:
+ return Error(Loc, "expected AArch64 condition code");
+ case Match_AddSubRegExtendSmall:
+ return Error(Loc,
+ "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
+ case Match_AddSubRegExtendLarge:
+ return Error(Loc,
+ "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
+ case Match_AddSubSecondSource:
+ return Error(Loc,
+ "expected compatible register, symbol or integer in range [0, 4095]");
+ case Match_LogicalSecondSource:
+ return Error(Loc, "expected compatible register or logical immediate");
+ case Match_InvalidMovImm32Shift:
+ return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
+ case Match_InvalidMovImm64Shift:
+ return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
+ case Match_AddSubRegShift32:
+ return Error(Loc,
+ "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
+ case Match_AddSubRegShift64:
+ return Error(Loc,
+ "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
+ case Match_InvalidFPImm:
+ return Error(Loc,
+ "expected compatible register or floating-point constant");
+ case Match_InvalidMemoryIndexedSImm9:
+ return Error(Loc, "index must be an integer in range [-256, 255].");
+ case Match_InvalidMemoryIndexed4SImm7:
+ return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
+ case Match_InvalidMemoryIndexed8SImm7:
+ return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
+ case Match_InvalidMemoryIndexed16SImm7:
+ return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
+ case Match_InvalidMemoryWExtend8:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0");
+ case Match_InvalidMemoryWExtend16:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
+ case Match_InvalidMemoryWExtend32:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
+ case Match_InvalidMemoryWExtend64:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
+ case Match_InvalidMemoryWExtend128:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
+ case Match_InvalidMemoryXExtend8:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0");
+ case Match_InvalidMemoryXExtend16:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
+ case Match_InvalidMemoryXExtend32:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
+ case Match_InvalidMemoryXExtend64:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
+ case Match_InvalidMemoryXExtend128:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
+ case Match_InvalidMemoryIndexed1:
+ return Error(Loc, "index must be an integer in range [0, 4095].");
+ case Match_InvalidMemoryIndexed2:
+ return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
+ case Match_InvalidMemoryIndexed4:
+ return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
+ case Match_InvalidMemoryIndexed8:
+ return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
+ case Match_InvalidMemoryIndexed16:
+ return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
+ case Match_InvalidImm0_7:
+ return Error(Loc, "immediate must be an integer in range [0, 7].");
+ case Match_InvalidImm0_15:
+ return Error(Loc, "immediate must be an integer in range [0, 15].");
+ case Match_InvalidImm0_31:
+ return Error(Loc, "immediate must be an integer in range [0, 31].");
+ case Match_InvalidImm0_63:
+ return Error(Loc, "immediate must be an integer in range [0, 63].");
+ case Match_InvalidImm0_127:
+ return Error(Loc, "immediate must be an integer in range [0, 127].");
+ case Match_InvalidImm0_65535:
+ return Error(Loc, "immediate must be an integer in range [0, 65535].");
+ case Match_InvalidImm1_8:
+ return Error(Loc, "immediate must be an integer in range [1, 8].");
+ case Match_InvalidImm1_16:
+ return Error(Loc, "immediate must be an integer in range [1, 16].");
+ case Match_InvalidImm1_32:
+ return Error(Loc, "immediate must be an integer in range [1, 32].");
+ case Match_InvalidImm1_64:
+ return Error(Loc, "immediate must be an integer in range [1, 64].");
+ case Match_InvalidIndex1:
+ return Error(Loc, "expected lane specifier '[1]'");
+ case Match_InvalidIndexB:
+ return Error(Loc, "vector lane must be an integer in range [0, 15].");
+ case Match_InvalidIndexH:
+ return Error(Loc, "vector lane must be an integer in range [0, 7].");
+ case Match_InvalidIndexS:
+ return Error(Loc, "vector lane must be an integer in range [0, 3].");
+ case Match_InvalidIndexD:
+ return Error(Loc, "vector lane must be an integer in range [0, 1].");
+ case Match_InvalidLabel:
+ return Error(Loc, "expected label or encodable integer pc offset");
+ case Match_MRS:
+ return Error(Loc, "expected readable system register");
+ case Match_MSR:
+ return Error(Loc, "expected writable system register or pstate");
+ case Match_MnemonicFail:
+ return Error(Loc, "unrecognized instruction mnemonic");
+ default:
+ llvm_unreachable("unexpected error code!");
+ }
+}
+
+static const char *getSubtargetFeatureName(unsigned Val);
+
+bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands,
+ MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) {
+ assert(!Operands.empty() && "Unexpected empty operand list!");
+ AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[0]);
+ assert(Op->isToken() && "Leading operand should always be a mnemonic!");
+
+ StringRef Tok = Op->getToken();
+ unsigned NumOperands = Operands.size();
+
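+ // "lsl Rd, Rn, #imm" is an alias for UBFM. A worked example (illustrative):
+ // "lsl x0, x1, #3" is rewritten below to "ubfm x0, x1, #61, #60", since
+ // immr = (64 - 3) & 0x3f and imms = 63 - 3.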
+ if (NumOperands == 4 && Tok == "lsl") {
+ AArch64Operand *Op2 = static_cast<AArch64Operand *>(Operands[2]);
+ AArch64Operand *Op3 = static_cast<AArch64Operand *>(Operands[3]);
+ if (Op2->isReg() && Op3->isImm()) {
+ const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
+ if (Op3CE) {
+ uint64_t Op3Val = Op3CE->getValue();
+ uint64_t NewOp3Val = 0;
+ uint64_t NewOp4Val = 0;
+ if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
+ Op2->getReg())) {
+ NewOp3Val = (32 - Op3Val) & 0x1f;
+ NewOp4Val = 31 - Op3Val;
+ } else {
+ NewOp3Val = (64 - Op3Val) & 0x3f;
+ NewOp4Val = 63 - Op3Val;
+ }
+
+ const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
+ const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
+
+ Operands[0] = AArch64Operand::CreateToken(
+ "ubfm", false, Op->getStartLoc(), getContext());
+ Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
+ Op3->getEndLoc(), getContext());
+ Operands.push_back(AArch64Operand::CreateImm(
+ NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
+ delete Op3;
+ delete Op;
+ }
+ }
+ } else if (NumOperands == 5) {
+ // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
+ // UBFIZ -> UBFM aliases.
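+ // A worked example (illustrative): "bfi x0, x1, #8, #4" becomes
+ // "bfm x0, x1, #56, #3", i.e. immr = (64 - lsb) % 64 and imms = width - 1.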
+ if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
+ AArch64Operand *Op1 = static_cast<AArch64Operand *>(Operands[1]);
+ AArch64Operand *Op3 = static_cast<AArch64Operand *>(Operands[3]);
+ AArch64Operand *Op4 = static_cast<AArch64Operand *>(Operands[4]);
+
+ if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
+ const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
+ const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
+
+ if (Op3CE && Op4CE) {
+ uint64_t Op3Val = Op3CE->getValue();
+ uint64_t Op4Val = Op4CE->getValue();
+
+ uint64_t RegWidth = 0;
+ if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
+ Op1->getReg()))
+ RegWidth = 64;
+ else
+ RegWidth = 32;
+
+ if (Op3Val >= RegWidth)
+ return Error(Op3->getStartLoc(),
+ "expected integer in range [0, " + Twine(RegWidth - 1) + "]");
+ if (Op4Val < 1 || Op4Val > RegWidth)
+ return Error(Op4->getStartLoc(),
+ "expected integer in range [1, " + Twine(RegWidth) + "]");
+
+ uint64_t NewOp3Val = 0;
+ if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
+ Op1->getReg()))
+ NewOp3Val = (32 - Op3Val) & 0x1f;
+ else
+ NewOp3Val = (64 - Op3Val) & 0x3f;
+
+ uint64_t NewOp4Val = Op4Val - 1;
+
+ if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
+ return Error(Op4->getStartLoc(),
+ "requested insert overflows register");
+
+ const MCExpr *NewOp3 =
+ MCConstantExpr::Create(NewOp3Val, getContext());
+ const MCExpr *NewOp4 =
+ MCConstantExpr::Create(NewOp4Val, getContext());
+ Operands[3] = AArch64Operand::CreateImm(
+ NewOp3, Op3->getStartLoc(), Op3->getEndLoc(), getContext());
+ Operands[4] = AArch64Operand::CreateImm(
+ NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
+ if (Tok == "bfi")
+ Operands[0] = AArch64Operand::CreateToken(
+ "bfm", false, Op->getStartLoc(), getContext());
+ else if (Tok == "sbfiz")
+ Operands[0] = AArch64Operand::CreateToken(
+ "sbfm", false, Op->getStartLoc(), getContext());
+ else if (Tok == "ubfiz")
+ Operands[0] = AArch64Operand::CreateToken(
+ "ubfm", false, Op->getStartLoc(), getContext());
+ else
+ llvm_unreachable("No valid mnemonic for alias?");
+
+ delete Op;
+ delete Op3;
+ delete Op4;
+ }
+ }
+
+ // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
+ // UBFX -> UBFM aliases.
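+ // A worked example (illustrative): "ubfx x0, x1, #4, #8" becomes
+ // "ubfm x0, x1, #4, #11", i.e. immr = lsb and imms = lsb + width - 1.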
+ } else if (NumOperands == 5 &&
+ (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
+ AArch64Operand *Op1 = static_cast<AArch64Operand *>(Operands[1]);
+ AArch64Operand *Op3 = static_cast<AArch64Operand *>(Operands[3]);
+ AArch64Operand *Op4 = static_cast<AArch64Operand *>(Operands[4]);
+
+ if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
+ const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
+ const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
+
+ if (Op3CE && Op4CE) {
+ uint64_t Op3Val = Op3CE->getValue();
+ uint64_t Op4Val = Op4CE->getValue();
+
+ uint64_t RegWidth = 0;
+ if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
+ Op1->getReg()))
+ RegWidth = 64;
+ else
+ RegWidth = 32;
+
+ if (Op3Val >= RegWidth)
+ return Error(Op3->getStartLoc(),
+ "expected integer in range [0, " + Twine(RegWidth - 1) + "]");
+ if (Op4Val < 1 || Op4Val > RegWidth)
+ return Error(Op4->getStartLoc(),
+ "expected integer in range [1, " + Twine(RegWidth) + "]");
+
+ uint64_t NewOp4Val = Op3Val + Op4Val - 1;
+
+ if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
+ return Error(Op4->getStartLoc(),
+ "requested extract overflows register");
+
+ const MCExpr *NewOp4 =
+ MCConstantExpr::Create(NewOp4Val, getContext());
+ Operands[4] = AArch64Operand::CreateImm(
+ NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
+ if (Tok == "bfxil")
+ Operands[0] = AArch64Operand::CreateToken(
+ "bfm", false, Op->getStartLoc(), getContext());
+ else if (Tok == "sbfx")
+ Operands[0] = AArch64Operand::CreateToken(
+ "sbfm", false, Op->getStartLoc(), getContext());
+ else if (Tok == "ubfx")
+ Operands[0] = AArch64Operand::CreateToken(
+ "ubfm", false, Op->getStartLoc(), getContext());
+ else
+ llvm_unreachable("No valid mnemonic for alias?");
+
+ delete Op;
+ delete Op4;
+ }
+ }
+ }
+ }
+ // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
+ // InstAlias can't quite handle this since the reg classes aren't
+ // subclasses.
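+ // e.g. (illustrative) "sxtw x0, w1": the matcher wants a GPR64 source, so
+ // w1 is rewritten to x1 below.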
+ if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
+ // The source register can be Wn here, but the matcher expects a
+ // GPR64. Twiddle it here if necessary.
+ AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[2]);
+ if (Op->isReg()) {
+ unsigned Reg = getXRegFromWReg(Op->getReg());
+ Operands[2] = AArch64Operand::CreateReg(Reg, false, Op->getStartLoc(),
+ Op->getEndLoc(), getContext());
+ delete Op;
+ }
+ }
+ // FIXME: Likewise for sxt[bh] with a Xd dst operand
+ else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
+ AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[1]);
+ if (Op->isReg() &&
+ AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
+ Op->getReg())) {
+ // The source register can be Wn here, but the matcher expects a
+ // GPR64. Twiddle it here if necessary.
+ AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[2]);
+ if (Op->isReg()) {
+ unsigned Reg = getXRegFromWReg(Op->getReg());
+ Operands[2] = AArch64Operand::CreateReg(Reg, false, Op->getStartLoc(),
+ Op->getEndLoc(), getContext());
+ delete Op;
+ }
+ }
+ }
+ // FIXME: Likewise for uxt[bh] with a Xd dst operand
+ else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
+ AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[1]);
+ if (Op->isReg() &&
+ AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
+ Op->getReg())) {
+ // The destination register can be Xn here, but the matcher expects a
+ // GPR32. Twiddle it here if necessary.
+ AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[1]);
+ if (Op->isReg()) {
+ unsigned Reg = getWRegFromXReg(Op->getReg());
+ Operands[1] = AArch64Operand::CreateReg(Reg, false, Op->getStartLoc(),
+ Op->getEndLoc(), getContext());
+ delete Op;
+ }
+ }
+ }
+
+ // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
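+ // e.g. (illustrative) "fmov s0, #0.0" becomes "fmov s0, wzr" and
+ // "fmov d0, #0.0" becomes "fmov d0, xzr".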
+ if (NumOperands == 3 && Tok == "fmov") {
+ AArch64Operand *RegOp = static_cast<AArch64Operand *>(Operands[1]);
+ AArch64Operand *ImmOp = static_cast<AArch64Operand *>(Operands[2]);
+ if (RegOp->isReg() && ImmOp->isFPImm() &&
+ ImmOp->getFPImm() == (unsigned)-1) {
+ unsigned ZReg =
+ AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
+ RegOp->getReg())
+ ? AArch64::WZR
+ : AArch64::XZR;
+ Operands[2] = AArch64Operand::CreateReg(ZReg, false, Op->getStartLoc(),
+ Op->getEndLoc(), getContext());
+ delete ImmOp;
+ }
+ }
+
+ MCInst Inst;
+ // First try to match against the secondary set of tables containing the
+ // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
+ unsigned MatchResult =
+ MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
+
+ // If that fails, try against the alternate table containing long-form NEON:
+ // "fadd v0.2s, v1.2s, v2.2s"
+ if (MatchResult != Match_Success)
+ MatchResult =
+ MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
+
+ switch (MatchResult) {
+ case Match_Success: {
+ // Perform range checking and other semantic validations
+ SmallVector<SMLoc, 8> OperandLocs;
+ NumOperands = Operands.size();
+ for (unsigned i = 1; i < NumOperands; ++i)
+ OperandLocs.push_back(Operands[i]->getStartLoc());
+ if (validateInstruction(Inst, OperandLocs))
+ return true;
+
+ Inst.setLoc(IDLoc);
+ Out.EmitInstruction(Inst, STI);
+ return false;
+ }
+ case Match_MissingFeature: {
+ assert(ErrorInfo && "Unknown missing feature!");
+ // Special case the error message for the very common case where only
+ // a single subtarget feature is missing (neon, e.g.).
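+ // The resulting diagnostic then reads, e.g., "instruction requires: neon".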
+ std::string Msg = "instruction requires:";
+ unsigned Mask = 1;
+ for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
+ if (ErrorInfo & Mask) {
+ Msg += " ";
+ Msg += getSubtargetFeatureName(ErrorInfo & Mask);
+ }
+ Mask <<= 1;
+ }
+ return Error(IDLoc, Msg);
+ }
+ case Match_MnemonicFail:
+ return showMatchError(IDLoc, MatchResult);
+ case Match_InvalidOperand: {
+ SMLoc ErrorLoc = IDLoc;
+ if (ErrorInfo != ~0U) {
+ if (ErrorInfo >= Operands.size())
+ return Error(IDLoc, "too few operands for instruction");
+
+ ErrorLoc = ((AArch64Operand *)Operands[ErrorInfo])->getStartLoc();
+ if (ErrorLoc == SMLoc())
+ ErrorLoc = IDLoc;
+
+ // If the match failed on a suffix token operand, tweak the diagnostic
+ // accordingly. Keep this inside the check above so we never index
+ // Operands with ~0U.
+ if (((AArch64Operand *)Operands[ErrorInfo])->isToken() &&
+ ((AArch64Operand *)Operands[ErrorInfo])->isTokenSuffix())
+ MatchResult = Match_InvalidSuffix;
+ }
+
+ return showMatchError(ErrorLoc, MatchResult);
+ }
+ case Match_InvalidMemoryIndexed1:
+ case Match_InvalidMemoryIndexed2:
+ case Match_InvalidMemoryIndexed4:
+ case Match_InvalidMemoryIndexed8:
+ case Match_InvalidMemoryIndexed16:
+ case Match_InvalidCondCode:
+ case Match_AddSubRegExtendSmall:
+ case Match_AddSubRegExtendLarge:
+ case Match_AddSubSecondSource:
+ case Match_LogicalSecondSource:
+ case Match_AddSubRegShift32:
+ case Match_AddSubRegShift64:
+ case Match_InvalidMovImm32Shift:
+ case Match_InvalidMovImm64Shift:
+ case Match_InvalidFPImm:
+ case Match_InvalidMemoryWExtend8:
+ case Match_InvalidMemoryWExtend16:
+ case Match_InvalidMemoryWExtend32:
+ case Match_InvalidMemoryWExtend64:
+ case Match_InvalidMemoryWExtend128:
+ case Match_InvalidMemoryXExtend8:
+ case Match_InvalidMemoryXExtend16:
+ case Match_InvalidMemoryXExtend32:
+ case Match_InvalidMemoryXExtend64:
+ case Match_InvalidMemoryXExtend128:
+ case Match_InvalidMemoryIndexed4SImm7:
+ case Match_InvalidMemoryIndexed8SImm7:
+ case Match_InvalidMemoryIndexed16SImm7:
+ case Match_InvalidMemoryIndexedSImm9:
+ case Match_InvalidImm0_7:
+ case Match_InvalidImm0_15:
+ case Match_InvalidImm0_31:
+ case Match_InvalidImm0_63:
+ case Match_InvalidImm0_127:
+ case Match_InvalidImm0_65535:
+ case Match_InvalidImm1_8:
+ case Match_InvalidImm1_16:
+ case Match_InvalidImm1_32:
+ case Match_InvalidImm1_64:
+ case Match_InvalidIndex1:
+ case Match_InvalidIndexB:
+ case Match_InvalidIndexH:
+ case Match_InvalidIndexS:
+ case Match_InvalidIndexD:
+ case Match_InvalidLabel:
+ case Match_MSR:
+ case Match_MRS: {
+ // Any time we get here, there's nothing fancy to do. Just get the
+ // operand SMLoc and display the diagnostic.
+ SMLoc ErrorLoc = IDLoc;
+ if (ErrorInfo != ~0U && ErrorInfo < Operands.size()) {
+ ErrorLoc = ((AArch64Operand *)Operands[ErrorInfo])->getStartLoc();
+ if (ErrorLoc == SMLoc())
+ ErrorLoc = IDLoc;
+ }
+ return showMatchError(ErrorLoc, MatchResult);
+ }
+ }
+
+ llvm_unreachable("Implement any new match types added!");
+ return true;
+}
+
/// ParseDirective parses the AArch64-specific directives.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
StringRef IDVal = DirectiveID.getIdentifier();
+ SMLoc Loc = DirectiveID.getLoc();
if (IDVal == ".hword")
- return ParseDirectiveWord(2, DirectiveID.getLoc());
- else if (IDVal == ".word")
- return ParseDirectiveWord(4, DirectiveID.getLoc());
- else if (IDVal == ".xword")
- return ParseDirectiveWord(8, DirectiveID.getLoc());
- else if (IDVal == ".tlsdesccall")
- return ParseDirectiveTLSDescCall(DirectiveID.getLoc());
-
- return true;
+ return parseDirectiveWord(2, Loc);
+ if (IDVal == ".word")
+ return parseDirectiveWord(4, Loc);
+ if (IDVal == ".xword")
+ return parseDirectiveWord(8, Loc);
+ if (IDVal == ".tlsdesccall")
+ return parseDirectiveTLSDescCall(Loc);
+
+ return parseDirectiveLOH(IDVal, Loc);
}
/// parseDirectiveWord
/// ::= .word [ expression (, expression)* ]
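+/// e.g. (illustrative) ".word 0x10, bar + 4" emits two 32-bit values.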
-bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
+bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
if (getLexer().isNot(AsmToken::EndOfStatement)) {
for (;;) {
const MCExpr *Value;
if (getParser().parseExpression(Value))
- return false;
+ return true;
getParser().getStreamer().EmitValue(Value, Size);
@@ -2374,10 +3838,8 @@ bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
break;
// FIXME: Improve diagnostic.
- if (getLexer().isNot(AsmToken::Comma)) {
- Error(L, "unexpected token in directive");
- return false;
- }
+ if (getLexer().isNot(AsmToken::Comma))
+ return Error(L, "unexpected token in directive");
Parser.Lex();
}
}
@@ -2388,15 +3850,14 @@ bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
// parseDirectiveTLSDescCall:
// ::= .tlsdesccall symbol
-bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
+bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
StringRef Name;
- if (getParser().parseIdentifier(Name)) {
- Error(L, "expected symbol after directive");
- return false;
- }
+ if (getParser().parseIdentifier(Name))
+ return Error(L, "expected symbol after directive");
MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
- const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
+ const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
+ Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
MCInst Inst;
Inst.setOpcode(AArch64::TLSDESCCALL);
@@ -2406,271 +3867,181 @@ bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
return false;
}
+/// parseDirectiveLOH
+/// ::= .loh <lohName | lohId> label1, ..., labelN
+/// The number of arguments depends on the loh identifier.
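+/// e.g. (illustrative) ".loh AdrpLdr Ltmp0, Ltmp1" marks the ADRP at Ltmp0
+/// and the LDR at Ltmp1 as a linker-optimizable pair.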
+bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
+ if (IDVal != MCLOHDirectiveName())
+ return true;
+ MCLOHType Kind;
+ if (getParser().getTok().isNot(AsmToken::Identifier)) {
+ if (getParser().getTok().isNot(AsmToken::Integer))
+ return TokError("expected an identifier or a number in directive");
+ // We have a numeric token; check that it denotes a valid LOH kind.
+ int64_t Id = getParser().getTok().getIntVal();
+ Kind = (MCLOHType)Id;
+ // Check that Id does not overflow MCLOHType.
+ if (!isValidMCLOHType(Kind) || Id != Kind)
+ return TokError("invalid numeric identifier in directive");
+ } else {
+ StringRef Name = getTok().getIdentifier();
+ // We parsed an identifier; check that it names a known LOH kind.
+ int Id = MCLOHNameToId(Name);
+
+ if (Id == -1)
+ return TokError("invalid identifier in directive");
+ Kind = (MCLOHType)Id;
+ }
+ // Consume the identifier.
+ Lex();
+ // Get the number of arguments of this LOH.
+ int NbArgs = MCLOHIdToNbArgs(Kind);
+
+ assert(NbArgs != -1 && "Invalid number of arguments");
+
+ SmallVector<MCSymbol *, 3> Args;
+ for (int Idx = 0; Idx < NbArgs; ++Idx) {
+ StringRef Name;
+ if (getParser().parseIdentifier(Name))
+ return TokError("expected identifier in directive");
+ Args.push_back(getContext().GetOrCreateSymbol(Name));
+
+ if (Idx + 1 == NbArgs)
+ break;
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
+ Lex();
+ }
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
-bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm) {
- MCInst Inst;
- unsigned MatchResult;
- MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
- MatchingInlineAsm);
+ getStreamer().EmitLOHDirective(Kind, Args);
+ return false;
+}
- if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
- return Error(IDLoc, "too few operands for instruction");
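+/// Classify a symbolic expression into an ELF or Darwin reference kind, the
+/// underlying symbol and an optional constant addend, e.g. (illustrative)
+/// ":lo12:sym + 4" (ELF) or "_sym@PAGE" (Darwin).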
+bool
+AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
+ AArch64MCExpr::VariantKind &ELFRefKind,
+ MCSymbolRefExpr::VariantKind &DarwinRefKind,
+ int64_t &Addend) {
+ ELFRefKind = AArch64MCExpr::VK_INVALID;
+ DarwinRefKind = MCSymbolRefExpr::VK_None;
+ Addend = 0;
+
+ if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
+ ELFRefKind = AE->getKind();
+ Expr = AE->getSubExpr();
+ }
+
+ const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
+ if (SE) {
+ // It's a simple symbol reference with no addend.
+ DarwinRefKind = SE->getKind();
+ return true;
+ }
- switch (MatchResult) {
- default: break;
- case Match_Success:
- if (validateInstruction(Inst, Operands))
- return true;
+ const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
+ if (!BE)
+ return false;
- Out.EmitInstruction(Inst, STI);
+ SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
+ if (!SE)
return false;
- case Match_MissingFeature:
- Error(IDLoc, "instruction requires a CPU feature not currently enabled");
- return true;
- case Match_InvalidOperand: {
- SMLoc ErrorLoc = IDLoc;
- if (ErrorInfo != ~0U) {
- ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
- if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
- }
+ DarwinRefKind = SE->getKind();
- return Error(ErrorLoc, "invalid operand for instruction");
- }
- case Match_MnemonicFail:
- return Error(IDLoc, "invalid instruction");
+ if (BE->getOpcode() != MCBinaryExpr::Add &&
+ BE->getOpcode() != MCBinaryExpr::Sub)
+ return false;
- case Match_AddSubRegExtendSmall:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
- case Match_AddSubRegExtendLarge:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
- case Match_AddSubRegShift32:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
- case Match_AddSubRegShift64:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
- case Match_AddSubSecondSource:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected compatible register, symbol or integer in range [0, 4095]");
- case Match_CVTFixedPos32:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [1, 32]");
- case Match_CVTFixedPos64:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [1, 64]");
- case Match_CondCode:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected AArch64 condition code");
- case Match_FPImm:
- // Any situation which allows a nontrivial floating-point constant also
- // allows a register.
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected compatible register or floating-point constant");
- case Match_FPZero:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected floating-point constant #0.0 or invalid register type");
- case Match_Label:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected label or encodable integer pc offset");
- case Match_Lane1:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected lane specifier '[1]'");
- case Match_LoadStoreExtend32_1:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'uxtw' or 'sxtw' with optional shift of #0");
- case Match_LoadStoreExtend32_2:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
- case Match_LoadStoreExtend32_4:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
- case Match_LoadStoreExtend32_8:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
- case Match_LoadStoreExtend32_16:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
- case Match_LoadStoreExtend64_1:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl' or 'sxtx' with optional shift of #0");
- case Match_LoadStoreExtend64_2:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
- case Match_LoadStoreExtend64_4:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
- case Match_LoadStoreExtend64_8:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
- case Match_LoadStoreExtend64_16:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
- case Match_LoadStoreSImm7_4:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer multiple of 4 in range [-256, 252]");
- case Match_LoadStoreSImm7_8:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer multiple of 8 in range [-512, 504]");
- case Match_LoadStoreSImm7_16:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer multiple of 16 in range [-1024, 1008]");
- case Match_LoadStoreSImm9:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [-256, 255]");
- case Match_LoadStoreUImm12_1:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected symbolic reference or integer in range [0, 4095]");
- case Match_LoadStoreUImm12_2:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected symbolic reference or integer in range [0, 8190]");
- case Match_LoadStoreUImm12_4:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected symbolic reference or integer in range [0, 16380]");
- case Match_LoadStoreUImm12_8:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected symbolic reference or integer in range [0, 32760]");
- case Match_LoadStoreUImm12_16:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected symbolic reference or integer in range [0, 65520]");
- case Match_LogicalSecondSource:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected compatible register or logical immediate");
- case Match_MOVWUImm16:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected relocated symbol or integer in range [0, 65535]");
- case Match_MRS:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected readable system register");
- case Match_MSR:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected writable system register or pstate");
- case Match_NamedImm_at:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
- case Match_NamedImm_dbarrier:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 15] or symbolic barrier operand");
- case Match_NamedImm_dc:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected symbolic 'dc' operand");
- case Match_NamedImm_ic:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
- case Match_NamedImm_isb:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 15] or 'sy'");
- case Match_NamedImm_prefetch:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
- case Match_NamedImm_tlbi:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected translation buffer invalidation operand");
- case Match_UImm16:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 65535]");
- case Match_UImm3:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 7]");
- case Match_UImm4:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 15]");
- case Match_UImm5:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 31]");
- case Match_UImm6:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 63]");
- case Match_UImm7:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 127]");
- case Match_Width32:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [<lsb>, 31]");
- case Match_Width64:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [<lsb>, 63]");
- case Match_ShrImm8:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [1, 8]");
- case Match_ShrImm16:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [1, 16]");
- case Match_ShrImm32:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [1, 32]");
- case Match_ShrImm64:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [1, 64]");
- case Match_ShlImm8:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 7]");
- case Match_ShlImm16:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 15]");
- case Match_ShlImm32:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 31]");
- case Match_ShlImm64:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 63]");
- }
+ // See if the addend is a constant; otherwise there's more going
+ // on here than we can deal with.
+ auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
+ if (!AddendExpr)
+ return false;
- llvm_unreachable("Implement any new match types added!");
- return true;
+ Addend = AddendExpr->getValue();
+ if (BE->getOpcode() == MCBinaryExpr::Sub)
+ Addend = -Addend;
+
+ // It's some symbol reference plus a constant addend, but it must not
+ // mix Darwin and ELF relocation syntax.
+ return ELFRefKind == AArch64MCExpr::VK_INVALID ||
+ DarwinRefKind == MCSymbolRefExpr::VK_None;
}
-void AArch64Operand::print(raw_ostream &OS) const {
+/// Force static initialization.
+extern "C" void LLVMInitializeAArch64AsmParser() {
+ RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
+ RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
+
+ RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64leTarget);
+ RegisterMCAsmParser<AArch64AsmParser> W(TheARM64beTarget);
+}
+
+#define GET_REGISTER_MATCHER
+#define GET_SUBTARGET_FEATURE_NAME
+#define GET_MATCHER_IMPLEMENTATION
+#include "AArch64GenAsmMatcher.inc"
+
+// Define this matcher function after the auto-generated include so we
+// have the match class enum definitions.
+unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
+ unsigned Kind) {
+ AArch64Operand *Op = static_cast<AArch64Operand *>(AsmOp);
+ // If the kind is a token for a literal immediate, check if our asm
+ // operand matches. This is for InstAliases which have a fixed-value
+ // immediate in the syntax.
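+ // The matcher names these classes MCK__35_<N>, "35" being the ASCII code
+ // for '#'; e.g. MCK__35_8 is the class for the literal token "#8".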
+ int64_t ExpectedVal;
switch (Kind) {
- case k_CondCode:
- OS << "<CondCode: " << CondCode.Code << ">";
+ default:
+ return Match_InvalidOperand;
+ case MCK__35_0:
+ ExpectedVal = 0;
break;
- case k_FPImmediate:
- OS << "<fpimm: " << FPImm.Val << ">";
+ case MCK__35_1:
+ ExpectedVal = 1;
break;
- case k_ImmWithLSL:
- OS << "<immwithlsl: imm=" << ImmWithLSL.Val
- << ", shift=" << ImmWithLSL.ShiftAmount << ">";
+ case MCK__35_12:
+ ExpectedVal = 12;
break;
- case k_Immediate:
- getImm()->print(OS);
+ case MCK__35_16:
+ ExpectedVal = 16;
break;
- case k_Register:
- OS << "<register " << getReg() << '>';
+ case MCK__35_2:
+ ExpectedVal = 2;
break;
- case k_Token:
- OS << '\'' << getToken() << '\'';
+ case MCK__35_24:
+ ExpectedVal = 24;
break;
- case k_ShiftExtend:
- OS << "<shift: type=" << ShiftExtend.ShiftType
- << ", amount=" << ShiftExtend.Amount << ">";
+ case MCK__35_3:
+ ExpectedVal = 3;
break;
- case k_SysReg: {
- StringRef Name(SysReg.Data, SysReg.Length);
- OS << "<sysreg: " << Name << '>';
+ case MCK__35_32:
+ ExpectedVal = 32;
break;
- }
- default:
- llvm_unreachable("No idea how to print this kind of operand");
+ case MCK__35_4:
+ ExpectedVal = 4;
+ break;
+ case MCK__35_48:
+ ExpectedVal = 48;
+ break;
+ case MCK__35_6:
+ ExpectedVal = 6;
+ break;
+ case MCK__35_64:
+ ExpectedVal = 64;
+ break;
+ case MCK__35_8:
+ ExpectedVal = 8;
break;
}
+ if (!Op->isImm())
+ return Match_InvalidOperand;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
+ if (!CE)
+ return Match_InvalidOperand;
+ if (CE->getValue() == ExpectedVal)
+ return Match_Success;
+ return Match_InvalidOperand;
}
-
-void AArch64Operand::dump() const {
- print(errs());
-}
-
-
-/// Force static initialization.
-extern "C" void LLVMInitializeAArch64AsmParser() {
- RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
- RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
-}
-
-#define GET_REGISTER_MATCHER
-#define GET_MATCHER_IMPLEMENTATION
-#include "AArch64GenAsmMatcher.inc"