diff options
Diffstat (limited to 'lib/Target/ARM64/MCTargetDesc')
18 files changed, 4482 insertions, 0 deletions
diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64AddressingModes.h b/lib/Target/ARM64/MCTargetDesc/ARM64AddressingModes.h new file mode 100644 index 0000000..7717743 --- /dev/null +++ b/lib/Target/ARM64/MCTargetDesc/ARM64AddressingModes.h @@ -0,0 +1,758 @@ +//===- ARM64AddressingModes.h - ARM64 Addressing Modes ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the ARM64 addressing mode implementation stuff. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_ARM64_ARM64ADDRESSINGMODES_H +#define LLVM_TARGET_ARM64_ARM64ADDRESSINGMODES_H + +#include "llvm/ADT/APFloat.h" +#include "llvm/ADT/APInt.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MathExtras.h" +#include <cassert> + +namespace llvm { + +/// ARM64_AM - ARM64 Addressing Mode Stuff +namespace ARM64_AM { + +//===----------------------------------------------------------------------===// +// Shifts +// + +enum ShiftType { + InvalidShift = -1, + LSL = 0, + LSR = 1, + ASR = 2, + ROR = 3, + MSL = 4 +}; + +/// getShiftName - Get the string encoding for the shift type. +static inline const char *getShiftName(ARM64_AM::ShiftType ST) { + switch (ST) { + default: assert(false && "unhandled shift type!"); + case ARM64_AM::LSL: return "lsl"; + case ARM64_AM::LSR: return "lsr"; + case ARM64_AM::ASR: return "asr"; + case ARM64_AM::ROR: return "ror"; + case ARM64_AM::MSL: return "msl"; + } + return 0; +} + +/// getShiftType - Extract the shift type. +static inline ARM64_AM::ShiftType getShiftType(unsigned Imm) { + return ARM64_AM::ShiftType((Imm >> 6) & 0x7); +} + +/// getShiftValue - Extract the shift value. 
+static inline unsigned getShiftValue(unsigned Imm) { + return Imm & 0x3f; +} + +/// getShifterImm - Encode the shift type and amount: +/// imm: 6-bit shift amount +/// shifter: 000 ==> lsl +/// 001 ==> lsr +/// 010 ==> asr +/// 011 ==> ror +/// 100 ==> msl +/// {8-6} = shifter +/// {5-0} = imm +static inline unsigned getShifterImm(ARM64_AM::ShiftType ST, unsigned Imm) { + assert((Imm & 0x3f) == Imm && "Illegal shifted immedate value!"); + return (unsigned(ST) << 6) | (Imm & 0x3f); +} + +//===----------------------------------------------------------------------===// +// Extends +// + +enum ExtendType { + InvalidExtend = -1, + UXTB = 0, + UXTH = 1, + UXTW = 2, + UXTX = 3, + SXTB = 4, + SXTH = 5, + SXTW = 6, + SXTX = 7 +}; + +/// getExtendName - Get the string encoding for the extend type. +static inline const char *getExtendName(ARM64_AM::ExtendType ET) { + switch (ET) { + default: assert(false && "unhandled extend type!"); + case ARM64_AM::UXTB: return "uxtb"; + case ARM64_AM::UXTH: return "uxth"; + case ARM64_AM::UXTW: return "uxtw"; + case ARM64_AM::UXTX: return "uxtx"; + case ARM64_AM::SXTB: return "sxtb"; + case ARM64_AM::SXTH: return "sxth"; + case ARM64_AM::SXTW: return "sxtw"; + case ARM64_AM::SXTX: return "sxtx"; + } + return 0; +} + +/// getArithShiftValue - get the arithmetic shift value. +static inline unsigned getArithShiftValue(unsigned Imm) { + return Imm & 0x7; +} + +/// getExtendType - Extract the extend type for operands of arithmetic ops. 
+static inline ARM64_AM::ExtendType getArithExtendType(unsigned Imm) { + return ARM64_AM::ExtendType((Imm >> 3) & 0x7); +} + +/// getArithExtendImm - Encode the extend type and shift amount for an +/// arithmetic instruction: +/// imm: 3-bit extend amount +/// shifter: 000 ==> uxtb +/// 001 ==> uxth +/// 010 ==> uxtw +/// 011 ==> uxtx +/// 100 ==> sxtb +/// 101 ==> sxth +/// 110 ==> sxtw +/// 111 ==> sxtx +/// {5-3} = shifter +/// {2-0} = imm3 +static inline unsigned getArithExtendImm(ARM64_AM::ExtendType ET, + unsigned Imm) { + assert((Imm & 0x7) == Imm && "Illegal shifted immedate value!"); + return (unsigned(ET) << 3) | (Imm & 0x7); +} + +/// getMemDoShift - Extract the "do shift" flag value for load/store +/// instructions. +static inline bool getMemDoShift(unsigned Imm) { + return (Imm & 0x1) != 0; +} + +/// getExtendType - Extract the extend type for the offset operand of +/// loads/stores. +static inline ARM64_AM::ExtendType getMemExtendType(unsigned Imm) { + return ARM64_AM::ExtendType((Imm >> 1) & 0x7); +} + +/// getExtendImm - Encode the extend type and amount for a load/store inst: +/// doshift: should the offset be scaled by the access size +/// shifter: 000 ==> uxtb +/// 001 ==> uxth +/// 010 ==> uxtw +/// 011 ==> uxtx +/// 100 ==> sxtb +/// 101 ==> sxth +/// 110 ==> sxtw +/// 111 ==> sxtx +/// {3-1} = shifter +/// {0} = doshift +static inline unsigned getMemExtendImm(ARM64_AM::ExtendType ET, bool DoShift) { + return (unsigned(ET) << 1) | unsigned(DoShift); +} + +//===----------------------------------------------------------------------===// +// Prefetch +// + +/// Pre-fetch operator names. 
+/// The enum values match the encoding values: +/// prfop<4:3> 00=preload data, 10=prepare for store +/// prfop<2:1> 00=target L1 cache, 01=target L2 cache, 10=target L3 cache, +/// prfop<0> 0=non-streaming (temporal), 1=streaming (non-temporal) +enum PrefetchOp { + InvalidPrefetchOp = -1, + PLDL1KEEP = 0x00, + PLDL1STRM = 0x01, + PLDL2KEEP = 0x02, + PLDL2STRM = 0x03, + PLDL3KEEP = 0x04, + PLDL3STRM = 0x05, + PSTL1KEEP = 0x10, + PSTL1STRM = 0x11, + PSTL2KEEP = 0x12, + PSTL2STRM = 0x13, + PSTL3KEEP = 0x14, + PSTL3STRM = 0x15 +}; + +/// isNamedPrefetchOp - Check if the prefetch-op 5-bit value has a name. +static inline bool isNamedPrefetchOp(unsigned prfop) { + switch (prfop) { + default: return false; + case ARM64_AM::PLDL1KEEP: case ARM64_AM::PLDL1STRM: case ARM64_AM::PLDL2KEEP: + case ARM64_AM::PLDL2STRM: case ARM64_AM::PLDL3KEEP: case ARM64_AM::PLDL3STRM: + case ARM64_AM::PSTL1KEEP: case ARM64_AM::PSTL1STRM: case ARM64_AM::PSTL2KEEP: + case ARM64_AM::PSTL2STRM: case ARM64_AM::PSTL3KEEP: case ARM64_AM::PSTL3STRM: + return true; + } +} + + +/// getPrefetchOpName - Get the string encoding for the prefetch operator. 
+static inline const char *getPrefetchOpName(ARM64_AM::PrefetchOp prfop) { + switch (prfop) { + default: assert(false && "unhandled prefetch-op type!"); + case ARM64_AM::PLDL1KEEP: return "pldl1keep"; + case ARM64_AM::PLDL1STRM: return "pldl1strm"; + case ARM64_AM::PLDL2KEEP: return "pldl2keep"; + case ARM64_AM::PLDL2STRM: return "pldl2strm"; + case ARM64_AM::PLDL3KEEP: return "pldl3keep"; + case ARM64_AM::PLDL3STRM: return "pldl3strm"; + case ARM64_AM::PSTL1KEEP: return "pstl1keep"; + case ARM64_AM::PSTL1STRM: return "pstl1strm"; + case ARM64_AM::PSTL2KEEP: return "pstl2keep"; + case ARM64_AM::PSTL2STRM: return "pstl2strm"; + case ARM64_AM::PSTL3KEEP: return "pstl3keep"; + case ARM64_AM::PSTL3STRM: return "pstl3strm"; + } + return 0; +} + +static inline uint64_t ror(uint64_t elt, unsigned size) { + return ((elt & 1) << (size-1)) | (elt >> 1); +} + +/// processLogicalImmediate - Determine if an immediate value can be encoded +/// as the immediate operand of a logical instruction for the given register +/// size. If so, return true with "encoding" set to the encoded value in +/// the form N:immr:imms. +static inline bool processLogicalImmediate(uint64_t imm, unsigned regSize, + uint64_t &encoding) { + if (imm == 0ULL || imm == ~0ULL || + (regSize != 64 && (imm >> regSize != 0 || imm == ~0U))) + return false; + + unsigned size = 2; + uint64_t eltVal = imm; + + // First, determine the element size. + while (size < regSize) { + unsigned numElts = regSize / size; + unsigned mask = (1ULL << size) - 1; + uint64_t lowestEltVal = imm & mask; + + bool allMatched = true; + for (unsigned i = 1; i < numElts; ++i) { + uint64_t currEltVal = (imm >> (i*size)) & mask; + if (currEltVal != lowestEltVal) { + allMatched = false; + break; + } + } + + if (allMatched) { + eltVal = lowestEltVal; + break; + } + + size *= 2; + } + + // Second, determine the rotation to make the element be: 0^m 1^n. 
+ for (unsigned i = 0; i < size; ++i) { + eltVal = ror(eltVal, size); + uint32_t clz = countLeadingZeros(eltVal) - (64 - size); + uint32_t cto = CountTrailingOnes_64(eltVal); + + if (clz + cto == size) { + // Encode in immr the number of RORs it would take to get *from* this + // element value to our target value, where i+1 is the number of RORs + // to go the opposite direction. + unsigned immr = size - (i + 1); + + // If size has a 1 in the n'th bit, create a value that has zeroes in + // bits [0, n] and ones above that. + uint64_t nimms = ~(size-1) << 1; + + // Or the CTO value into the low bits, which must be below the Nth bit + // bit mentioned above. + nimms |= (cto-1); + + // Extract the seventh bit and toggle it to create the N field. + unsigned N = ((nimms >> 6) & 1) ^ 1; + + encoding = (N << 12) | (immr << 6) | (nimms & 0x3f); + return true; + } + } + + return false; +} + +/// isLogicalImmediate - Return true if the immediate is valid for a logical +/// immediate instruction of the given register size. Return false otherwise. +static inline bool isLogicalImmediate(uint64_t imm, unsigned regSize) { + uint64_t encoding; + return processLogicalImmediate(imm, regSize, encoding); +} + +/// encodeLogicalImmediate - Return the encoded immediate value for a logical +/// immediate instruction of the given register size. +static inline uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize) { + uint64_t encoding = 0; + bool res = processLogicalImmediate(imm, regSize, encoding); + assert(res && "invalid logical immediate"); + (void)res; + return encoding; +} + +/// decodeLogicalImmediate - Decode a logical immediate value in the form +/// "N:immr:imms" (where the immr and imms fields are each 6 bits) into the +/// integer value it represents with regSize bits. +static inline uint64_t decodeLogicalImmediate(uint64_t val, unsigned regSize) { + // Extract the N, imms, and immr fields. 
+ unsigned N = (val >> 12) & 1; + unsigned immr = (val >> 6) & 0x3f; + unsigned imms = val & 0x3f; + + assert((regSize == 64 || N == 0) && "undefined logical immediate encoding"); + int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f)); + assert(len >= 0 && "undefined logical immediate encoding"); + unsigned size = (1 << len); + unsigned R = immr & (size - 1); + unsigned S = imms & (size - 1); + assert(S != size - 1 && "undefined logical immediate encoding"); + uint64_t pattern = (1ULL << (S + 1)) - 1; + for (unsigned i = 0; i < R; ++i) + pattern = ror(pattern, size); + + // Replicate the pattern to fill the regSize. + while (size != regSize) { + pattern |= (pattern << size); + size *= 2; + } + return pattern; +} + +/// isValidDecodeLogicalImmediate - Check to see if the logical immediate value +/// in the form "N:immr:imms" (where the immr and imms fields are each 6 bits) +/// is a valid encoding for an integer value with regSize bits. +static inline bool isValidDecodeLogicalImmediate(uint64_t val, + unsigned regSize) { + // Extract the N and imms fields needed for checking. + unsigned N = (val >> 12) & 1; + unsigned imms = val & 0x3f; + + if (regSize == 32 && N != 0) // undefined logical immediate encoding + return false; + int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f)); + if (len < 0) // undefined logical immediate encoding + return false; + unsigned size = (1 << len); + unsigned S = imms & (size - 1); + if (S == size - 1) // undefined logical immediate encoding + return false; + + return true; +} + +//===----------------------------------------------------------------------===// +// Floating-point Immediates +// +static inline float getFPImmFloat(unsigned Imm) { + // We expect an 8-bit binary encoding of a floating-point number here. 
+ union { + uint32_t I; + float F; + } FPUnion; + + uint8_t Sign = (Imm >> 7) & 0x1; + uint8_t Exp = (Imm >> 4) & 0x7; + uint8_t Mantissa = Imm & 0xf; + + // 8-bit FP iEEEE Float Encoding + // abcd efgh aBbbbbbc defgh000 00000000 00000000 + // + // where B = NOT(b); + + FPUnion.I = 0; + FPUnion.I |= Sign << 31; + FPUnion.I |= ((Exp & 0x4) != 0 ? 0 : 1) << 30; + FPUnion.I |= ((Exp & 0x4) != 0 ? 0x1f : 0) << 25; + FPUnion.I |= (Exp & 0x3) << 23; + FPUnion.I |= Mantissa << 19; + return FPUnion.F; +} + +/// getFP32Imm - Return an 8-bit floating-point version of the 32-bit +/// floating-point value. If the value cannot be represented as an 8-bit +/// floating-point value, then return -1. +static inline int getFP32Imm(const APInt &Imm) { + uint32_t Sign = Imm.lshr(31).getZExtValue() & 1; + int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127 + int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits + + // We can handle 4 bits of mantissa. + // mantissa = (16+UInt(e:f:g:h))/16. + if (Mantissa & 0x7ffff) + return -1; + Mantissa >>= 19; + if ((Mantissa & 0xf) != Mantissa) + return -1; + + // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 + if (Exp < -3 || Exp > 4) + return -1; + Exp = ((Exp+3) & 0x7) ^ 4; + + return ((int)Sign << 7) | (Exp << 4) | Mantissa; +} + +static inline int getFP32Imm(const APFloat &FPImm) { + return getFP32Imm(FPImm.bitcastToAPInt()); +} + +/// getFP64Imm - Return an 8-bit floating-point version of the 64-bit +/// floating-point value. If the value cannot be represented as an 8-bit +/// floating-point value, then return -1. +static inline int getFP64Imm(const APInt &Imm) { + uint64_t Sign = Imm.lshr(63).getZExtValue() & 1; + int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023 + uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffULL; + + // We can handle 4 bits of mantissa. + // mantissa = (16+UInt(e:f:g:h))/16. 
+ if (Mantissa & 0xffffffffffffULL) + return -1; + Mantissa >>= 48; + if ((Mantissa & 0xf) != Mantissa) + return -1; + + // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 + if (Exp < -3 || Exp > 4) + return -1; + Exp = ((Exp+3) & 0x7) ^ 4; + + return ((int)Sign << 7) | (Exp << 4) | Mantissa; +} + +static inline int getFP64Imm(const APFloat &FPImm) { + return getFP64Imm(FPImm.bitcastToAPInt()); +} + +//===--------------------------------------------------------------------===// +// AdvSIMD Modified Immediates +//===--------------------------------------------------------------------===// + +// 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh +static inline bool isAdvSIMDModImmType1(uint64_t Imm) { + return ((Imm >> 32) == (Imm & 0xffffffffULL)) && + ((Imm & 0xffffff00ffffff00ULL) == 0); +} + +static inline uint8_t encodeAdvSIMDModImmType1(uint64_t Imm) { + return (Imm & 0xffULL); +} + +static inline uint64_t decodeAdvSIMDModImmType1(uint8_t Imm) { + uint64_t EncVal = Imm; + return (EncVal << 32) | EncVal; +} + +// 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 +static inline bool isAdvSIMDModImmType2(uint64_t Imm) { + return ((Imm >> 32) == (Imm & 0xffffffffULL)) && + ((Imm & 0xffff00ffffff00ffULL) == 0); +} + +static inline uint8_t encodeAdvSIMDModImmType2(uint64_t Imm) { + return (Imm & 0xff00ULL) >> 8; +} + +static inline uint64_t decodeAdvSIMDModImmType2(uint8_t Imm) { + uint64_t EncVal = Imm; + return (EncVal << 40) | (EncVal << 8); +} + +// 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00 +static inline bool isAdvSIMDModImmType3(uint64_t Imm) { + return ((Imm >> 32) == (Imm & 0xffffffffULL)) && + ((Imm & 0xff00ffffff00ffffULL) == 0); +} + +static inline uint8_t encodeAdvSIMDModImmType3(uint64_t Imm) { + return (Imm & 0xff0000ULL) >> 16; +} + +static inline uint64_t decodeAdvSIMDModImmType3(uint8_t Imm) { + uint64_t EncVal = Imm; + return (EncVal << 48) | (EncVal << 16); +} + +// abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00 +static inline bool 
isAdvSIMDModImmType4(uint64_t Imm) { + return ((Imm >> 32) == (Imm & 0xffffffffULL)) && + ((Imm & 0x00ffffff00ffffffULL) == 0); +} + +static inline uint8_t encodeAdvSIMDModImmType4(uint64_t Imm) { + return (Imm & 0xff000000ULL) >> 24; +} + +static inline uint64_t decodeAdvSIMDModImmType4(uint8_t Imm) { + uint64_t EncVal = Imm; + return (EncVal << 56) | (EncVal << 24); +} + +// 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh +static inline bool isAdvSIMDModImmType5(uint64_t Imm) { + return ((Imm >> 32) == (Imm & 0xffffffffULL)) && + (((Imm & 0x00ff0000ULL) >> 16) == (Imm & 0x000000ffULL)) && + ((Imm & 0xff00ff00ff00ff00ULL) == 0); +} + +static inline uint8_t encodeAdvSIMDModImmType5(uint64_t Imm) { + return (Imm & 0xffULL); +} + +static inline uint64_t decodeAdvSIMDModImmType5(uint8_t Imm) { + uint64_t EncVal = Imm; + return (EncVal << 48) | (EncVal << 32) | (EncVal << 16) | EncVal; +} + +// abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 +static inline bool isAdvSIMDModImmType6(uint64_t Imm) { + return ((Imm >> 32) == (Imm & 0xffffffffULL)) && + (((Imm & 0xff000000ULL) >> 16) == (Imm & 0x0000ff00ULL)) && + ((Imm & 0x00ff00ff00ff00ffULL) == 0); +} + +static inline uint8_t encodeAdvSIMDModImmType6(uint64_t Imm) { + return (Imm & 0xff00ULL) >> 8; +} + +static inline uint64_t decodeAdvSIMDModImmType6(uint8_t Imm) { + uint64_t EncVal = Imm; + return (EncVal << 56) | (EncVal << 40) | (EncVal << 24) | (EncVal << 8); +} + +// 0x00 0x00 abcdefgh 0xFF 0x00 0x00 abcdefgh 0xFF +static inline bool isAdvSIMDModImmType7(uint64_t Imm) { + return ((Imm >> 32) == (Imm & 0xffffffffULL)) && + ((Imm & 0xffff00ffffff00ffULL) == 0x000000ff000000ffULL); +} + +static inline uint8_t encodeAdvSIMDModImmType7(uint64_t Imm) { + return (Imm & 0xff00ULL) >> 8; +} + +static inline uint64_t decodeAdvSIMDModImmType7(uint8_t Imm) { + uint64_t EncVal = Imm; + return (EncVal << 40) | (EncVal << 8) | 0x000000ff000000ffULL; +} + +// 0x00 abcdefgh 0xFF 0xFF 0x00 abcdefgh 0xFF 0xFF +static 
inline bool isAdvSIMDModImmType8(uint64_t Imm) { + return ((Imm >> 32) == (Imm & 0xffffffffULL)) && + ((Imm & 0xff00ffffff00ffffULL) == 0x0000ffff0000ffffULL); +} + +static inline uint64_t decodeAdvSIMDModImmType8(uint8_t Imm) { + uint64_t EncVal = Imm; + return (EncVal << 48) | (EncVal << 16) | 0x0000ffff0000ffffULL; +} + +static inline uint8_t encodeAdvSIMDModImmType8(uint64_t Imm) { + return (Imm & 0x00ff0000ULL) >> 16; +} + +// abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh +static inline bool isAdvSIMDModImmType9(uint64_t Imm) { + return ((Imm >> 32) == (Imm & 0xffffffffULL)) && + ((Imm >> 48) == (Imm & 0x0000ffffULL)) && + ((Imm >> 56) == (Imm & 0x000000ffULL)); +} + +static inline uint8_t encodeAdvSIMDModImmType9(uint64_t Imm) { + return (Imm & 0xffULL); +} + +static inline uint64_t decodeAdvSIMDModImmType9(uint8_t Imm) { + uint64_t EncVal = Imm; + EncVal |= (EncVal << 8); + EncVal |= (EncVal << 16); + EncVal |= (EncVal << 32); + return EncVal; +} + +// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh +// cmode: 1110, op: 1 +static inline bool isAdvSIMDModImmType10(uint64_t Imm) { + uint64_t ByteA = Imm & 0xff00000000000000ULL; + uint64_t ByteB = Imm & 0x00ff000000000000ULL; + uint64_t ByteC = Imm & 0x0000ff0000000000ULL; + uint64_t ByteD = Imm & 0x000000ff00000000ULL; + uint64_t ByteE = Imm & 0x00000000ff000000ULL; + uint64_t ByteF = Imm & 0x0000000000ff0000ULL; + uint64_t ByteG = Imm & 0x000000000000ff00ULL; + uint64_t ByteH = Imm & 0x00000000000000ffULL; + + return (ByteA == 0ULL || ByteA == 0xff00000000000000ULL) && + (ByteB == 0ULL || ByteB == 0x00ff000000000000ULL) && + (ByteC == 0ULL || ByteC == 0x0000ff0000000000ULL) && + (ByteD == 0ULL || ByteD == 0x000000ff00000000ULL) && + (ByteE == 0ULL || ByteE == 0x00000000ff000000ULL) && + (ByteF == 0ULL || ByteF == 0x0000000000ff0000ULL) && + (ByteG == 0ULL || ByteG == 0x000000000000ff00ULL) && + (ByteH == 0ULL || ByteH == 0x00000000000000ffULL); +} + +static 
inline uint8_t encodeAdvSIMDModImmType10(uint64_t Imm) { + uint8_t BitA = (Imm & 0xff00000000000000ULL) != 0; + uint8_t BitB = (Imm & 0x00ff000000000000ULL) != 0; + uint8_t BitC = (Imm & 0x0000ff0000000000ULL) != 0; + uint8_t BitD = (Imm & 0x000000ff00000000ULL) != 0; + uint8_t BitE = (Imm & 0x00000000ff000000ULL) != 0; + uint8_t BitF = (Imm & 0x0000000000ff0000ULL) != 0; + uint8_t BitG = (Imm & 0x000000000000ff00ULL) != 0; + uint8_t BitH = (Imm & 0x00000000000000ffULL) != 0; + + uint8_t EncVal = BitA; + EncVal <<= 1; + EncVal |= BitB; + EncVal <<= 1; + EncVal |= BitC; + EncVal <<= 1; + EncVal |= BitD; + EncVal <<= 1; + EncVal |= BitE; + EncVal <<= 1; + EncVal |= BitF; + EncVal <<= 1; + EncVal |= BitG; + EncVal <<= 1; + EncVal |= BitH; + return EncVal; +} + +static inline uint64_t decodeAdvSIMDModImmType10(uint8_t Imm) { + uint64_t EncVal = 0; + if (Imm & 0x80) EncVal |= 0xff00000000000000ULL; + if (Imm & 0x40) EncVal |= 0x00ff000000000000ULL; + if (Imm & 0x20) EncVal |= 0x0000ff0000000000ULL; + if (Imm & 0x10) EncVal |= 0x000000ff00000000ULL; + if (Imm & 0x08) EncVal |= 0x00000000ff000000ULL; + if (Imm & 0x04) EncVal |= 0x0000000000ff0000ULL; + if (Imm & 0x02) EncVal |= 0x000000000000ff00ULL; + if (Imm & 0x01) EncVal |= 0x00000000000000ffULL; + return EncVal; +} + +// aBbbbbbc defgh000 0x00 0x00 aBbbbbbc defgh000 0x00 0x00 +static inline bool isAdvSIMDModImmType11(uint64_t Imm) { + uint64_t BString = (Imm & 0x7E000000ULL) >> 25; + return ((Imm >> 32) == (Imm & 0xffffffffULL)) && + (BString == 0x1f || BString == 0x20) && + ((Imm & 0x0007ffff0007ffffULL) == 0); +} + +static inline uint8_t encodeAdvSIMDModImmType11(uint64_t Imm) { + uint8_t BitA = (Imm & 0x80000000ULL) != 0; + uint8_t BitB = (Imm & 0x20000000ULL) != 0; + uint8_t BitC = (Imm & 0x01000000ULL) != 0; + uint8_t BitD = (Imm & 0x00800000ULL) != 0; + uint8_t BitE = (Imm & 0x00400000ULL) != 0; + uint8_t BitF = (Imm & 0x00200000ULL) != 0; + uint8_t BitG = (Imm & 0x00100000ULL) != 0; + uint8_t BitH = (Imm & 
0x00080000ULL) != 0; + + uint8_t EncVal = BitA; + EncVal <<= 1; + EncVal |= BitB; + EncVal <<= 1; + EncVal |= BitC; + EncVal <<= 1; + EncVal |= BitD; + EncVal <<= 1; + EncVal |= BitE; + EncVal <<= 1; + EncVal |= BitF; + EncVal <<= 1; + EncVal |= BitG; + EncVal <<= 1; + EncVal |= BitH; + return EncVal; +} + +static inline uint64_t decodeAdvSIMDModImmType11(uint8_t Imm) { + uint64_t EncVal = 0; + if (Imm & 0x80) EncVal |= 0x80000000ULL; + if (Imm & 0x40) EncVal |= 0x3e000000ULL; + else EncVal |= 0x40000000ULL; + if (Imm & 0x20) EncVal |= 0x01000000ULL; + if (Imm & 0x10) EncVal |= 0x00800000ULL; + if (Imm & 0x08) EncVal |= 0x00400000ULL; + if (Imm & 0x04) EncVal |= 0x00200000ULL; + if (Imm & 0x02) EncVal |= 0x00100000ULL; + if (Imm & 0x01) EncVal |= 0x00080000ULL; + return (EncVal << 32) | EncVal; +} + +// aBbbbbbb bbcdefgh 0x00 0x00 0x00 0x00 0x00 0x00 +static inline bool isAdvSIMDModImmType12(uint64_t Imm) { + uint64_t BString = (Imm & 0x7fc0000000000000ULL) >> 54; + return ((BString == 0xff || BString == 0x100) && + ((Imm & 0x0000ffffffffffffULL) == 0)); +} + +static inline uint8_t encodeAdvSIMDModImmType12(uint64_t Imm) { + uint8_t BitA = (Imm & 0x8000000000000000ULL) != 0; + uint8_t BitB = (Imm & 0x0040000000000000ULL) != 0; + uint8_t BitC = (Imm & 0x0020000000000000ULL) != 0; + uint8_t BitD = (Imm & 0x0010000000000000ULL) != 0; + uint8_t BitE = (Imm & 0x0008000000000000ULL) != 0; + uint8_t BitF = (Imm & 0x0004000000000000ULL) != 0; + uint8_t BitG = (Imm & 0x0002000000000000ULL) != 0; + uint8_t BitH = (Imm & 0x0001000000000000ULL) != 0; + + uint8_t EncVal = BitA; + EncVal <<= 1; + EncVal |= BitB; + EncVal <<= 1; + EncVal |= BitC; + EncVal <<= 1; + EncVal |= BitD; + EncVal <<= 1; + EncVal |= BitE; + EncVal <<= 1; + EncVal |= BitF; + EncVal <<= 1; + EncVal |= BitG; + EncVal <<= 1; + EncVal |= BitH; + return EncVal; +} + +static inline uint64_t decodeAdvSIMDModImmType12(uint8_t Imm) { + uint64_t EncVal = 0; + if (Imm & 0x80) EncVal |= 0x8000000000000000ULL; + if 
(Imm & 0x40) EncVal |= 0x3fc0000000000000ULL; + else EncVal |= 0x4000000000000000ULL; + if (Imm & 0x20) EncVal |= 0x0020000000000000ULL; + if (Imm & 0x10) EncVal |= 0x0010000000000000ULL; + if (Imm & 0x08) EncVal |= 0x0008000000000000ULL; + if (Imm & 0x04) EncVal |= 0x0004000000000000ULL; + if (Imm & 0x02) EncVal |= 0x0002000000000000ULL; + if (Imm & 0x01) EncVal |= 0x0001000000000000ULL; + return (EncVal << 32) | EncVal; +} + +} // end namespace ARM64_AM + +} // end namespace llvm + +#endif diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64AsmBackend.cpp b/lib/Target/ARM64/MCTargetDesc/ARM64AsmBackend.cpp new file mode 100644 index 0000000..26813e2 --- /dev/null +++ b/lib/Target/ARM64/MCTargetDesc/ARM64AsmBackend.cpp @@ -0,0 +1,533 @@ +//===-- ARM64AsmBackend.cpp - ARM64 Assembler Backend ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "ARM64.h" +#include "ARM64RegisterInfo.h" +#include "MCTargetDesc/ARM64FixupKinds.h" +#include "llvm/ADT/Triple.h" +#include "llvm/MC/MCAsmBackend.h" +#include "llvm/MC/MCDirectives.h" +#include "llvm/MC/MCFixupKindInfo.h" +#include "llvm/MC/MCObjectWriter.h" +#include "llvm/MC/MCSectionMachO.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MachO.h" +using namespace llvm; + +namespace { + +class ARM64AsmBackend : public MCAsmBackend { + static const unsigned PCRelFlagVal = + MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel; + +public: + ARM64AsmBackend(const Target &T) : MCAsmBackend() {} + + unsigned getNumFixupKinds() const { return ARM64::NumTargetFixupKinds; } + + const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const { + const static MCFixupKindInfo Infos[ARM64::NumTargetFixupKinds] = { + // This table *must* be in the order that the fixup_* 
kinds are defined in + // ARM64FixupKinds.h. + // + // Name Offset (bits) Size (bits) Flags + { "fixup_arm64_pcrel_adr_imm21", 0, 32, PCRelFlagVal }, + { "fixup_arm64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal }, + { "fixup_arm64_add_imm12", 10, 12, 0 }, + { "fixup_arm64_ldst_imm12_scale1", 10, 12, 0 }, + { "fixup_arm64_ldst_imm12_scale2", 10, 12, 0 }, + { "fixup_arm64_ldst_imm12_scale4", 10, 12, 0 }, + { "fixup_arm64_ldst_imm12_scale8", 10, 12, 0 }, + { "fixup_arm64_ldst_imm12_scale16", 10, 12, 0 }, + { "fixup_arm64_movw", 5, 16, 0 }, + { "fixup_arm64_pcrel_branch14", 5, 14, PCRelFlagVal }, + { "fixup_arm64_pcrel_imm19", 5, 19, PCRelFlagVal }, + { "fixup_arm64_pcrel_branch26", 0, 26, PCRelFlagVal }, + { "fixup_arm64_pcrel_call26", 0, 26, PCRelFlagVal }, + { "fixup_arm64_tlsdesc_call", 0, 0, 0 } + }; + + if (Kind < FirstTargetFixupKind) + return MCAsmBackend::getFixupKindInfo(Kind); + + assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() && + "Invalid kind!"); + return Infos[Kind - FirstTargetFixupKind]; + } + + void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, + uint64_t Value, bool IsPCRel) const; + + bool mayNeedRelaxation(const MCInst &Inst) const; + bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, + const MCRelaxableFragment *DF, + const MCAsmLayout &Layout) const; + void relaxInstruction(const MCInst &Inst, MCInst &Res) const; + bool writeNopData(uint64_t Count, MCObjectWriter *OW) const; + + void HandleAssemblerFlag(MCAssemblerFlag Flag) {} + + unsigned getPointerSize() const { return 8; } +}; + +} // end anonymous namespace + +/// \brief The number of bytes the fixup may change. 
+static unsigned getFixupKindNumBytes(unsigned Kind) { + switch (Kind) { + default: + assert(0 && "Unknown fixup kind!"); + + case ARM64::fixup_arm64_tlsdesc_call: + return 0; + + case FK_Data_1: + return 1; + + case FK_Data_2: + case ARM64::fixup_arm64_movw: + return 2; + + case ARM64::fixup_arm64_pcrel_branch14: + case ARM64::fixup_arm64_add_imm12: + case ARM64::fixup_arm64_ldst_imm12_scale1: + case ARM64::fixup_arm64_ldst_imm12_scale2: + case ARM64::fixup_arm64_ldst_imm12_scale4: + case ARM64::fixup_arm64_ldst_imm12_scale8: + case ARM64::fixup_arm64_ldst_imm12_scale16: + case ARM64::fixup_arm64_pcrel_imm19: + return 3; + + case ARM64::fixup_arm64_pcrel_adr_imm21: + case ARM64::fixup_arm64_pcrel_adrp_imm21: + case ARM64::fixup_arm64_pcrel_branch26: + case ARM64::fixup_arm64_pcrel_call26: + case FK_Data_4: + return 4; + + case FK_Data_8: + return 8; + } +} + +static unsigned AdrImmBits(unsigned Value) { + unsigned lo2 = Value & 0x3; + unsigned hi19 = (Value & 0x1ffffc) >> 2; + return (hi19 << 5) | (lo2 << 29); +} + +static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) { + int64_t SignedValue = static_cast<int64_t>(Value); + switch (Kind) { + default: + assert(false && "Unknown fixup kind!"); + case ARM64::fixup_arm64_pcrel_adr_imm21: + if (SignedValue > 2097151 || SignedValue < -2097152) + report_fatal_error("fixup value out of range"); + return AdrImmBits(Value & 0x1fffffULL); + case ARM64::fixup_arm64_pcrel_adrp_imm21: + return AdrImmBits((Value & 0x1fffff000ULL) >> 12); + case ARM64::fixup_arm64_pcrel_imm19: + // Signed 21-bit immediate + if (SignedValue > 2097151 || SignedValue < -2097152) + report_fatal_error("fixup value out of range"); + // Low two bits are not encoded. 
+ return (Value >> 2) & 0x7ffff; + case ARM64::fixup_arm64_add_imm12: + case ARM64::fixup_arm64_ldst_imm12_scale1: + // Unsigned 12-bit immediate + if (Value >= 0x1000) + report_fatal_error("invalid imm12 fixup value"); + return Value; + case ARM64::fixup_arm64_ldst_imm12_scale2: + // Unsigned 12-bit immediate which gets multiplied by 2 + if (Value & 1 || Value >= 0x2000) + report_fatal_error("invalid imm12 fixup value"); + return Value >> 1; + case ARM64::fixup_arm64_ldst_imm12_scale4: + // Unsigned 12-bit immediate which gets multiplied by 4 + if (Value & 3 || Value >= 0x4000) + report_fatal_error("invalid imm12 fixup value"); + return Value >> 2; + case ARM64::fixup_arm64_ldst_imm12_scale8: + // Unsigned 12-bit immediate which gets multiplied by 8 + if (Value & 7 || Value >= 0x8000) + report_fatal_error("invalid imm12 fixup value"); + return Value >> 3; + case ARM64::fixup_arm64_ldst_imm12_scale16: + // Unsigned 12-bit immediate which gets multiplied by 16 + if (Value & 15 || Value >= 0x10000) + report_fatal_error("invalid imm12 fixup value"); + return Value >> 4; + case ARM64::fixup_arm64_movw: + report_fatal_error("no resolvable MOVZ/MOVK fixups supported yet"); + return Value; + case ARM64::fixup_arm64_pcrel_branch14: + // Signed 16-bit immediate + if (SignedValue > 32767 || SignedValue < -32768) + report_fatal_error("fixup value out of range"); + // Low two bits are not encoded (4-byte alignment assumed). + if (Value & 0x3) + report_fatal_error("fixup not sufficiently aligned"); + return (Value >> 2) & 0x3fff; + case ARM64::fixup_arm64_pcrel_branch26: + case ARM64::fixup_arm64_pcrel_call26: + // Signed 28-bit immediate + if (SignedValue > 134217727 || SignedValue < -134217728) + report_fatal_error("fixup value out of range"); + // Low two bits are not encoded (4-byte alignment assumed). 
  if (Value & 0x3)
    report_fatal_error("fixup not sufficiently aligned");
  return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
    // Generic data fixups are emitted verbatim.
    return Value;
  }
}

/// applyFixup - Patch the resolved fixup value into the fragment's bytes.
/// The value is first adjusted for the fixup kind, then shifted to the bit
/// offset the instruction encoding expects, and finally OR'd byte-by-byte
/// (little-endian) into the existing data.
void ARM64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
                                 unsigned DataSize, uint64_t Value,
                                 bool IsPCRel) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  if (!Value)
    return; // Doesn't change encoding.
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup.getKind(), Value);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
}

/// mayNeedRelaxation - ARM64 instructions are fixed-width; nothing needs
/// relaxation.
bool ARM64AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  return false;
}

bool ARM64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                                           const MCRelaxableFragment *DF,
                                           const MCAsmLayout &Layout) const {
  // FIXME: This isn't correct for ARM64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void ARM64AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
  assert(false && "ARM64AsmBackend::relaxInstruction() unimplemented");
}

/// writeNopData - Fill a gap of Count bytes with NOP instructions (or zero
/// bytes if the gap is not instruction-aligned).
bool ARM64AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  if ((Count & 3) != 0) {
    for (uint64_t i = 0, e = (Count & 3); i != e; ++i)
      OW->Write8(0);
  }

  // We are properly aligned, so write NOPs as requested.
  // 0xd503201f is the A64 NOP encoding.
  Count /= 4;
  for (uint64_t i = 0; i != Count; ++i)
    OW->Write32(0xd503201f);
  return true;
}

namespace {

namespace CU {

/// \brief Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// \brief A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// \brief No compact unwind encoding available. Instead the low 23-bits of
  /// the compact unwind encoding is the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF info
  /// for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// \brief This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// \brief Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR   = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace

// FIXME: This should be in a separate file.
/// Darwin (Mach-O) flavor of the ARM64 asm backend: adds compact unwind
/// generation and Mach-O atomization rules on top of the generic backend.
class DarwinARM64AsmBackend : public ARM64AsmBackend {
  const MCRegisterInfo &MRI;

  /// \brief Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinARM64AsmBackend(const Target &T, const MCRegisterInfo &MRI)
      : ARM64AsmBackend(T), MRI(MRI) {}

  /// Create a Mach-O object writer for the arm64 CPU type/subtype.
  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createARM64MachObjectWriter(OS, MachO::CPU_TYPE_ARM64,
                                       MachO::CPU_SUBTYPE_ARM64_ALL);
  }

  virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
    // Any section for which the linker breaks things into atoms needs to
    // preserve symbols, including assembler local symbols, to identify
    // those atoms. These sections are:
    // Sections of type:
    //
    //    S_CSTRING_LITERALS  (e.g. __cstring)
    //    S_LITERAL_POINTERS  (e.g.  objc selector pointers)
    //    S_16BYTE_LITERALS, S_8BYTE_LITERALS, S_4BYTE_LITERALS
    //
    // Sections named:
    //
    //    __TEXT,__eh_frame
    //    __TEXT,__ustring
    //    __DATA,__cfstring
    //    __DATA,__objc_classrefs
    //    __DATA,__objc_catlist
    //
    // FIXME: It would be better if the compiler used actual linker local
    // symbols for each of these sections rather than preserving what
    // are ostensibly assembler local symbols.
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO &>(Section);
    return (SMO.getType() == MachO::S_CSTRING_LITERALS ||
            SMO.getType() == MachO::S_4BYTE_LITERALS ||
            SMO.getType() == MachO::S_8BYTE_LITERALS ||
            SMO.getType() == MachO::S_16BYTE_LITERALS ||
            SMO.getType() == MachO::S_LITERAL_POINTERS ||
            (SMO.getSegmentName() == "__TEXT" &&
             (SMO.getSectionName() == "__eh_frame" ||
              SMO.getSectionName() == "__ustring")) ||
            (SMO.getSegmentName() == "__DATA" &&
             (SMO.getSectionName() == "__cfstring" ||
              SMO.getSectionName() == "__objc_classrefs" ||
              SMO.getSectionName() == "__objc_catlist")));
  }

  /// \brief Generate the compact unwind encoding from the CFI directives.
  /// Falls back to UNWIND_ARM64_MODE_DWARF for any CFI sequence it cannot
  /// represent in the compact form.
  virtual uint32_t
  generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const
      override {
    // No CFI at all means a frameless leaf with no saved registers.
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer. Expects the next two CFI instructions to
        // be the LR and FP offset records emitted by the frame-setup code.
        // NOTE(review): the getXRegFromWReg() round-trip suggests
        // getLLVMRegNum() may return the W form of the register here --
        // confirm against the DWARF register mapping.
        assert(getXRegFromWReg(MRI.getLLVMRegNum(Inst.getRegister(), true)) ==
                   ARM64::FP &&
               "Invalid frame pointer!");
        assert(i + 2 < e && "Insufficient CFI instructions to define a frame!");

        const MCCFIInstruction &LRPush = Instrs[++i];
        assert(LRPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Link register not pushed!");
        const MCCFIInstruction &FPPush = Instrs[++i];
        assert(FPPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Frame pointer not pushed!");

        unsigned LRReg = MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        assert(LRReg == ARM64::LR && FPReg == ARM64::FP &&
               "Pushing invalid registers for frame!");

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        assert(StackSize == 0 && "We already have the CFA offset!");
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        unsigned Reg2 = MRI.getLLVMRegNum(Inst2.getRegister(), true);

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers. The mask check on each branch
        // rejects a pair that arrives after any higher-numbered pair has
        // already been recorded.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == ARM64::X19 && Reg2 == ARM64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == ARM64::X21 && Reg2 == ARM64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == ARM64::X23 && Reg2 == ARM64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == ARM64::X25 && Reg2 == ARM64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == ARM64::X27 && Reg2 == ARM64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair   = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == ARM64::D8 && Reg2 == ARM64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == ARM64::D10 && Reg2 == ARM64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == ARM64::D12 && Reg2 == ARM64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == ARM64::D14 && Reg2 == ARM64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

/// ELF flavor of the ARM64 asm backend.
class ELFARM64AsmBackend : public ARM64AsmBackend {
public:
  uint8_t OSABI;

  ELFARM64AsmBackend(const Target &T, uint8_t OSABI)
      : ARM64AsmBackend(T), OSABI(OSABI) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createARM64ELFObjectWriter(OS, OSABI);
  }

  void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
                         const MCFixup &Fixup, const MCFragment *DF,
                         const MCValue &Target, uint64_t &Value,
                         bool &IsResolved) override;
};

void ELFARM64AsmBackend::processFixupValue(const MCAssembler &Asm,
                                           const MCAsmLayout &Layout,
                                           const MCFixup &Fixup,
                                           const MCFragment *DF,
                                           const MCValue &Target,
                                           uint64_t &Value, bool &IsResolved) {
  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if ((uint32_t)Fixup.getKind() == ARM64::fixup_arm64_pcrel_adrp_imm21)
    IsResolved = false;
}
}

/// createARM64AsmBackend - Create the asm backend matching the target triple:
/// Mach-O for Darwin, otherwise ELF.
MCAsmBackend *llvm::createARM64AsmBackend(const Target &T,
                                          const MCRegisterInfo &MRI,
                                          StringRef TT, StringRef CPU) {
  Triple TheTriple(TT);

  if (TheTriple.isOSDarwin())
    return new DarwinARM64AsmBackend(T, MRI);

  assert(TheTriple.isOSBinFormatELF() && "Expect either MachO or ELF target");
  // NOTE(review): TheTriple.getOS() is a Triple::OSType value, but the
  // ELFARM64AsmBackend constructor expects an ELF OSABI byte; this should
  // presumably be mapped via MCELFObjectTargetWriter::getOSABI() -- confirm.
  return new ELFARM64AsmBackend(T, TheTriple.getOS());
}
diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64BaseInfo.h b/lib/Target/ARM64/MCTargetDesc/ARM64BaseInfo.h
new file mode 100644
index 0000000..d3c2cf7
--- /dev/null
+++ b/lib/Target/ARM64/MCTargetDesc/ARM64BaseInfo.h
@@ -0,0 +1,998 @@
//===-- ARM64BaseInfo.h - Top level definitions for ARM64 -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains small standalone helper functions and enum definitions for
// the ARM64 target useful for the compiler back-end and the MC libraries.
// As such, it deliberately does not include references to LLVM core
// code gen types, passes, etc..
//
//===----------------------------------------------------------------------===//

#ifndef ARM64BASEINFO_H
#define ARM64BASEINFO_H

#include "ARM64MCTargetDesc.h"
#include "llvm/Support/ErrorHandling.h"

namespace llvm {

/// getWRegFromXReg - Map a 64-bit X register (including FP/LR/SP/XZR) onto
/// its 32-bit W counterpart. Non-GPR inputs are returned unchanged.
inline static unsigned getWRegFromXReg(unsigned Reg) {
  switch (Reg) {
  case ARM64::X0: return ARM64::W0;
  case ARM64::X1: return ARM64::W1;
  case ARM64::X2: return ARM64::W2;
  case ARM64::X3: return ARM64::W3;
  case ARM64::X4: return ARM64::W4;
  case ARM64::X5: return ARM64::W5;
  case ARM64::X6: return ARM64::W6;
  case ARM64::X7: return ARM64::W7;
  case ARM64::X8: return ARM64::W8;
  case ARM64::X9: return ARM64::W9;
  case ARM64::X10: return ARM64::W10;
  case ARM64::X11: return ARM64::W11;
  case ARM64::X12: return ARM64::W12;
  case ARM64::X13: return ARM64::W13;
  case ARM64::X14: return ARM64::W14;
  case ARM64::X15: return ARM64::W15;
  case ARM64::X16: return ARM64::W16;
  case ARM64::X17: return ARM64::W17;
  case ARM64::X18: return ARM64::W18;
  case ARM64::X19: return ARM64::W19;
  case ARM64::X20: return ARM64::W20;
  case ARM64::X21: return ARM64::W21;
  case ARM64::X22: return ARM64::W22;
  case ARM64::X23: return ARM64::W23;
  case ARM64::X24: return ARM64::W24;
  case ARM64::X25: return ARM64::W25;
  case ARM64::X26: return ARM64::W26;
  case ARM64::X27: return ARM64::W27;
  case ARM64::X28: return ARM64::W28;
  case ARM64::FP: return ARM64::W29;
  case ARM64::LR: return ARM64::W30;
  case ARM64::SP: return ARM64::WSP;
  case ARM64::XZR: return ARM64::WZR;
  }
  // For anything else, return it unchanged.
  return Reg;
}

/// getXRegFromWReg - Inverse of getWRegFromXReg: map a 32-bit W register
/// onto its 64-bit X counterpart (W29/W30/WSP/WZR map to FP/LR/SP/XZR).
/// Non-GPR inputs are returned unchanged.
inline static unsigned getXRegFromWReg(unsigned Reg) {
  switch (Reg) {
  case ARM64::W0: return ARM64::X0;
  case ARM64::W1: return ARM64::X1;
  case ARM64::W2: return ARM64::X2;
  case ARM64::W3: return ARM64::X3;
  case ARM64::W4: return ARM64::X4;
  case ARM64::W5: return ARM64::X5;
  case ARM64::W6: return ARM64::X6;
  case ARM64::W7: return ARM64::X7;
  case ARM64::W8: return ARM64::X8;
  case ARM64::W9: return ARM64::X9;
  case ARM64::W10: return ARM64::X10;
  case ARM64::W11: return ARM64::X11;
  case ARM64::W12: return ARM64::X12;
  case ARM64::W13: return ARM64::X13;
  case ARM64::W14: return ARM64::X14;
  case ARM64::W15: return ARM64::X15;
  case ARM64::W16: return ARM64::X16;
  case ARM64::W17: return ARM64::X17;
  case ARM64::W18: return ARM64::X18;
  case ARM64::W19: return ARM64::X19;
  case ARM64::W20: return ARM64::X20;
  case ARM64::W21: return ARM64::X21;
  case ARM64::W22: return ARM64::X22;
  case ARM64::W23: return ARM64::X23;
  case ARM64::W24: return ARM64::X24;
  case ARM64::W25: return ARM64::X25;
  case ARM64::W26: return ARM64::X26;
  case ARM64::W27: return ARM64::X27;
  case ARM64::W28: return ARM64::X28;
  case ARM64::W29: return ARM64::FP;
  case ARM64::W30: return ARM64::LR;
  case ARM64::WSP: return ARM64::SP;
  case ARM64::WZR: return ARM64::XZR;
  }
  // For anything else, return it unchanged.
  return Reg;
}

/// getBRegFromDReg - Map a 64-bit D (FP/SIMD) register onto the 8-bit B
/// register with the same index. Non-D inputs are returned unchanged.
static inline unsigned getBRegFromDReg(unsigned Reg) {
  switch (Reg) {
  case ARM64::D0: return ARM64::B0;
  case ARM64::D1: return ARM64::B1;
  case ARM64::D2: return ARM64::B2;
  case ARM64::D3: return ARM64::B3;
  case ARM64::D4: return ARM64::B4;
  case ARM64::D5: return ARM64::B5;
  case ARM64::D6: return ARM64::B6;
  case ARM64::D7: return ARM64::B7;
  case ARM64::D8: return ARM64::B8;
  case ARM64::D9: return ARM64::B9;
  case ARM64::D10: return ARM64::B10;
  case ARM64::D11: return ARM64::B11;
  case ARM64::D12: return ARM64::B12;
  case ARM64::D13: return ARM64::B13;
  case ARM64::D14: return ARM64::B14;
  case ARM64::D15: return ARM64::B15;
  case ARM64::D16: return ARM64::B16;
  case ARM64::D17: return ARM64::B17;
  case ARM64::D18: return ARM64::B18;
  case ARM64::D19: return ARM64::B19;
  case ARM64::D20: return ARM64::B20;
  case ARM64::D21: return ARM64::B21;
  case ARM64::D22: return ARM64::B22;
  case ARM64::D23: return ARM64::B23;
  case ARM64::D24: return ARM64::B24;
  case ARM64::D25: return ARM64::B25;
  case ARM64::D26: return ARM64::B26;
  case ARM64::D27: return ARM64::B27;
  case ARM64::D28: return ARM64::B28;
  case ARM64::D29: return ARM64::B29;
  case ARM64::D30: return ARM64::B30;
  case ARM64::D31: return ARM64::B31;
  }
  // For anything else, return it unchanged.
  return Reg;
}


/// getDRegFromBReg - Inverse of getBRegFromDReg: map an 8-bit B register onto
/// the 64-bit D register with the same index. Non-B inputs are returned
/// unchanged.
static inline unsigned getDRegFromBReg(unsigned Reg) {
  switch (Reg) {
  case ARM64::B0: return ARM64::D0;
  case ARM64::B1: return ARM64::D1;
  case ARM64::B2: return ARM64::D2;
  case ARM64::B3: return ARM64::D3;
  case ARM64::B4: return ARM64::D4;
  case ARM64::B5: return ARM64::D5;
  case ARM64::B6: return ARM64::D6;
  case ARM64::B7: return ARM64::D7;
  case ARM64::B8: return ARM64::D8;
  case ARM64::B9: return ARM64::D9;
  case ARM64::B10: return ARM64::D10;
  case ARM64::B11: return ARM64::D11;
  case ARM64::B12: return ARM64::D12;
  case ARM64::B13: return ARM64::D13;
  case ARM64::B14: return ARM64::D14;
  case ARM64::B15: return ARM64::D15;
  case ARM64::B16: return ARM64::D16;
  case ARM64::B17: return ARM64::D17;
  case ARM64::B18: return ARM64::D18;
  case ARM64::B19: return ARM64::D19;
  case ARM64::B20: return ARM64::D20;
  case ARM64::B21: return ARM64::D21;
  case ARM64::B22: return ARM64::D22;
  case ARM64::B23: return ARM64::D23;
  case ARM64::B24: return ARM64::D24;
  case ARM64::B25: return ARM64::D25;
  case ARM64::B26: return ARM64::D26;
  case ARM64::B27: return ARM64::D27;
  case ARM64::B28: return ARM64::D28;
  case ARM64::B29: return ARM64::D29;
  case ARM64::B30: return ARM64::D30;
  case ARM64::B31: return ARM64::D31;
  }
  // For anything else, return it unchanged.
  return Reg;
}

namespace ARM64CC {

// The CondCodes constants map directly to the 4-bit encoding of the condition
// field for predicated instructions.
+enum CondCode { // Meaning (integer) Meaning (floating-point) + EQ = 0x0, // Equal Equal + NE = 0x1, // Not equal Not equal, or unordered + CS = 0x2, // Carry set >, ==, or unordered + CC = 0x3, // Carry clear Less than + MI = 0x4, // Minus, negative Less than + PL = 0x5, // Plus, positive or zero >, ==, or unordered + VS = 0x6, // Overflow Unordered + VC = 0x7, // No overflow Not unordered + HI = 0x8, // Unsigned higher Greater than, or unordered + LS = 0x9, // Unsigned lower or same Less than or equal + GE = 0xa, // Greater than or equal Greater than or equal + LT = 0xb, // Less than Less than, or unordered + GT = 0xc, // Greater than Greater than + LE = 0xd, // Less than or equal <, ==, or unordered + AL = 0xe // Always (unconditional) Always (unconditional) +}; + +inline static const char *getCondCodeName(CondCode Code) { + // cond<0> is ignored when cond<3:1> = 111, where 1110 is 0xe (aka AL). + if ((Code & AL) == AL) + Code = AL; + switch (Code) { + case EQ: return "eq"; + case NE: return "ne"; + case CS: return "cs"; + case CC: return "cc"; + case MI: return "mi"; + case PL: return "pl"; + case VS: return "vs"; + case VC: return "vc"; + case HI: return "hi"; + case LS: return "ls"; + case GE: return "ge"; + case LT: return "lt"; + case GT: return "gt"; + case LE: return "le"; + case AL: return "al"; + } + llvm_unreachable("Unknown condition code"); +} + +inline static CondCode getInvertedCondCode(CondCode Code) { + switch (Code) { + default: llvm_unreachable("Unknown condition code"); + case EQ: return NE; + case NE: return EQ; + case CS: return CC; + case CC: return CS; + case MI: return PL; + case PL: return MI; + case VS: return VC; + case VC: return VS; + case HI: return LS; + case LS: return HI; + case GE: return LT; + case LT: return GE; + case GT: return LE; + case LE: return GT; + } +} + +/// Given a condition code, return NZCV flags that would satisfy that condition. +/// The flag bits are in the format expected by the ccmp instructions. 
+/// Note that many different flag settings can satisfy a given condition code, +/// this function just returns one of them. +inline static unsigned getNZCVToSatisfyCondCode(CondCode Code) { + // NZCV flags encoded as expected by ccmp instructions, ARMv8 ISA 5.5.7. + enum { N = 8, Z = 4, C = 2, V = 1 }; + switch (Code) { + default: llvm_unreachable("Unknown condition code"); + case EQ: return Z; // Z == 1 + case NE: return 0; // Z == 0 + case CS: return C; // C == 1 + case CC: return 0; // C == 0 + case MI: return N; // N == 1 + case PL: return 0; // N == 0 + case VS: return V; // V == 1 + case VC: return 0; // V == 0 + case HI: return C; // C == 1 && Z == 0 + case LS: return 0; // C == 0 || Z == 1 + case GE: return 0; // N == V + case LT: return N; // N != V + case GT: return 0; // Z == 0 && N == V + case LE: return Z; // Z == 1 || N != V + } +} +} // end namespace ARM64CC + +namespace ARM64SYS { +enum BarrierOption { + InvalidBarrier = 0xff, + OSHLD = 0x1, + OSHST = 0x2, + OSH = 0x3, + NSHLD = 0x5, + NSHST = 0x6, + NSH = 0x7, + ISHLD = 0x9, + ISHST = 0xa, + ISH = 0xb, + LD = 0xd, + ST = 0xe, + SY = 0xf +}; + +inline static const char *getBarrierOptName(BarrierOption Opt) { + switch (Opt) { + default: return NULL; + case 0x1: return "oshld"; + case 0x2: return "oshst"; + case 0x3: return "osh"; + case 0x5: return "nshld"; + case 0x6: return "nshst"; + case 0x7: return "nsh"; + case 0x9: return "ishld"; + case 0xa: return "ishst"; + case 0xb: return "ish"; + case 0xd: return "ld"; + case 0xe: return "st"; + case 0xf: return "sy"; + } +} + +#define A64_SYSREG_ENC(op0,CRn,op2,CRm,op1) ((op0) << 14 | (op1) << 11 | \ + (CRn) << 7 | (CRm) << 3 | (op2)) +enum SystemRegister { + InvalidSystemReg = 0, + // Table in section 3.10.3 + SPSR_EL1 = 0xc200, + SPSR_svc = SPSR_EL1, + ELR_EL1 = 0xc201, + SP_EL0 = 0xc208, + SPSel = 0xc210, + CurrentEL = 0xc212, + DAIF = 0xda11, + NZCV = 0xda10, + FPCR = 0xda20, + FPSR = 0xda21, + DSPSR = 0xda28, + DLR = 0xda29, + SPSR_EL2 = 0xe200, + 
  SPSR_hyp = SPSR_EL2,
  ELR_EL2 = 0xe201,
  SP_EL1 = 0xe208,
  SPSR_irq = 0xe218,
  SPSR_abt = 0xe219,
  SPSR_und = 0xe21a,
  SPSR_fiq = 0xe21b,
  SPSR_EL3 = 0xf200,
  ELR_EL3 = 0xf201,
  SP_EL2 = 0xf208,


  // Table in section 3.10.8
  MIDR_EL1 = 0xc000,
  CTR_EL0 = 0xd801,
  MPIDR_EL1 = 0xc005,
  ECOIDR_EL1 = 0xc006,
  DCZID_EL0 = 0xd807,
  MVFR0_EL1 = 0xc018,
  MVFR1_EL1 = 0xc019,
  ID_AA64PFR0_EL1 = 0xc020,
  ID_AA64PFR1_EL1 = 0xc021,
  ID_AA64DFR0_EL1 = 0xc028,
  ID_AA64DFR1_EL1 = 0xc029,
  ID_AA64ISAR0_EL1 = 0xc030,
  ID_AA64ISAR1_EL1 = 0xc031,
  ID_AA64MMFR0_EL1 = 0xc038,
  ID_AA64MMFR1_EL1 = 0xc039,
  CCSIDR_EL1 = 0xc800,
  CLIDR_EL1 = 0xc801,
  AIDR_EL1 = 0xc807,
  CSSELR_EL1 = 0xd000,
  VPIDR_EL2 = 0xe000,
  VMPIDR_EL2 = 0xe005,
  SCTLR_EL1 = 0xc080,
  SCTLR_EL2 = 0xe080,
  SCTLR_EL3 = 0xf080,
  ACTLR_EL1 = 0xc081,
  ACTLR_EL2 = 0xe081,
  ACTLR_EL3 = 0xf081,
  CPACR_EL1 = 0xc082,
  CPTR_EL2 = 0xe08a,
  CPTR_EL3 = 0xf08a,
  SCR_EL3 = 0xf088,
  HCR_EL2 = 0xe088,
  MDCR_EL2 = 0xe089,
  MDCR_EL3 = 0xf099,
  HSTR_EL2 = 0xe08b,
  HACR_EL2 = 0xe08f,
  TTBR0_EL1 = 0xc100,
  TTBR1_EL1 = 0xc101,
  TTBR0_EL2 = 0xe100,
  TTBR0_EL3 = 0xf100,
  VTTBR_EL2 = 0xe108,
  TCR_EL1 = 0xc102,
  TCR_EL2 = 0xe102,
  TCR_EL3 = 0xf102,
  VTCR_EL2 = 0xe10a,
  ADFSR_EL1 = 0xc288,
  AIFSR_EL1 = 0xc289,
  ADFSR_EL2 = 0xe288,
  AIFSR_EL2 = 0xe289,
  ADFSR_EL3 = 0xf288,
  AIFSR_EL3 = 0xf289,
  ESR_EL1 = 0xc290,
  ESR_EL2 = 0xe290,
  ESR_EL3 = 0xf290,
  FAR_EL1 = 0xc300,
  FAR_EL2 = 0xe300,
  FAR_EL3 = 0xf300,
  HPFAR_EL2 = 0xe304,
  PAR_EL1 = 0xc3a0,
  MAIR_EL1 = 0xc510,
  MAIR_EL2 = 0xe510,
  MAIR_EL3 = 0xf510,
  AMAIR_EL1 = 0xc518,
  AMAIR_EL2 = 0xe518,
  AMAIR_EL3 = 0xf518,
  VBAR_EL1 = 0xc600,
  VBAR_EL2 = 0xe600,
  VBAR_EL3 = 0xf600,
  RVBAR_EL1 = 0xc601,
  RVBAR_EL2 = 0xe601,
  RVBAR_EL3 = 0xf601,
  ISR_EL1 = 0xc608,
  CONTEXTIDR_EL1 = 0xc681,
  TPIDR_EL0 = 0xde82,
  TPIDRRO_EL0 = 0xde83,
  TPIDR_EL1 = 0xc684,
  TPIDR_EL2 = 0xe682,
  TPIDR_EL3 = 0xf682,
  TEECR32_EL1 = 0x9000,

  // Generic timer registers.
  CNTFRQ_EL0 = 0xdf00,
  CNTPCT_EL0 = 0xdf01,
  CNTVCT_EL0 = 0xdf02,
  CNTVOFF_EL2 = 0xe703,
  CNTKCTL_EL1 = 0xc708,
  CNTHCTL_EL2 = 0xe708,
  CNTP_TVAL_EL0 = 0xdf10,
  CNTP_CTL_EL0 = 0xdf11,
  CNTP_CVAL_EL0 = 0xdf12,
  CNTV_TVAL_EL0 = 0xdf18,
  CNTV_CTL_EL0 = 0xdf19,
  CNTV_CVAL_EL0 = 0xdf1a,
  CNTHP_TVAL_EL2 = 0xe710,
  CNTHP_CTL_EL2 = 0xe711,
  CNTHP_CVAL_EL2 = 0xe712,
  CNTPS_TVAL_EL1 = 0xff10,
  CNTPS_CTL_EL1 = 0xff11,
  CNTPS_CVAL_EL1= 0xff12,

  // Performance monitor event counters: PMEVCNTRn_EL0 = 0xdf40 + n.
  PMEVCNTR0_EL0 = 0xdf40,   PMEVCNTR1_EL0 = 0xdf41,
  PMEVCNTR2_EL0 = 0xdf42,   PMEVCNTR3_EL0 = 0xdf43,
  PMEVCNTR4_EL0 = 0xdf44,   PMEVCNTR5_EL0 = 0xdf45,
  PMEVCNTR6_EL0 = 0xdf46,   PMEVCNTR7_EL0 = 0xdf47,
  PMEVCNTR8_EL0 = 0xdf48,   PMEVCNTR9_EL0 = 0xdf49,
  PMEVCNTR10_EL0 = 0xdf4a,  PMEVCNTR11_EL0 = 0xdf4b,
  PMEVCNTR12_EL0 = 0xdf4c,  PMEVCNTR13_EL0 = 0xdf4d,
  PMEVCNTR14_EL0 = 0xdf4e,  PMEVCNTR15_EL0 = 0xdf4f,
  PMEVCNTR16_EL0 = 0xdf50,  PMEVCNTR17_EL0 = 0xdf51,
  PMEVCNTR18_EL0 = 0xdf52,  PMEVCNTR19_EL0 = 0xdf53,
  PMEVCNTR20_EL0 = 0xdf54,  PMEVCNTR21_EL0 = 0xdf55,
  PMEVCNTR22_EL0 = 0xdf56,  PMEVCNTR23_EL0 = 0xdf57,
  PMEVCNTR24_EL0 = 0xdf58,  PMEVCNTR25_EL0 = 0xdf59,
  PMEVCNTR26_EL0 = 0xdf5a,  PMEVCNTR27_EL0 = 0xdf5b,
  PMEVCNTR28_EL0 = 0xdf5c,  PMEVCNTR29_EL0 = 0xdf5d,
  PMEVCNTR30_EL0 = 0xdf5e,

  // Performance monitor event type registers: PMEVTYPERn_EL0 = 0xdf60 + n.
  PMEVTYPER0_EL0 = 0xdf60,  PMEVTYPER1_EL0 = 0xdf61,
  PMEVTYPER2_EL0 = 0xdf62,  PMEVTYPER3_EL0 = 0xdf63,
  PMEVTYPER4_EL0 = 0xdf64,  PMEVTYPER5_EL0 = 0xdf65,
  PMEVTYPER6_EL0 = 0xdf66,  PMEVTYPER7_EL0 = 0xdf67,
  PMEVTYPER8_EL0 = 0xdf68,  PMEVTYPER9_EL0 = 0xdf69,
  PMEVTYPER10_EL0 = 0xdf6a, PMEVTYPER11_EL0 = 0xdf6b,
  PMEVTYPER12_EL0 = 0xdf6c, PMEVTYPER13_EL0 = 0xdf6d,
  PMEVTYPER14_EL0 = 0xdf6e, PMEVTYPER15_EL0 = 0xdf6f,
  PMEVTYPER16_EL0 = 0xdf70, PMEVTYPER17_EL0 = 0xdf71,
  PMEVTYPER18_EL0 = 0xdf72, PMEVTYPER19_EL0 = 0xdf73,
  PMEVTYPER20_EL0 = 0xdf74, PMEVTYPER21_EL0 = 0xdf75,
  PMEVTYPER22_EL0 = 0xdf76, PMEVTYPER23_EL0 = 0xdf77,
  PMEVTYPER24_EL0 = 0xdf78, PMEVTYPER25_EL0 = 0xdf79,
  PMEVTYPER26_EL0 = 0xdf7a, PMEVTYPER27_EL0 = 0xdf7b,
  PMEVTYPER28_EL0 = 0xdf7c, PMEVTYPER29_EL0 = 0xdf7d,
  PMEVTYPER30_EL0 = 0xdf7e,

  PMCCFILTR_EL0 = 0xdf7f,

  RMR_EL3 = 0xf602,
  RMR_EL2 = 0xd602,
  RMR_EL1 = 0xce02,

  // Debug Architecture 5.3, Table 17.
  MDCCSR_EL0 = A64_SYSREG_ENC(2, 0, 0, 1, 3),
  MDCCINT_EL1 = A64_SYSREG_ENC(2, 0, 0, 2, 0),
  DBGDTR_EL0 = A64_SYSREG_ENC(2, 0, 0, 4, 3),
  DBGDTRRX_EL0 = A64_SYSREG_ENC(2, 0, 0, 5, 3),
  DBGDTRTX_EL0 = DBGDTRRX_EL0,
  DBGVCR32_EL2 = A64_SYSREG_ENC(2, 0, 0, 7, 4),
  OSDTRRX_EL1 = A64_SYSREG_ENC(2, 0, 2, 0, 0),
  MDSCR_EL1 = A64_SYSREG_ENC(2, 0, 2, 2, 0),
  OSDTRTX_EL1 = A64_SYSREG_ENC(2, 0, 2, 3, 0),
  // NOTE(review): "OSECCR_EL11" looks like a typo for OSECCR_EL1 -- confirm
  // against the users of this enumerator before renaming.
  OSECCR_EL11 = A64_SYSREG_ENC(2, 0, 2, 6, 0),

  // Hardware breakpoint value/control registers.
  DBGBVR0_EL1 = A64_SYSREG_ENC(2, 0, 4, 0, 0),
  DBGBVR1_EL1 = A64_SYSREG_ENC(2, 0, 4, 1, 0),
  DBGBVR2_EL1 = A64_SYSREG_ENC(2, 0, 4, 2, 0),
  DBGBVR3_EL1 = A64_SYSREG_ENC(2, 0, 4, 3, 0),
  DBGBVR4_EL1 = A64_SYSREG_ENC(2, 0, 4, 4, 0),
  DBGBVR5_EL1 = A64_SYSREG_ENC(2, 0, 4, 5, 0),
  DBGBVR6_EL1 = A64_SYSREG_ENC(2, 0, 4, 6, 0),
  DBGBVR7_EL1 = A64_SYSREG_ENC(2, 0, 4, 7, 0),
  DBGBVR8_EL1 = A64_SYSREG_ENC(2, 0, 4, 8, 0),
  DBGBVR9_EL1 = A64_SYSREG_ENC(2, 0, 4, 9, 0),
  DBGBVR10_EL1 = A64_SYSREG_ENC(2, 0, 4, 10, 0),
  DBGBVR11_EL1 = A64_SYSREG_ENC(2, 0, 4, 11, 0),
  DBGBVR12_EL1 = A64_SYSREG_ENC(2, 0, 4, 12, 0),
  DBGBVR13_EL1 = A64_SYSREG_ENC(2, 0, 4, 13, 0),
  DBGBVR14_EL1 = A64_SYSREG_ENC(2, 0, 4, 14, 0),
  DBGBVR15_EL1 = A64_SYSREG_ENC(2, 0, 4, 15, 0),

  DBGBCR0_EL1 = A64_SYSREG_ENC(2, 0, 5, 0, 0),
  DBGBCR1_EL1 = A64_SYSREG_ENC(2, 0, 5, 1, 0),
  DBGBCR2_EL1 = A64_SYSREG_ENC(2, 0, 5, 2, 0),
  DBGBCR3_EL1 = A64_SYSREG_ENC(2, 0, 5, 3, 0),
  DBGBCR4_EL1 = A64_SYSREG_ENC(2, 0, 5, 4, 0),
  DBGBCR5_EL1 = A64_SYSREG_ENC(2, 0, 5, 5, 0),
  DBGBCR6_EL1 = A64_SYSREG_ENC(2, 0, 5, 6, 0),
  DBGBCR7_EL1 = A64_SYSREG_ENC(2, 0, 5, 7, 0),
  DBGBCR8_EL1 = A64_SYSREG_ENC(2, 0, 5, 8, 0),
  DBGBCR9_EL1 = A64_SYSREG_ENC(2, 0, 5, 9, 0),
  DBGBCR10_EL1 = A64_SYSREG_ENC(2, 0, 5, 10, 0),
  DBGBCR11_EL1 = A64_SYSREG_ENC(2, 0, 5, 11, 0),
  DBGBCR12_EL1 = A64_SYSREG_ENC(2, 0, 5, 12, 0),
  DBGBCR13_EL1 = A64_SYSREG_ENC(2, 0, 5, 13, 0),
  DBGBCR14_EL1 = A64_SYSREG_ENC(2, 0, 5, 14, 0),
  DBGBCR15_EL1 = A64_SYSREG_ENC(2, 0, 5, 15, 0),

  // Hardware watchpoint value/control registers.
  DBGWVR0_EL1 = A64_SYSREG_ENC(2, 0, 6, 0, 0),
  DBGWVR1_EL1 = A64_SYSREG_ENC(2, 0, 6, 1, 0),
  DBGWVR2_EL1 = A64_SYSREG_ENC(2, 0, 6, 2, 0),
  DBGWVR3_EL1 = A64_SYSREG_ENC(2, 0, 6, 3, 0),
  DBGWVR4_EL1 = A64_SYSREG_ENC(2, 0, 6, 4, 0),
  DBGWVR5_EL1 = A64_SYSREG_ENC(2, 0, 6, 5, 0),
  DBGWVR6_EL1 = A64_SYSREG_ENC(2, 0, 6, 6, 0),
  DBGWVR7_EL1 = A64_SYSREG_ENC(2, 0, 6, 7, 0),
  DBGWVR8_EL1 = A64_SYSREG_ENC(2, 0, 6, 8, 0),
  DBGWVR9_EL1 = A64_SYSREG_ENC(2, 0, 6, 9, 0),
  DBGWVR10_EL1 = A64_SYSREG_ENC(2, 0, 6, 10, 0),
  DBGWVR11_EL1 = A64_SYSREG_ENC(2, 0, 6, 11, 0),
  DBGWVR12_EL1 = A64_SYSREG_ENC(2, 0, 6, 12, 0),
  DBGWVR13_EL1 = A64_SYSREG_ENC(2, 0, 6, 13, 0),
  DBGWVR14_EL1 = A64_SYSREG_ENC(2, 0, 6, 14, 0),
  DBGWVR15_EL1 = A64_SYSREG_ENC(2, 0, 6, 15, 0),

  DBGWCR0_EL1 = A64_SYSREG_ENC(2, 0, 7, 0, 0),
  DBGWCR1_EL1 = A64_SYSREG_ENC(2, 0, 7, 1, 0),
  DBGWCR2_EL1 = A64_SYSREG_ENC(2, 0, 7, 2, 0),
  DBGWCR3_EL1 = A64_SYSREG_ENC(2, 0, 7, 3, 0),
  DBGWCR4_EL1 = A64_SYSREG_ENC(2, 0, 7, 4, 0),
  DBGWCR5_EL1 = A64_SYSREG_ENC(2, 0, 7, 5, 0),
  DBGWCR6_EL1 = A64_SYSREG_ENC(2, 0, 7, 6, 0),
  DBGWCR7_EL1 = A64_SYSREG_ENC(2, 0, 7, 7, 0),
  DBGWCR8_EL1 = A64_SYSREG_ENC(2, 0, 7, 8, 0),
  DBGWCR9_EL1 = A64_SYSREG_ENC(2, 0, 7, 9, 0),
  DBGWCR10_EL1 = A64_SYSREG_ENC(2, 0, 7, 10, 0),
  DBGWCR11_EL1 = A64_SYSREG_ENC(2, 0, 7, 11, 0),
  DBGWCR12_EL1 = A64_SYSREG_ENC(2, 0, 7, 12, 0),
  DBGWCR13_EL1 = A64_SYSREG_ENC(2, 0, 7, 13, 0),
  DBGWCR14_EL1 = A64_SYSREG_ENC(2, 0, 7, 14, 0),
  DBGWCR15_EL1 = A64_SYSREG_ENC(2, 0, 7, 15, 0),

  MDRAR_EL1 = A64_SYSREG_ENC(2, 1, 0, 0, 0),
  OSLAR_EL1 = A64_SYSREG_ENC(2, 1, 4, 0, 0),
  OSLSR_EL1 = A64_SYSREG_ENC(2, 1, 4, 1, 0),
  OSDLR_EL1 = A64_SYSREG_ENC(2, 1, 4, 3, 0),
  DBGPRCR_EL1 = A64_SYSREG_ENC(2, 1, 4, 4, 0),

  DBGCLAIMSET_EL1 = A64_SYSREG_ENC(2, 7, 6, 8, 0),
  DBGCLAIMCLR_EL1 = A64_SYSREG_ENC(2, 7, 6, 9, 0),
  DBGAUTHSTATUS_EL1 = A64_SYSREG_ENC(2, 7, 6, 14, 0),

  DBGDEVID2 = A64_SYSREG_ENC(2, 7, 7, 0, 0),
  DBGDEVID1 = A64_SYSREG_ENC(2, 7, 7, 1, 0),
  DBGDEVID0 = A64_SYSREG_ENC(2, 7, 7, 2, 0),

  // The following registers are defined to allow access from AArch64 to
  // registers which are only used in the AArch32 architecture.
  DACR32_EL2 = 0xe180,
  IFSR32_EL2 = 0xe281,
  TEEHBR32_EL1 = 0x9080,
  SDER32_EL3 = 0xf089,
  FPEXC32_EL2 = 0xe298,

  // Cyclone specific system registers
  CPM_IOACC_CTL_EL3 = 0xff90,

  // Architectural system registers
  ID_PFR0_EL1 = 0xc008,
  ID_PFR1_EL1 = 0xc009,
  ID_DFR0_EL1 = 0xc00a,
  ID_AFR0_EL1 = 0xc00b,
  ID_ISAR0_EL1 = 0xc010,
  ID_ISAR1_EL1 = 0xc011,
  ID_ISAR2_EL1 = 0xc012,
  ID_ISAR3_EL1 = 0xc013,
  ID_ISAR4_EL1 = 0xc014,
  ID_ISAR5_EL1 = 0xc015,
  AFSR1_EL1 = 0xc289,  // note same as old AIFSR_EL1
  AFSR0_EL1 = 0xc288,  // note same as old ADFSR_EL1
  REVIDR_EL1 = 0xc006  // note same as old ECOIDR_EL1

};
#undef A64_SYSREG_ENC

/// getSystemRegisterName - Return the textual name for a system register
/// encoding, or NULL for an encoding that has no name here.
static inline const char *getSystemRegisterName(SystemRegister Reg) {
  switch(Reg) {
  default: return NULL; // Caller is responsible for handling invalid value.
+ case SPSR_EL1: return "SPSR_EL1"; + case ELR_EL1: return "ELR_EL1"; + case SP_EL0: return "SP_EL0"; + case SPSel: return "SPSel"; + case DAIF: return "DAIF"; + case CurrentEL: return "CurrentEL"; + case NZCV: return "NZCV"; + case FPCR: return "FPCR"; + case FPSR: return "FPSR"; + case DSPSR: return "DSPSR"; + case DLR: return "DLR"; + case SPSR_EL2: return "SPSR_EL2"; + case ELR_EL2: return "ELR_EL2"; + case SP_EL1: return "SP_EL1"; + case SPSR_irq: return "SPSR_irq"; + case SPSR_abt: return "SPSR_abt"; + case SPSR_und: return "SPSR_und"; + case SPSR_fiq: return "SPSR_fiq"; + case SPSR_EL3: return "SPSR_EL3"; + case ELR_EL3: return "ELR_EL3"; + case SP_EL2: return "SP_EL2"; + case MIDR_EL1: return "MIDR_EL1"; + case CTR_EL0: return "CTR_EL0"; + case MPIDR_EL1: return "MPIDR_EL1"; + case DCZID_EL0: return "DCZID_EL0"; + case MVFR0_EL1: return "MVFR0_EL1"; + case MVFR1_EL1: return "MVFR1_EL1"; + case ID_AA64PFR0_EL1: return "ID_AA64PFR0_EL1"; + case ID_AA64PFR1_EL1: return "ID_AA64PFR1_EL1"; + case ID_AA64DFR0_EL1: return "ID_AA64DFR0_EL1"; + case ID_AA64DFR1_EL1: return "ID_AA64DFR1_EL1"; + case ID_AA64ISAR0_EL1: return "ID_AA64ISAR0_EL1"; + case ID_AA64ISAR1_EL1: return "ID_AA64ISAR1_EL1"; + case ID_AA64MMFR0_EL1: return "ID_AA64MMFR0_EL1"; + case ID_AA64MMFR1_EL1: return "ID_AA64MMFR1_EL1"; + case CCSIDR_EL1: return "CCSIDR_EL1"; + case CLIDR_EL1: return "CLIDR_EL1"; + case AIDR_EL1: return "AIDR_EL1"; + case CSSELR_EL1: return "CSSELR_EL1"; + case VPIDR_EL2: return "VPIDR_EL2"; + case VMPIDR_EL2: return "VMPIDR_EL2"; + case SCTLR_EL1: return "SCTLR_EL1"; + case SCTLR_EL2: return "SCTLR_EL2"; + case SCTLR_EL3: return "SCTLR_EL3"; + case ACTLR_EL1: return "ACTLR_EL1"; + case ACTLR_EL2: return "ACTLR_EL2"; + case ACTLR_EL3: return "ACTLR_EL3"; + case CPACR_EL1: return "CPACR_EL1"; + case CPTR_EL2: return "CPTR_EL2"; + case CPTR_EL3: return "CPTR_EL3"; + case SCR_EL3: return "SCR_EL3"; + case HCR_EL2: return "HCR_EL2"; + case MDCR_EL2: return "MDCR_EL2"; + case 
MDCR_EL3: return "MDCR_EL3"; + case HSTR_EL2: return "HSTR_EL2"; + case HACR_EL2: return "HACR_EL2"; + case TTBR0_EL1: return "TTBR0_EL1"; + case TTBR1_EL1: return "TTBR1_EL1"; + case TTBR0_EL2: return "TTBR0_EL2"; + case TTBR0_EL3: return "TTBR0_EL3"; + case VTTBR_EL2: return "VTTBR_EL2"; + case TCR_EL1: return "TCR_EL1"; + case TCR_EL2: return "TCR_EL2"; + case TCR_EL3: return "TCR_EL3"; + case VTCR_EL2: return "VTCR_EL2"; + case ADFSR_EL2: return "ADFSR_EL2"; + case AIFSR_EL2: return "AIFSR_EL2"; + case ADFSR_EL3: return "ADFSR_EL3"; + case AIFSR_EL3: return "AIFSR_EL3"; + case ESR_EL1: return "ESR_EL1"; + case ESR_EL2: return "ESR_EL2"; + case ESR_EL3: return "ESR_EL3"; + case FAR_EL1: return "FAR_EL1"; + case FAR_EL2: return "FAR_EL2"; + case FAR_EL3: return "FAR_EL3"; + case HPFAR_EL2: return "HPFAR_EL2"; + case PAR_EL1: return "PAR_EL1"; + case MAIR_EL1: return "MAIR_EL1"; + case MAIR_EL2: return "MAIR_EL2"; + case MAIR_EL3: return "MAIR_EL3"; + case AMAIR_EL1: return "AMAIR_EL1"; + case AMAIR_EL2: return "AMAIR_EL2"; + case AMAIR_EL3: return "AMAIR_EL3"; + case VBAR_EL1: return "VBAR_EL1"; + case VBAR_EL2: return "VBAR_EL2"; + case VBAR_EL3: return "VBAR_EL3"; + case RVBAR_EL1: return "RVBAR_EL1"; + case RVBAR_EL2: return "RVBAR_EL2"; + case RVBAR_EL3: return "RVBAR_EL3"; + case ISR_EL1: return "ISR_EL1"; + case CONTEXTIDR_EL1: return "CONTEXTIDR_EL1"; + case TPIDR_EL0: return "TPIDR_EL0"; + case TPIDRRO_EL0: return "TPIDRRO_EL0"; + case TPIDR_EL1: return "TPIDR_EL1"; + case TPIDR_EL2: return "TPIDR_EL2"; + case TPIDR_EL3: return "TPIDR_EL3"; + case TEECR32_EL1: return "TEECR32_EL1"; + case CNTFRQ_EL0: return "CNTFRQ_EL0"; + case CNTPCT_EL0: return "CNTPCT_EL0"; + case CNTVCT_EL0: return "CNTVCT_EL0"; + case CNTVOFF_EL2: return "CNTVOFF_EL2"; + case CNTKCTL_EL1: return "CNTKCTL_EL1"; + case CNTHCTL_EL2: return "CNTHCTL_EL2"; + case CNTP_TVAL_EL0: return "CNTP_TVAL_EL0"; + case CNTP_CTL_EL0: return "CNTP_CTL_EL0"; + case CNTP_CVAL_EL0: return 
"CNTP_CVAL_EL0"; + case CNTV_TVAL_EL0: return "CNTV_TVAL_EL0"; + case CNTV_CTL_EL0: return "CNTV_CTL_EL0"; + case CNTV_CVAL_EL0: return "CNTV_CVAL_EL0"; + case CNTHP_TVAL_EL2: return "CNTHP_TVAL_EL2"; + case CNTHP_CTL_EL2: return "CNTHP_CTL_EL2"; + case CNTHP_CVAL_EL2: return "CNTHP_CVAL_EL2"; + case CNTPS_TVAL_EL1: return "CNTPS_TVAL_EL1"; + case CNTPS_CTL_EL1: return "CNTPS_CTL_EL1"; + case CNTPS_CVAL_EL1: return "CNTPS_CVAL_EL1"; + case DACR32_EL2: return "DACR32_EL2"; + case IFSR32_EL2: return "IFSR32_EL2"; + case TEEHBR32_EL1: return "TEEHBR32_EL1"; + case SDER32_EL3: return "SDER32_EL3"; + case FPEXC32_EL2: return "FPEXC32_EL2"; + case PMEVCNTR0_EL0: return "PMEVCNTR0_EL0"; + case PMEVCNTR1_EL0: return "PMEVCNTR1_EL0"; + case PMEVCNTR2_EL0: return "PMEVCNTR2_EL0"; + case PMEVCNTR3_EL0: return "PMEVCNTR3_EL0"; + case PMEVCNTR4_EL0: return "PMEVCNTR4_EL0"; + case PMEVCNTR5_EL0: return "PMEVCNTR5_EL0"; + case PMEVCNTR6_EL0: return "PMEVCNTR6_EL0"; + case PMEVCNTR7_EL0: return "PMEVCNTR7_EL0"; + case PMEVCNTR8_EL0: return "PMEVCNTR8_EL0"; + case PMEVCNTR9_EL0: return "PMEVCNTR9_EL0"; + case PMEVCNTR10_EL0: return "PMEVCNTR10_EL0"; + case PMEVCNTR11_EL0: return "PMEVCNTR11_EL0"; + case PMEVCNTR12_EL0: return "PMEVCNTR12_EL0"; + case PMEVCNTR13_EL0: return "PMEVCNTR13_EL0"; + case PMEVCNTR14_EL0: return "PMEVCNTR14_EL0"; + case PMEVCNTR15_EL0: return "PMEVCNTR15_EL0"; + case PMEVCNTR16_EL0: return "PMEVCNTR16_EL0"; + case PMEVCNTR17_EL0: return "PMEVCNTR17_EL0"; + case PMEVCNTR18_EL0: return "PMEVCNTR18_EL0"; + case PMEVCNTR19_EL0: return "PMEVCNTR19_EL0"; + case PMEVCNTR20_EL0: return "PMEVCNTR20_EL0"; + case PMEVCNTR21_EL0: return "PMEVCNTR21_EL0"; + case PMEVCNTR22_EL0: return "PMEVCNTR22_EL0"; + case PMEVCNTR23_EL0: return "PMEVCNTR23_EL0"; + case PMEVCNTR24_EL0: return "PMEVCNTR24_EL0"; + case PMEVCNTR25_EL0: return "PMEVCNTR25_EL0"; + case PMEVCNTR26_EL0: return "PMEVCNTR26_EL0"; + case PMEVCNTR27_EL0: return "PMEVCNTR27_EL0"; + case PMEVCNTR28_EL0: return 
"PMEVCNTR28_EL0"; + case PMEVCNTR29_EL0: return "PMEVCNTR29_EL0"; + case PMEVCNTR30_EL0: return "PMEVCNTR30_EL0"; + case PMEVTYPER0_EL0: return "PMEVTYPER0_EL0"; + case PMEVTYPER1_EL0: return "PMEVTYPER1_EL0"; + case PMEVTYPER2_EL0: return "PMEVTYPER2_EL0"; + case PMEVTYPER3_EL0: return "PMEVTYPER3_EL0"; + case PMEVTYPER4_EL0: return "PMEVTYPER4_EL0"; + case PMEVTYPER5_EL0: return "PMEVTYPER5_EL0"; + case PMEVTYPER6_EL0: return "PMEVTYPER6_EL0"; + case PMEVTYPER7_EL0: return "PMEVTYPER7_EL0"; + case PMEVTYPER8_EL0: return "PMEVTYPER8_EL0"; + case PMEVTYPER9_EL0: return "PMEVTYPER9_EL0"; + case PMEVTYPER10_EL0: return "PMEVTYPER10_EL0"; + case PMEVTYPER11_EL0: return "PMEVTYPER11_EL0"; + case PMEVTYPER12_EL0: return "PMEVTYPER12_EL0"; + case PMEVTYPER13_EL0: return "PMEVTYPER13_EL0"; + case PMEVTYPER14_EL0: return "PMEVTYPER14_EL0"; + case PMEVTYPER15_EL0: return "PMEVTYPER15_EL0"; + case PMEVTYPER16_EL0: return "PMEVTYPER16_EL0"; + case PMEVTYPER17_EL0: return "PMEVTYPER17_EL0"; + case PMEVTYPER18_EL0: return "PMEVTYPER18_EL0"; + case PMEVTYPER19_EL0: return "PMEVTYPER19_EL0"; + case PMEVTYPER20_EL0: return "PMEVTYPER20_EL0"; + case PMEVTYPER21_EL0: return "PMEVTYPER21_EL0"; + case PMEVTYPER22_EL0: return "PMEVTYPER22_EL0"; + case PMEVTYPER23_EL0: return "PMEVTYPER23_EL0"; + case PMEVTYPER24_EL0: return "PMEVTYPER24_EL0"; + case PMEVTYPER25_EL0: return "PMEVTYPER25_EL0"; + case PMEVTYPER26_EL0: return "PMEVTYPER26_EL0"; + case PMEVTYPER27_EL0: return "PMEVTYPER27_EL0"; + case PMEVTYPER28_EL0: return "PMEVTYPER28_EL0"; + case PMEVTYPER29_EL0: return "PMEVTYPER29_EL0"; + case PMEVTYPER30_EL0: return "PMEVTYPER30_EL0"; + case PMCCFILTR_EL0: return "PMCCFILTR_EL0"; + case RMR_EL3: return "RMR_EL3"; + case RMR_EL2: return "RMR_EL2"; + case RMR_EL1: return "RMR_EL1"; + case CPM_IOACC_CTL_EL3: return "CPM_IOACC_CTL_EL3"; + case MDCCSR_EL0: return "MDCCSR_EL0"; + case MDCCINT_EL1: return "MDCCINT_EL1"; + case DBGDTR_EL0: return "DBGDTR_EL0"; + case DBGDTRRX_EL0: return 
"DBGDTRRX_EL0"; + case DBGVCR32_EL2: return "DBGVCR32_EL2"; + case OSDTRRX_EL1: return "OSDTRRX_EL1"; + case MDSCR_EL1: return "MDSCR_EL1"; + case OSDTRTX_EL1: return "OSDTRTX_EL1"; + case OSECCR_EL11: return "OSECCR_EL11"; + case DBGBVR0_EL1: return "DBGBVR0_EL1"; + case DBGBVR1_EL1: return "DBGBVR1_EL1"; + case DBGBVR2_EL1: return "DBGBVR2_EL1"; + case DBGBVR3_EL1: return "DBGBVR3_EL1"; + case DBGBVR4_EL1: return "DBGBVR4_EL1"; + case DBGBVR5_EL1: return "DBGBVR5_EL1"; + case DBGBVR6_EL1: return "DBGBVR6_EL1"; + case DBGBVR7_EL1: return "DBGBVR7_EL1"; + case DBGBVR8_EL1: return "DBGBVR8_EL1"; + case DBGBVR9_EL1: return "DBGBVR9_EL1"; + case DBGBVR10_EL1: return "DBGBVR10_EL1"; + case DBGBVR11_EL1: return "DBGBVR11_EL1"; + case DBGBVR12_EL1: return "DBGBVR12_EL1"; + case DBGBVR13_EL1: return "DBGBVR13_EL1"; + case DBGBVR14_EL1: return "DBGBVR14_EL1"; + case DBGBVR15_EL1: return "DBGBVR15_EL1"; + case DBGBCR0_EL1: return "DBGBCR0_EL1"; + case DBGBCR1_EL1: return "DBGBCR1_EL1"; + case DBGBCR2_EL1: return "DBGBCR2_EL1"; + case DBGBCR3_EL1: return "DBGBCR3_EL1"; + case DBGBCR4_EL1: return "DBGBCR4_EL1"; + case DBGBCR5_EL1: return "DBGBCR5_EL1"; + case DBGBCR6_EL1: return "DBGBCR6_EL1"; + case DBGBCR7_EL1: return "DBGBCR7_EL1"; + case DBGBCR8_EL1: return "DBGBCR8_EL1"; + case DBGBCR9_EL1: return "DBGBCR9_EL1"; + case DBGBCR10_EL1: return "DBGBCR10_EL1"; + case DBGBCR11_EL1: return "DBGBCR11_EL1"; + case DBGBCR12_EL1: return "DBGBCR12_EL1"; + case DBGBCR13_EL1: return "DBGBCR13_EL1"; + case DBGBCR14_EL1: return "DBGBCR14_EL1"; + case DBGBCR15_EL1: return "DBGBCR15_EL1"; + case DBGWVR0_EL1: return "DBGWVR0_EL1"; + case DBGWVR1_EL1: return "DBGWVR1_EL1"; + case DBGWVR2_EL1: return "DBGWVR2_EL1"; + case DBGWVR3_EL1: return "DBGWVR3_EL1"; + case DBGWVR4_EL1: return "DBGWVR4_EL1"; + case DBGWVR5_EL1: return "DBGWVR5_EL1"; + case DBGWVR6_EL1: return "DBGWVR6_EL1"; + case DBGWVR7_EL1: return "DBGWVR7_EL1"; + case DBGWVR8_EL1: return "DBGWVR8_EL1"; + case DBGWVR9_EL1: return 
"DBGWVR9_EL1"; + case DBGWVR10_EL1: return "DBGWVR10_EL1"; + case DBGWVR11_EL1: return "DBGWVR11_EL1"; + case DBGWVR12_EL1: return "DBGWVR12_EL1"; + case DBGWVR13_EL1: return "DBGWVR13_EL1"; + case DBGWVR14_EL1: return "DBGWVR14_EL1"; + case DBGWVR15_EL1: return "DBGWVR15_EL1"; + case DBGWCR0_EL1: return "DBGWCR0_EL1"; + case DBGWCR1_EL1: return "DBGWCR1_EL1"; + case DBGWCR2_EL1: return "DBGWCR2_EL1"; + case DBGWCR3_EL1: return "DBGWCR3_EL1"; + case DBGWCR4_EL1: return "DBGWCR4_EL1"; + case DBGWCR5_EL1: return "DBGWCR5_EL1"; + case DBGWCR6_EL1: return "DBGWCR6_EL1"; + case DBGWCR7_EL1: return "DBGWCR7_EL1"; + case DBGWCR8_EL1: return "DBGWCR8_EL1"; + case DBGWCR9_EL1: return "DBGWCR9_EL1"; + case DBGWCR10_EL1: return "DBGWCR10_EL1"; + case DBGWCR11_EL1: return "DBGWCR11_EL1"; + case DBGWCR12_EL1: return "DBGWCR12_EL1"; + case DBGWCR13_EL1: return "DBGWCR13_EL1"; + case DBGWCR14_EL1: return "DBGWCR14_EL1"; + case DBGWCR15_EL1: return "DBGWCR15_EL1"; + case MDRAR_EL1: return "MDRAR_EL1"; + case OSLAR_EL1: return "OSLAR_EL1"; + case OSLSR_EL1: return "OSLSR_EL1"; + case OSDLR_EL1: return "OSDLR_EL1"; + case DBGPRCR_EL1: return "DBGPRCR_EL1"; + case DBGCLAIMSET_EL1: return "DBGCLAIMSET_EL1"; + case DBGCLAIMCLR_EL1: return "DBGCLAIMCLR_EL1"; + case DBGAUTHSTATUS_EL1: return "DBGAUTHSTATUS_EL1"; + case DBGDEVID2: return "DBGDEVID2"; + case DBGDEVID1: return "DBGDEVID1"; + case DBGDEVID0: return "DBGDEVID0"; + case ID_PFR0_EL1: return "ID_PFR0_EL1"; + case ID_PFR1_EL1: return "ID_PFR1_EL1"; + case ID_DFR0_EL1: return "ID_DFR0_EL1"; + case ID_AFR0_EL1: return "ID_AFR0_EL1"; + case ID_ISAR0_EL1: return "ID_ISAR0_EL1"; + case ID_ISAR1_EL1: return "ID_ISAR1_EL1"; + case ID_ISAR2_EL1: return "ID_ISAR2_EL1"; + case ID_ISAR3_EL1: return "ID_ISAR3_EL1"; + case ID_ISAR4_EL1: return "ID_ISAR4_EL1"; + case ID_ISAR5_EL1: return "ID_ISAR5_EL1"; + case AFSR1_EL1: return "AFSR1_EL1"; + case AFSR0_EL1: return "AFSR0_EL1"; + case REVIDR_EL1: return "REVIDR_EL1"; + } +} + +enum CPSRField { 
+ InvalidCPSRField = 0xff, + cpsr_SPSel = 0x5, + cpsr_DAIFSet = 0x1e, + cpsr_DAIFClr = 0x1f +}; + +static inline const char *getCPSRFieldName(CPSRField Val) { + switch(Val) { + default: assert(0 && "Invalid system register value!"); + case cpsr_SPSel: return "SPSel"; + case cpsr_DAIFSet: return "DAIFSet"; + case cpsr_DAIFClr: return "DAIFClr"; + } +} + +} // end namespace ARM64SYS + +namespace ARM64II { + /// Target Operand Flag enum. + enum TOF { + //===------------------------------------------------------------------===// + // ARM64 Specific MachineOperand flags. + + MO_NO_FLAG, + + MO_FRAGMENT = 0x7, + + /// MO_PAGE - A symbol operand with this flag represents the pc-relative + /// offset of the 4K page containing the symbol. This is used with the + /// ADRP instruction. + MO_PAGE = 1, + + /// MO_PAGEOFF - A symbol operand with this flag represents the offset of + /// that symbol within a 4K page. This offset is added to the page address + /// to produce the complete address. + MO_PAGEOFF = 2, + + /// MO_G3 - A symbol operand with this flag (granule 3) represents the high + /// 16-bits of a 64-bit address, used in a MOVZ or MOVK instruction + MO_G3 = 3, + + /// MO_G2 - A symbol operand with this flag (granule 2) represents the bits + /// 32-47 of a 64-bit address, used in a MOVZ or MOVK instruction + MO_G2 = 4, + + /// MO_G1 - A symbol operand with this flag (granule 1) represents the bits + /// 16-31 of a 64-bit address, used in a MOVZ or MOVK instruction + MO_G1 = 5, + + /// MO_G0 - A symbol operand with this flag (granule 0) represents the bits + /// 0-15 of a 64-bit address, used in a MOVZ or MOVK instruction + MO_G0 = 6, + + /// MO_GOT - This flag indicates that a symbol operand represents the + /// address of the GOT entry for the symbol, rather than the address of + /// the symbol itself. + MO_GOT = 8, + + /// MO_NC - Indicates whether the linker is expected to check the symbol + /// reference for overflow. 
For example in an ADRP/ADD pair of relocations + /// the ADRP usually does check, but not the ADD. + MO_NC = 0x10, + + /// MO_TLS - Indicates that the operand being accessed is some kind of + /// thread-local symbol. On Darwin, only one type of thread-local access + /// exists (pre linker-relaxation), but on ELF the TLSModel used for the + /// referee will affect interpretation. + MO_TLS = 0x20 + }; +} // end namespace ARM64II + +} // end namespace llvm + +#endif diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64ELFObjectWriter.cpp b/lib/Target/ARM64/MCTargetDesc/ARM64ELFObjectWriter.cpp new file mode 100644 index 0000000..1a132a1 --- /dev/null +++ b/lib/Target/ARM64/MCTargetDesc/ARM64ELFObjectWriter.cpp @@ -0,0 +1,237 @@ +//===-- ARM64ELFObjectWriter.cpp - ARM64 ELF Writer -----------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file handles ELF-specific object emission, converting LLVM's internal +// fixups into the appropriate relocations. 
+// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/ARM64FixupKinds.h" +#include "MCTargetDesc/ARM64MCExpr.h" +#include "MCTargetDesc/ARM64MCTargetDesc.h" +#include "llvm/MC/MCELFObjectWriter.h" +#include "llvm/MC/MCValue.h" +#include "llvm/Support/ErrorHandling.h" + +using namespace llvm; + +namespace { +class ARM64ELFObjectWriter : public MCELFObjectTargetWriter { +public: + ARM64ELFObjectWriter(uint8_t OSABI); + + virtual ~ARM64ELFObjectWriter(); + +protected: + unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup, + bool IsPCRel) const override; + +private: +}; +} + +ARM64ELFObjectWriter::ARM64ELFObjectWriter(uint8_t OSABI) + : MCELFObjectTargetWriter(/*Is64Bit*/ true, OSABI, ELF::EM_AARCH64, + /*HasRelocationAddend*/ true) {} + +ARM64ELFObjectWriter::~ARM64ELFObjectWriter() {} + +unsigned ARM64ELFObjectWriter::GetRelocType(const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel) const { + ARM64MCExpr::VariantKind RefKind = + static_cast<ARM64MCExpr::VariantKind>(Target.getRefKind()); + ARM64MCExpr::VariantKind SymLoc = ARM64MCExpr::getSymbolLoc(RefKind); + bool IsNC = ARM64MCExpr::isNotChecked(RefKind); + + assert((!Target.getSymA() || + Target.getSymA()->getKind() == MCSymbolRefExpr::VK_None) && + "Should only be expression-level modifiers here"); + + assert((!Target.getSymB() || + Target.getSymB()->getKind() == MCSymbolRefExpr::VK_None) && + "Should only be expression-level modifiers here"); + + if (IsPCRel) { + switch ((unsigned)Fixup.getKind()) { + case FK_Data_2: + return ELF::R_AARCH64_PREL16; + case FK_Data_4: + return ELF::R_AARCH64_PREL32; + case FK_Data_8: + return ELF::R_AARCH64_PREL64; + case ARM64::fixup_arm64_pcrel_adr_imm21: + llvm_unreachable("No ELF relocations supported for ADR at the moment"); + case ARM64::fixup_arm64_pcrel_adrp_imm21: + if (SymLoc == ARM64MCExpr::VK_ABS && !IsNC) + return ELF::R_AARCH64_ADR_PREL_PG_HI21; + if (SymLoc == ARM64MCExpr::VK_GOT && 
!IsNC) + return ELF::R_AARCH64_ADR_GOT_PAGE; + if (SymLoc == ARM64MCExpr::VK_GOTTPREL && !IsNC) + return ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21; + if (SymLoc == ARM64MCExpr::VK_TLSDESC && !IsNC) + return ELF::R_AARCH64_TLSDESC_ADR_PAGE; + llvm_unreachable("invalid symbol kind for ADRP relocation"); + case ARM64::fixup_arm64_pcrel_branch26: + return ELF::R_AARCH64_JUMP26; + case ARM64::fixup_arm64_pcrel_call26: + return ELF::R_AARCH64_CALL26; + case ARM64::fixup_arm64_pcrel_imm19: + return ELF::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19; + default: + llvm_unreachable("Unsupported pc-relative fixup kind"); + } + } else { + switch ((unsigned)Fixup.getKind()) { + case FK_Data_2: + return ELF::R_AARCH64_ABS16; + case FK_Data_4: + return ELF::R_AARCH64_ABS32; + case FK_Data_8: + return ELF::R_AARCH64_ABS64; + case ARM64::fixup_arm64_add_imm12: + if (SymLoc == ARM64MCExpr::VK_DTPREL && IsNC) + return ELF::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC; + if (SymLoc == ARM64MCExpr::VK_DTPREL && !IsNC) + return ELF::R_AARCH64_TLSLD_ADD_DTPREL_LO12; + if (SymLoc == ARM64MCExpr::VK_TPREL && IsNC) + return ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC; + if (SymLoc == ARM64MCExpr::VK_TPREL && !IsNC) + return ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12; + if (SymLoc == ARM64MCExpr::VK_TLSDESC && IsNC) + return ELF::R_AARCH64_TLSDESC_ADD_LO12_NC; + if (SymLoc == ARM64MCExpr::VK_ABS && IsNC) + return ELF::R_AARCH64_ADD_ABS_LO12_NC; + + report_fatal_error("invalid fixup for add (uimm12) instruction"); + return 0; + case ARM64::fixup_arm64_ldst_imm12_scale1: + if (SymLoc == ARM64MCExpr::VK_ABS && IsNC) + return ELF::R_AARCH64_LDST8_ABS_LO12_NC; + if (SymLoc == ARM64MCExpr::VK_DTPREL && !IsNC) + return ELF::R_AARCH64_TLSLD_LDST8_DTPREL_LO12; + if (SymLoc == ARM64MCExpr::VK_DTPREL && IsNC) + return ELF::R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC; + if (SymLoc == ARM64MCExpr::VK_TPREL && !IsNC) + return ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12; + if (SymLoc == ARM64MCExpr::VK_TPREL && IsNC) + return 
ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC; + + report_fatal_error("invalid fixup for 8-bit load/store instruction"); + return 0; + case ARM64::fixup_arm64_ldst_imm12_scale2: + if (SymLoc == ARM64MCExpr::VK_ABS && IsNC) + return ELF::R_AARCH64_LDST16_ABS_LO12_NC; + if (SymLoc == ARM64MCExpr::VK_DTPREL && !IsNC) + return ELF::R_AARCH64_TLSLD_LDST16_DTPREL_LO12; + if (SymLoc == ARM64MCExpr::VK_DTPREL && IsNC) + return ELF::R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC; + if (SymLoc == ARM64MCExpr::VK_TPREL && !IsNC) + return ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12; + if (SymLoc == ARM64MCExpr::VK_TPREL && IsNC) + return ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC; + + report_fatal_error("invalid fixup for 16-bit load/store instruction"); + return 0; + case ARM64::fixup_arm64_ldst_imm12_scale4: + if (SymLoc == ARM64MCExpr::VK_ABS && IsNC) + return ELF::R_AARCH64_LDST32_ABS_LO12_NC; + if (SymLoc == ARM64MCExpr::VK_DTPREL && !IsNC) + return ELF::R_AARCH64_TLSLD_LDST32_DTPREL_LO12; + if (SymLoc == ARM64MCExpr::VK_DTPREL && IsNC) + return ELF::R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC; + if (SymLoc == ARM64MCExpr::VK_TPREL && !IsNC) + return ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12; + if (SymLoc == ARM64MCExpr::VK_TPREL && IsNC) + return ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC; + + report_fatal_error("invalid fixup for 32-bit load/store instruction"); + return 0; + case ARM64::fixup_arm64_ldst_imm12_scale8: + if (SymLoc == ARM64MCExpr::VK_ABS && IsNC) + return ELF::R_AARCH64_LDST64_ABS_LO12_NC; + if (SymLoc == ARM64MCExpr::VK_GOT && IsNC) + return ELF::R_AARCH64_LD64_GOT_LO12_NC; + if (SymLoc == ARM64MCExpr::VK_DTPREL && !IsNC) + return ELF::R_AARCH64_TLSLD_LDST64_DTPREL_LO12; + if (SymLoc == ARM64MCExpr::VK_DTPREL && IsNC) + return ELF::R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC; + if (SymLoc == ARM64MCExpr::VK_TPREL && !IsNC) + return ELF::R_AARCH64_TLSLE_LDST64_TPREL_LO12; + if (SymLoc == ARM64MCExpr::VK_TPREL && IsNC) + return ELF::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC; + if (SymLoc == 
ARM64MCExpr::VK_GOTTPREL && IsNC) + return ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC; + if (SymLoc == ARM64MCExpr::VK_TLSDESC && IsNC) + return ELF::R_AARCH64_TLSDESC_LD64_LO12_NC; + + report_fatal_error("invalid fixup for 64-bit load/store instruction"); + return 0; + case ARM64::fixup_arm64_ldst_imm12_scale16: + if (SymLoc == ARM64MCExpr::VK_ABS && IsNC) + return ELF::R_AARCH64_LDST128_ABS_LO12_NC; + + report_fatal_error("invalid fixup for 128-bit load/store instruction"); + return 0; + case ARM64::fixup_arm64_movw: + if (RefKind == ARM64MCExpr::VK_ABS_G3) + return ELF::R_AARCH64_MOVW_UABS_G3; + if (RefKind == ARM64MCExpr::VK_ABS_G2) + return ELF::R_AARCH64_MOVW_UABS_G2; + if (RefKind == ARM64MCExpr::VK_ABS_G2_NC) + return ELF::R_AARCH64_MOVW_UABS_G2_NC; + if (RefKind == ARM64MCExpr::VK_ABS_G1) + return ELF::R_AARCH64_MOVW_UABS_G1; + if (RefKind == ARM64MCExpr::VK_ABS_G1_NC) + return ELF::R_AARCH64_MOVW_UABS_G1_NC; + if (RefKind == ARM64MCExpr::VK_ABS_G0) + return ELF::R_AARCH64_MOVW_UABS_G0; + if (RefKind == ARM64MCExpr::VK_ABS_G0_NC) + return ELF::R_AARCH64_MOVW_UABS_G0_NC; + if (RefKind == ARM64MCExpr::VK_DTPREL_G2) + return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G2; + if (RefKind == ARM64MCExpr::VK_DTPREL_G1) + return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1; + if (RefKind == ARM64MCExpr::VK_DTPREL_G1_NC) + return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC; + if (RefKind == ARM64MCExpr::VK_DTPREL_G0) + return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G0; + if (RefKind == ARM64MCExpr::VK_DTPREL_G0_NC) + return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC; + if (RefKind == ARM64MCExpr::VK_TPREL_G2) + return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G2; + if (RefKind == ARM64MCExpr::VK_TPREL_G1) + return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1; + if (RefKind == ARM64MCExpr::VK_TPREL_G1_NC) + return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC; + if (RefKind == ARM64MCExpr::VK_TPREL_G0) + return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0; + if (RefKind == ARM64MCExpr::VK_TPREL_G0_NC) + return 
ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC; + if (RefKind == ARM64MCExpr::VK_GOTTPREL_G1) + return ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1; + if (RefKind == ARM64MCExpr::VK_GOTTPREL_G0_NC) + return ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC; + report_fatal_error("invalid fixup for movz/movk instruction"); + return 0; + case ARM64::fixup_arm64_tlsdesc_call: + return ELF::R_AARCH64_TLSDESC_CALL; + default: + llvm_unreachable("Unknown ELF relocation type"); + } + } + + llvm_unreachable("Unimplemented fixup -> relocation"); +} + +MCObjectWriter *llvm::createARM64ELFObjectWriter(raw_ostream &OS, + uint8_t OSABI) { + MCELFObjectTargetWriter *MOTW = new ARM64ELFObjectWriter(OSABI); + return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/true); +} diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64ELFStreamer.cpp b/lib/Target/ARM64/MCTargetDesc/ARM64ELFStreamer.cpp new file mode 100644 index 0000000..97a3493 --- /dev/null +++ b/lib/Target/ARM64/MCTargetDesc/ARM64ELFStreamer.cpp @@ -0,0 +1,158 @@ +//===- lib/MC/ARM64ELFStreamer.cpp - ELF Object Output for ARM64 ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file assembles .s files and emits AArch64 ELF .o object files. Different +// from generic ELF streamer in emitting mapping symbols ($x and $d) to delimit +// regions of data and code. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/MC/MCELFStreamer.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/Twine.h" +#include "llvm/MC/MCAsmBackend.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCELF.h" +#include "llvm/MC/MCELFStreamer.h" +#include "llvm/MC/MCELFSymbolFlags.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCObjectStreamer.h" +#include "llvm/MC/MCSection.h" +#include "llvm/MC/MCSectionELF.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/MC/MCValue.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ELF.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +namespace { + +/// Extend the generic ELFStreamer class so that it can emit mapping symbols at +/// the appropriate points in the object files. These symbols are defined in the +/// AArch64 ELF ABI: +/// infocenter.arm.com/help/topic/com.arm.doc.ihi0056a/IHI0056A_aaelf64.pdf +/// +/// In brief: $x or $d should be emitted at the start of each contiguous region +/// of A64 code or data in a section. In practice, this emission does not rely +/// on explicit assembler directives but on inherent properties of the +/// directives doing the emission (e.g. ".byte" is data, "add x0, x0, x0" an +/// instruction). +/// +/// As a result this system is orthogonal to the DataRegion infrastructure used +/// by MachO. Beware! 
+class ARM64ELFStreamer : public MCELFStreamer { +public: + ARM64ELFStreamer(MCContext &Context, MCAsmBackend &TAB, raw_ostream &OS, + MCCodeEmitter *Emitter) + : MCELFStreamer(Context, TAB, OS, Emitter), MappingSymbolCounter(0), + LastEMS(EMS_None) {} + + ~ARM64ELFStreamer() {} + + virtual void ChangeSection(const MCSection *Section, + const MCExpr *Subsection) { + // We have to keep track of the mapping symbol state of any sections we + // use. Each one should start off as EMS_None, which is provided as the + // default constructor by DenseMap::lookup. + LastMappingSymbols[getPreviousSection().first] = LastEMS; + LastEMS = LastMappingSymbols.lookup(Section); + + MCELFStreamer::ChangeSection(Section, Subsection); + } + + /// This function is the one used to emit instruction data into the ELF + /// streamer. We override it to add the appropriate mapping symbol if + /// necessary. + virtual void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) { + EmitA64MappingSymbol(); + MCELFStreamer::EmitInstruction(Inst, STI); + } + + /// This is one of the functions used to emit data into an ELF section, so the + /// ARM64 streamer overrides it to add the appropriate mapping symbol ($d) + /// if necessary. + virtual void EmitBytes(StringRef Data) { + EmitDataMappingSymbol(); + MCELFStreamer::EmitBytes(Data); + } + + /// This is one of the functions used to emit data into an ELF section, so the + /// ARM64 streamer overrides it to add the appropriate mapping symbol ($d) + /// if necessary. 
+ virtual void EmitValueImpl(const MCExpr *Value, unsigned Size) { + EmitDataMappingSymbol(); + MCELFStreamer::EmitValueImpl(Value, Size); + } + +private: + enum ElfMappingSymbol { + EMS_None, + EMS_A64, + EMS_Data + }; + + void EmitDataMappingSymbol() { + if (LastEMS == EMS_Data) + return; + EmitMappingSymbol("$d"); + LastEMS = EMS_Data; + } + + void EmitA64MappingSymbol() { + if (LastEMS == EMS_A64) + return; + EmitMappingSymbol("$x"); + LastEMS = EMS_A64; + } + + void EmitMappingSymbol(StringRef Name) { + MCSymbol *Start = getContext().CreateTempSymbol(); + EmitLabel(Start); + + MCSymbol *Symbol = getContext().GetOrCreateSymbol( + Name + "." + Twine(MappingSymbolCounter++)); + + MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol); + MCELF::SetType(SD, ELF::STT_NOTYPE); + MCELF::SetBinding(SD, ELF::STB_LOCAL); + SD.setExternal(false); + Symbol->setSection(*getCurrentSection().first); + + const MCExpr *Value = MCSymbolRefExpr::Create(Start, getContext()); + Symbol->setVariableValue(Value); + } + + int64_t MappingSymbolCounter; + + DenseMap<const MCSection *, ElfMappingSymbol> LastMappingSymbols; + ElfMappingSymbol LastEMS; + + /// @} +}; +} + +namespace llvm { +MCELFStreamer *createARM64ELFStreamer(MCContext &Context, MCAsmBackend &TAB, + raw_ostream &OS, MCCodeEmitter *Emitter, + bool RelaxAll, bool NoExecStack) { + ARM64ELFStreamer *S = new ARM64ELFStreamer(Context, TAB, OS, Emitter); + if (RelaxAll) + S->getAssembler().setRelaxAll(true); + if (NoExecStack) + S->getAssembler().setNoExecStack(true); + return S; +} +} diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64ELFStreamer.h b/lib/Target/ARM64/MCTargetDesc/ARM64ELFStreamer.h new file mode 100644 index 0000000..72dadbc --- /dev/null +++ b/lib/Target/ARM64/MCTargetDesc/ARM64ELFStreamer.h @@ -0,0 +1,26 @@ +//===-- ARM64ELFStreamer.h - ELF Streamer for ARM64 -------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open 
Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements ELF streamer information for the ARM64 backend.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_AARCH64_ELF_STREAMER_H
#define LLVM_AARCH64_ELF_STREAMER_H

#include "llvm/MC/MCELFStreamer.h"

namespace llvm {

/// Factory for the mapping-symbol-aware ELF streamer defined in
/// ARM64ELFStreamer.cpp.
MCELFStreamer *createARM64ELFStreamer(MCContext &Context, MCAsmBackend &TAB,
                                      raw_ostream &OS, MCCodeEmitter *Emitter,
                                      bool RelaxAll, bool NoExecStack);
}

#endif // LLVM_AARCH64_ELF_STREAMER_H
diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64FixupKinds.h b/lib/Target/ARM64/MCTargetDesc/ARM64FixupKinds.h
new file mode 100644
index 0000000..02eb91f
--- /dev/null
+++ b/lib/Target/ARM64/MCTargetDesc/ARM64FixupKinds.h
@@ -0,0 +1,72 @@
//===-- ARM64FixupKinds.h - ARM64 Specific Fixup Entries --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ARM64FIXUPKINDS_H
#define LLVM_ARM64FIXUPKINDS_H

#include "llvm/MC/MCFixup.h"

namespace llvm {
namespace ARM64 {

enum Fixups {
  // fixup_arm64_pcrel_adr_imm21 - A 21-bit pc-relative immediate inserted into
  // an ADR instruction.
  fixup_arm64_pcrel_adr_imm21 = FirstTargetFixupKind,

  // fixup_arm64_pcrel_adrp_imm21 - A 21-bit pc-relative immediate inserted into
  // an ADRP instruction.
  fixup_arm64_pcrel_adrp_imm21,

  // fixup_arm64_add_imm12 - 12-bit fixup for add/sub instructions.
  // No alignment adjustment. All value bits are encoded.
  fixup_arm64_add_imm12,

  // fixup_arm64_ldst_imm12_* - unsigned 12-bit fixups for load and
  // store instructions.
fixup_arm64_ldst_imm12_scale1,
  fixup_arm64_ldst_imm12_scale2,
  fixup_arm64_ldst_imm12_scale4,
  fixup_arm64_ldst_imm12_scale8,
  fixup_arm64_ldst_imm12_scale16,

  // fixup_arm64_movw - 16-bit immediate for a MOVZ/MOVK instruction; the
  // concrete relocation is selected from the expression's variant kind
  // (ABS_G0..G3, DTPREL_G*, TPREL_G*, GOTTPREL_G*) by the object writer.
  fixup_arm64_movw,

  // fixup_arm64_pcrel_branch14 - The high 14 bits of a pc-relative immediate
  // for the test-and-branch encodings. NOTE(review): the old comment said
  // "21-bit", apparently copied from the imm19 entry below; a 14-bit
  // word-scaled offset spans a 16-bit byte range -- confirm against the
  // encoder.
  fixup_arm64_pcrel_branch14,

  // fixup_arm64_pcrel_imm19 - The high 19 bits of a 21-bit pc-relative
  // immediate. Not used as part of a lo/hi pair and thus generates
  // relocations directly when necessary.
  fixup_arm64_pcrel_imm19,

  // fixup_arm64_pcrel_branch26 - The high 26 bits of a 28-bit pc-relative
  // immediate.
  fixup_arm64_pcrel_branch26,

  // fixup_arm64_pcrel_call26 - The high 26 bits of a 28-bit pc-relative
  // immediate. Distinguished from branch26 only on ELF.
  fixup_arm64_pcrel_call26,

  // fixup_arm64_tlsdesc_call - zero-space placeholder for the ELF
  // R_AARCH64_TLSDESC_CALL relocation.
  fixup_arm64_tlsdesc_call,

  // Marker
  LastTargetFixupKind,
  NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
};

} // end namespace ARM64
} // end namespace llvm

#endif
diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64MCAsmInfo.cpp b/lib/Target/ARM64/MCTargetDesc/ARM64MCAsmInfo.cpp
new file mode 100644
index 0000000..97e0d3c
--- /dev/null
+++ b/lib/Target/ARM64/MCTargetDesc/ARM64MCAsmInfo.cpp
@@ -0,0 +1,92 @@
//===-- ARM64MCAsmInfo.cpp - ARM64 asm properties -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declarations of the ARM64MCAsmInfo properties.
//
//===----------------------------------------------------------------------===//

#include "ARM64MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

// Assembly-syntax variants understood by the ARM64 asm printer; the numeric
// values are used directly as AssemblerDialect indices below.
enum AsmWriterVariantTy {
  Default = -1,
  Generic = 0,
  Apple = 1
};

// -arm64-neon-syntax: lets the user force generic or Apple-style NEON
// assembly regardless of the per-object-format default chosen below.
static cl::opt<AsmWriterVariantTy> AsmWriterVariant(
    "arm64-neon-syntax", cl::init(Default),
    cl::desc("Choose style of NEON code to emit from ARM64 backend:"),
    cl::values(clEnumValN(Generic, "generic", "Emit generic NEON assembly"),
               clEnumValN(Apple, "apple", "Emit Apple-style NEON assembly"),
               clEnumValEnd));

ARM64MCAsmInfoDarwin::ARM64MCAsmInfoDarwin() {
  // We prefer NEON instructions to be printed in the short form.
  // Darwin defaults to the Apple dialect (1) unless overridden on the
  // command line.
  AssemblerDialect = AsmWriterVariant == Default ? 1 : AsmWriterVariant;

  PrivateGlobalPrefix = "L";
  SeparatorString = "%%";
  CommentString = ";";
  PointerSize = CalleeSaveStackSlotSize = 8;

  AlignmentIsInBytes = false;
  UsesELFSectionDirectiveForBSS = true;
  SupportsDebugInformation = true;
  UseDataRegionDirectives = true;

  ExceptionsType = ExceptionHandling::DwarfCFI;
}

const MCExpr *ARM64MCAsmInfoDarwin::getExprForPersonalitySymbol(
    const MCSymbol *Sym, unsigned Encoding, MCStreamer &Streamer) const {
  // On Darwin, we can reference dwarf symbols with foo@GOT-., which
  // is an indirect pc-relative reference. The default implementation
  // won't reference using the GOT, so we need this target-specific
  // version.
  MCContext &Context = Streamer.getContext();
  const MCExpr *Res =
      MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOT, Context);
  // Emit a temp label at the current position so the expression below can
  // subtract "." (pc) from the GOT reference.
  MCSymbol *PCSym = Context.CreateTempSymbol();
  Streamer.EmitLabel(PCSym);
  const MCExpr *PC = MCSymbolRefExpr::Create(PCSym, Context);
  return MCBinaryExpr::CreateSub(Res, PC, Context);
}

ARM64MCAsmInfoELF::ARM64MCAsmInfoELF() {
  // We prefer NEON instructions to be printed in the short form.
+ AssemblerDialect = AsmWriterVariant == Default ? 0 : AsmWriterVariant; + + PointerSize = 8; + + // ".comm align is in bytes but .align is pow-2." + AlignmentIsInBytes = false; + + CommentString = "//"; + PrivateGlobalPrefix = ".L"; + Code32Directive = ".code\t32"; + + Data16bitsDirective = "\t.hword\t"; + Data32bitsDirective = "\t.word\t"; + Data64bitsDirective = "\t.xword\t"; + + UseDataRegionDirectives = false; + + WeakRefDirective = "\t.weak\t"; + + HasLEB128 = true; + SupportsDebugInformation = true; + + // Exceptions handling + ExceptionsType = ExceptionHandling::DwarfCFI; +} diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64MCAsmInfo.h b/lib/Target/ARM64/MCTargetDesc/ARM64MCAsmInfo.h new file mode 100644 index 0000000..f2d33a7 --- /dev/null +++ b/lib/Target/ARM64/MCTargetDesc/ARM64MCAsmInfo.h @@ -0,0 +1,36 @@ +//=====-- ARM64MCAsmInfo.h - ARM64 asm properties -----------*- C++ -*--====// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the ARM64MCAsmInfo class. 
+// +//===----------------------------------------------------------------------===// + +#ifndef ARM64TARGETASMINFO_H +#define ARM64TARGETASMINFO_H + +#include "llvm/MC/MCAsmInfoDarwin.h" + +namespace llvm { +class Target; +class StringRef; +class MCStreamer; +struct ARM64MCAsmInfoDarwin : public MCAsmInfoDarwin { + explicit ARM64MCAsmInfoDarwin(); + virtual const MCExpr *getExprForPersonalitySymbol(const MCSymbol *Sym, + unsigned Encoding, + MCStreamer &Streamer) const; +}; + +struct ARM64MCAsmInfoELF : public MCAsmInfo { + explicit ARM64MCAsmInfoELF(); +}; + +} // namespace llvm + +#endif diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64MCCodeEmitter.cpp b/lib/Target/ARM64/MCTargetDesc/ARM64MCCodeEmitter.cpp new file mode 100644 index 0000000..19559f8 --- /dev/null +++ b/lib/Target/ARM64/MCTargetDesc/ARM64MCCodeEmitter.cpp @@ -0,0 +1,563 @@ +//===-- ARM64/ARM64MCCodeEmitter.cpp - Convert ARM64 code to machine code -===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the ARM64MCCodeEmitter class. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "mccodeemitter" +#include "MCTargetDesc/ARM64AddressingModes.h" +#include "MCTargetDesc/ARM64BaseInfo.h" +#include "MCTargetDesc/ARM64FixupKinds.h" +#include "MCTargetDesc/ARM64MCExpr.h" +#include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +STATISTIC(MCNumEmitted, "Number of MC instructions emitted."); +STATISTIC(MCNumFixups, "Number of MC fixups created."); + +namespace { + +class ARM64MCCodeEmitter : public MCCodeEmitter { + MCContext &Ctx; + + ARM64MCCodeEmitter(const ARM64MCCodeEmitter &); // DO NOT IMPLEMENT + void operator=(const ARM64MCCodeEmitter &); // DO NOT IMPLEMENT +public: + ARM64MCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti, + MCContext &ctx) + : Ctx(ctx) {} + + ~ARM64MCCodeEmitter() {} + + // getBinaryCodeForInstr - TableGen'erated function for getting the + // binary encoding for an instruction. + uint64_t getBinaryCodeForInstr(const MCInst &MI, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// getMachineOpValue - Return binary encoding of operand. If the machine + /// operand requires relocation, record the relocation and return zero. + unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// getAMIndexed8OpValue - Return encoding info for base register + /// and 12-bit unsigned immediate attached to a load, store or prfm + /// instruction. If operand requires a relocation, record it and + /// return zero in that part of the encoding. 
+ template <uint32_t FixupKind> + uint32_t getAMIndexed8OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label + /// target. + uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and + /// the 2-bit shift field. + uint32_t getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// getCondBranchTargetOpValue - Return the encoded value for a conditional + /// branch target. + uint32_t getCondBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and- + /// branch target. + uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// getBranchTargetOpValue - Return the encoded value for an unconditional + /// branch target. + uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// getMoveWideImmOpValue - Return the encoded value for the immediate operand + /// of a MOVZ or MOVK instruction. + uint32_t getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// getVecShifterOpValue - Return the encoded value for the vector shifter. + uint32_t getVecShifterOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// getMoveVecShifterOpValue - Return the encoded value for the vector move + /// shifter (MSL). 
+ uint32_t getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// getFixedPointScaleOpValue - Return the encoded value for the + // FP-to-fixed-point scale factor. + uint32_t getFixedPointScaleOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + uint32_t getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + uint32_t getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + uint32_t getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + uint32_t getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + uint32_t getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + uint32_t getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + uint32_t getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + uint32_t getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// getSIMDShift64OpValue - Return the encoded value for the + // shift-by-immediate AdvSIMD instructions. 
+ uint32_t getSIMDShift64OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + uint32_t getSIMDShift64_32OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + uint32_t getSIMDShift32OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + uint32_t getSIMDShift16OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue, + const MCSubtargetInfo &STI) const; + + void EmitByte(unsigned char C, raw_ostream &OS) const { OS << (char)C; } + + void EmitConstant(uint64_t Val, unsigned Size, raw_ostream &OS) const { + // Output the constant in little endian byte order. + for (unsigned i = 0; i != Size; ++i) { + EmitByte(Val & 255, OS); + Val >>= 8; + } + } + + void EncodeInstruction(const MCInst &MI, raw_ostream &OS, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; +}; + +} // end anonymous namespace + +MCCodeEmitter *llvm::createARM64MCCodeEmitter(const MCInstrInfo &MCII, + const MCRegisterInfo &MRI, + const MCSubtargetInfo &STI, + MCContext &Ctx) { + return new ARM64MCCodeEmitter(MCII, STI, Ctx); +} + +/// getMachineOpValue - Return binary encoding of operand. If the machine +/// operand requires relocation, record the relocation and return zero. 
+unsigned +ARM64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + if (MO.isReg()) + return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()); + else { + assert(MO.isImm() && "did not expect relocated expression"); + return static_cast<unsigned>(MO.getImm()); + } + + assert(0 && "Unable to encode MCOperand!"); + return 0; +} + +template <uint32_t FixupKind> +uint32_t +ARM64MCCodeEmitter::getAMIndexed8OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + unsigned BaseReg = MI.getOperand(OpIdx).getReg(); + BaseReg = Ctx.getRegisterInfo()->getEncodingValue(BaseReg); + + const MCOperand &MO = MI.getOperand(OpIdx + 1); + uint32_t ImmVal = 0; + + if (MO.isImm()) + ImmVal = static_cast<uint32_t>(MO.getImm()); + else { + assert(MO.isExpr() && "unable to encode load/store imm operand"); + MCFixupKind Kind = MCFixupKind(FixupKind); + Fixups.push_back(MCFixup::Create(0, MO.getExpr(), Kind, MI.getLoc())); + ++MCNumFixups; + } + + return BaseReg | (ImmVal << 5); +} + +/// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label +/// target. +uint32_t +ARM64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + + // If the destination is an immediate, we have nothing to do. + if (MO.isImm()) + return MO.getImm(); + assert(MO.isExpr() && "Unexpected ADR target type!"); + const MCExpr *Expr = MO.getExpr(); + + MCFixupKind Kind = MI.getOpcode() == ARM64::ADR + ? MCFixupKind(ARM64::fixup_arm64_pcrel_adr_imm21) + : MCFixupKind(ARM64::fixup_arm64_pcrel_adrp_imm21); + Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc())); + + MCNumFixups += 1; + + // All of the information is in the fixup. 
+ return 0; +} + +/// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and +/// the 2-bit shift field. The shift field is stored in bits 13-14 of the +/// return value. +uint32_t +ARM64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + // Suboperands are [imm, shifter]. + const MCOperand &MO = MI.getOperand(OpIdx); + const MCOperand &MO1 = MI.getOperand(OpIdx + 1); + assert(ARM64_AM::getShiftType(MO1.getImm()) == ARM64_AM::LSL && + "unexpected shift type for add/sub immediate"); + unsigned ShiftVal = ARM64_AM::getShiftValue(MO1.getImm()); + assert((ShiftVal == 0 || ShiftVal == 12) && + "unexpected shift value for add/sub immediate"); + if (MO.isImm()) + return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << 12)); + assert(MO.isExpr() && "Unable to encode MCOperand!"); + const MCExpr *Expr = MO.getExpr(); + assert(ShiftVal == 0 && "shift not allowed on add/sub immediate with fixup"); + + // Encode the 12 bits of the fixup. + MCFixupKind Kind = MCFixupKind(ARM64::fixup_arm64_add_imm12); + Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc())); + + ++MCNumFixups; + + return 0; +} + +/// getCondBranchTargetOpValue - Return the encoded value for a conditional +/// branch target. +uint32_t ARM64MCCodeEmitter::getCondBranchTargetOpValue( + const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + + // If the destination is an immediate, we have nothing to do. + if (MO.isImm()) + return MO.getImm(); + assert(MO.isExpr() && "Unexpected target type!"); + + MCFixupKind Kind = MCFixupKind(ARM64::fixup_arm64_pcrel_imm19); + Fixups.push_back(MCFixup::Create(0, MO.getExpr(), Kind, MI.getLoc())); + + ++MCNumFixups; + + // All of the information is in the fixup. 
+ return 0; +} + +uint32_t +ARM64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + + if (MO.isImm()) + return MO.getImm(); + assert(MO.isExpr() && "Unexpected movz/movk immediate"); + + Fixups.push_back(MCFixup::Create( + 0, MO.getExpr(), MCFixupKind(ARM64::fixup_arm64_movw), MI.getLoc())); + + ++MCNumFixups; + + return 0; +} + +/// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and- +/// branch target. +uint32_t ARM64MCCodeEmitter::getTestBranchTargetOpValue( + const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + + // If the destination is an immediate, we have nothing to do. + if (MO.isImm()) + return MO.getImm(); + assert(MO.isExpr() && "Unexpected ADR target type!"); + + MCFixupKind Kind = MCFixupKind(ARM64::fixup_arm64_pcrel_branch14); + Fixups.push_back(MCFixup::Create(0, MO.getExpr(), Kind, MI.getLoc())); + + ++MCNumFixups; + + // All of the information is in the fixup. + return 0; +} + +/// getBranchTargetOpValue - Return the encoded value for an unconditional +/// branch target. +uint32_t +ARM64MCCodeEmitter::getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + + // If the destination is an immediate, we have nothing to do. + if (MO.isImm()) + return MO.getImm(); + assert(MO.isExpr() && "Unexpected ADR target type!"); + + MCFixupKind Kind = MI.getOpcode() == ARM64::BL + ? MCFixupKind(ARM64::fixup_arm64_pcrel_call26) + : MCFixupKind(ARM64::fixup_arm64_pcrel_branch26); + Fixups.push_back(MCFixup::Create(0, MO.getExpr(), Kind, MI.getLoc())); + + ++MCNumFixups; + + // All of the information is in the fixup. 
+ return 0; +} + +/// getVecShifterOpValue - Return the encoded value for the vector shifter: +/// +/// 00 -> 0 +/// 01 -> 8 +/// 10 -> 16 +/// 11 -> 24 +uint32_t +ARM64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Expected an immediate value for the shift amount!"); + + switch (MO.getImm()) { + default: + break; + case 0: + return 0; + case 8: + return 1; + case 16: + return 2; + case 24: + return 3; + } + + assert(false && "Invalid value for vector shift amount!"); + return 0; +} + +uint32_t +ARM64MCCodeEmitter::getSIMDShift64OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Expected an immediate value for the shift amount!"); + return 64 - (MO.getImm()); +} + +uint32_t +ARM64MCCodeEmitter::getSIMDShift64_32OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Expected an immediate value for the shift amount!"); + return 64 - (MO.getImm() | 32); +} + +uint32_t +ARM64MCCodeEmitter::getSIMDShift32OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Expected an immediate value for the shift amount!"); + return 32 - (MO.getImm() | 16); +} + +uint32_t +ARM64MCCodeEmitter::getSIMDShift16OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Expected an immediate value for the shift amount!"); + return 16 - (MO.getImm() | 8); +} + +/// getFixedPointScaleOpValue - Return the encoded 
value for the +// FP-to-fixed-point scale factor. +uint32_t ARM64MCCodeEmitter::getFixedPointScaleOpValue( + const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Expected an immediate value for the scale amount!"); + return 64 - MO.getImm(); +} + +uint32_t +ARM64MCCodeEmitter::getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Expected an immediate value for the scale amount!"); + return 64 - MO.getImm(); +} + +uint32_t +ARM64MCCodeEmitter::getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Expected an immediate value for the scale amount!"); + return 32 - MO.getImm(); +} + +uint32_t +ARM64MCCodeEmitter::getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Expected an immediate value for the scale amount!"); + return 16 - MO.getImm(); +} + +uint32_t +ARM64MCCodeEmitter::getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Expected an immediate value for the scale amount!"); + return 8 - MO.getImm(); +} + +uint32_t +ARM64MCCodeEmitter::getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Expected an immediate value for the scale amount!"); + return MO.getImm() - 64; +} + +uint32_t 
+ARM64MCCodeEmitter::getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Expected an immediate value for the scale amount!"); + return MO.getImm() - 32; +} + +uint32_t +ARM64MCCodeEmitter::getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Expected an immediate value for the scale amount!"); + return MO.getImm() - 16; +} + +uint32_t +ARM64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Expected an immediate value for the scale amount!"); + return MO.getImm() - 8; +} + +/// getMoveVecShifterOpValue - Return the encoded value for the vector move +/// shifter (MSL). +uint32_t +ARM64MCCodeEmitter::getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && + "Expected an immediate value for the move shift amount!"); + unsigned ShiftVal = ARM64_AM::getShiftValue(MO.getImm()); + assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!"); + return ShiftVal == 8 ? 0 : 1; +} + +unsigned ARM64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue, + const MCSubtargetInfo &STI) const { + // If one of the signed fixup kinds is applied to a MOVZ instruction, the + // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's + // job to ensure that any bits possibly affected by this are 0. This means we + // must zero out bit 30 (essentially emitting a MOVN). + MCOperand UImm16MO = MI.getOperand(1); + + // Nothing to do if there's no fixup. 
+ if (UImm16MO.isImm()) + return EncodedValue; + + return EncodedValue & ~(1u << 30); +} + +void ARM64MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + if (MI.getOpcode() == ARM64::TLSDESCCALL) { + // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the + // following (BLR) instruction. It doesn't emit any code itself so it + // doesn't go through the normal TableGenerated channels. + MCFixupKind Fixup = MCFixupKind(ARM64::fixup_arm64_tlsdesc_call); + Fixups.push_back(MCFixup::Create(0, MI.getOperand(0).getExpr(), Fixup)); + return; + } + + uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI); + EmitConstant(Binary, 4, OS); + ++MCNumEmitted; // Keep track of the # of mi's emitted. +} + +#include "ARM64GenMCCodeEmitter.inc" diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64MCExpr.cpp b/lib/Target/ARM64/MCTargetDesc/ARM64MCExpr.cpp new file mode 100644 index 0000000..d4ab140 --- /dev/null +++ b/lib/Target/ARM64/MCTargetDesc/ARM64MCExpr.cpp @@ -0,0 +1,168 @@ +//===-- ARM64MCExpr.cpp - ARM64 specific MC expression classes --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the implementation of the assembly expression modifiers +// accepted by the AArch64 architecture (e.g. ":lo12:", ":gottprel_g1:", ...). 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "aarch64symbolrefexpr" +#include "ARM64MCExpr.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCELF.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/MC/MCValue.h" +#include "llvm/Object/ELF.h" +#include "llvm/Support/ErrorHandling.h" + +using namespace llvm; + +const ARM64MCExpr *ARM64MCExpr::Create(const MCExpr *Expr, VariantKind Kind, + MCContext &Ctx) { + return new (Ctx) ARM64MCExpr(Expr, Kind); +} + +StringRef ARM64MCExpr::getVariantKindName() const { + switch (static_cast<uint32_t>(getKind())) { + case VK_CALL: return ""; + case VK_LO12: return ":lo12:"; + case VK_ABS_G3: return ":abs_g3:"; + case VK_ABS_G2: return ":abs_g2:"; + case VK_ABS_G2_NC: return ":abs_g2_nc:"; + case VK_ABS_G1: return ":abs_g1:"; + case VK_ABS_G1_NC: return ":abs_g1_nc:"; + case VK_ABS_G0: return ":abs_g0:"; + case VK_ABS_G0_NC: return ":abs_g0_nc:"; + case VK_DTPREL_G2: return ":dtprel_g2:"; + case VK_DTPREL_G1: return ":dtprel_g1:"; + case VK_DTPREL_G1_NC: return ":dtprel_g1_nc:"; + case VK_DTPREL_G0: return ":dtprel_g0:"; + case VK_DTPREL_G0_NC: return ":dtprel_g0_nc:"; + case VK_DTPREL_LO12: return ":dtprel_lo12:"; + case VK_DTPREL_LO12_NC: return ":dtprel_lo12_nc:"; + case VK_TPREL_G2: return ":tprel_g2:"; + case VK_TPREL_G1: return ":tprel_g1:"; + case VK_TPREL_G1_NC: return ":tprel_g1_nc:"; + case VK_TPREL_G0: return ":tprel_g0:"; + case VK_TPREL_G0_NC: return ":tprel_g0_nc:"; + case VK_TPREL_LO12: return ":tprel_lo12:"; + case VK_TPREL_LO12_NC: return ":tprel_lo12_nc:"; + case VK_TLSDESC_LO12: return ":tlsdesc_lo12:"; + case VK_ABS_PAGE: return ""; + case VK_GOT_PAGE: return ":got:"; + case VK_GOT_LO12: return ":got_lo12:"; + case VK_GOTTPREL_PAGE: return ":gottprel:"; + case VK_GOTTPREL_LO12_NC: return ":gottprel_lo12:"; + case VK_GOTTPREL_G1: return ":gottprel_g1:"; + case VK_GOTTPREL_G0_NC: return ":gottprel_g0_nc:"; + case 
VK_TLSDESC: return ""; + case VK_TLSDESC_PAGE: return ":tlsdesc:"; + default: + llvm_unreachable("Invalid ELF symbol kind"); + } +} + +void ARM64MCExpr::PrintImpl(raw_ostream &OS) const { + if (getKind() != VK_NONE) + OS << getVariantKindName(); + OS << *Expr; +} + +// FIXME: This basically copies MCObjectStreamer::AddValueSymbols. Perhaps +// that method should be made public? +// FIXME: really do above: now that two backends are using it. +static void AddValueSymbolsImpl(const MCExpr *Value, MCAssembler *Asm) { + switch (Value->getKind()) { + case MCExpr::Target: + llvm_unreachable("Can't handle nested target expr!"); + break; + + case MCExpr::Constant: + break; + + case MCExpr::Binary: { + const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value); + AddValueSymbolsImpl(BE->getLHS(), Asm); + AddValueSymbolsImpl(BE->getRHS(), Asm); + break; + } + + case MCExpr::SymbolRef: + Asm->getOrCreateSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol()); + break; + + case MCExpr::Unary: + AddValueSymbolsImpl(cast<MCUnaryExpr>(Value)->getSubExpr(), Asm); + break; + } +} + +void ARM64MCExpr::AddValueSymbols(MCAssembler *Asm) const { + AddValueSymbolsImpl(getSubExpr(), Asm); +} + +const MCSection *ARM64MCExpr::FindAssociatedSection() const { + llvm_unreachable("FIXME: what goes here?"); +} + +bool ARM64MCExpr::EvaluateAsRelocatableImpl(MCValue &Res, + const MCAsmLayout *Layout) const { + if (!getSubExpr()->EvaluateAsRelocatable(Res, Layout)) + return false; + + Res = + MCValue::get(Res.getSymA(), Res.getSymB(), Res.getConstant(), getKind()); + + return true; +} + +static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) { + switch (Expr->getKind()) { + case MCExpr::Target: + llvm_unreachable("Can't handle nested target expression"); + break; + case MCExpr::Constant: + break; + + case MCExpr::Binary: { + const MCBinaryExpr *BE = cast<MCBinaryExpr>(Expr); + fixELFSymbolsInTLSFixupsImpl(BE->getLHS(), Asm); + fixELFSymbolsInTLSFixupsImpl(BE->getRHS(), Asm); + break; 
+ } + + case MCExpr::SymbolRef: { + // We're known to be under a TLS fixup, so any symbol should be + // modified. There should be only one. + const MCSymbolRefExpr &SymRef = *cast<MCSymbolRefExpr>(Expr); + MCSymbolData &SD = Asm.getOrCreateSymbolData(SymRef.getSymbol()); + MCELF::SetType(SD, ELF::STT_TLS); + break; + } + + case MCExpr::Unary: + fixELFSymbolsInTLSFixupsImpl(cast<MCUnaryExpr>(Expr)->getSubExpr(), Asm); + break; + } +} + +void ARM64MCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const { + switch (getSymbolLoc(Kind)) { + default: + return; + case VK_DTPREL: + case VK_GOTTPREL: + case VK_TPREL: + case VK_TLSDESC: + break; + } + + fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm); +} diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64MCExpr.h b/lib/Target/ARM64/MCTargetDesc/ARM64MCExpr.h new file mode 100644 index 0000000..a33fe43 --- /dev/null +++ b/lib/Target/ARM64/MCTargetDesc/ARM64MCExpr.h @@ -0,0 +1,162 @@ +//=---- ARM64MCExpr.h - ARM64 specific MC expression classes ------*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes ARM64-specific MCExprs, used for modifiers like +// ":lo12:" or ":gottprel_g1:". +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ARM64MCEXPR_H +#define LLVM_ARM64MCEXPR_H + +#include "llvm/MC/MCExpr.h" +#include "llvm/Support/ErrorHandling.h" + +namespace llvm { + +class ARM64MCExpr : public MCTargetExpr { +public: + enum VariantKind { + VK_NONE = 0x000, + + // Symbol locations specifying (roughly speaking) what calculation should be + // performed to construct the final address for the relocated + // symbol. E.g. direct, via the GOT, ... 
+ VK_ABS = 0x001, + VK_SABS = 0x002, + VK_GOT = 0x003, + VK_DTPREL = 0x004, + VK_GOTTPREL = 0x005, + VK_TPREL = 0x006, + VK_TLSDESC = 0x007, + VK_SymLocBits = 0x00f, + + // Variants specifying which part of the final address calculation is + // used. E.g. the low 12 bits for an ADD/LDR, the middle 16 bits for a + // MOVZ/MOVK. + VK_PAGE = 0x010, + VK_PAGEOFF = 0x020, + VK_G0 = 0x030, + VK_G1 = 0x040, + VK_G2 = 0x050, + VK_G3 = 0x060, + VK_AddressFragBits = 0x0f0, + + // Whether the final relocation is a checked one (where a linker should + // perform a range-check on the final address) or not. Note that this field + // is unfortunately sometimes omitted from the assembly syntax. E.g. :lo12: + // on its own is a non-checked relocation. We side with ELF on being + // explicit about this! + VK_NC = 0x100, + + // Convenience definitions for referring to specific textual representations + // of relocation specifiers. Note that this means the "_NC" is sometimes + // omitted in line with assembly syntax here (VK_LO12 rather than VK_LO12_NC + // since a user would write ":lo12:"). 
+ VK_CALL = VK_ABS, + VK_ABS_PAGE = VK_ABS | VK_PAGE, + VK_ABS_G3 = VK_ABS | VK_G3, + VK_ABS_G2 = VK_ABS | VK_G2, + VK_ABS_G2_NC = VK_ABS | VK_G2 | VK_NC, + VK_ABS_G1 = VK_ABS | VK_G1, + VK_ABS_G1_NC = VK_ABS | VK_G1 | VK_NC, + VK_ABS_G0 = VK_ABS | VK_G0, + VK_ABS_G0_NC = VK_ABS | VK_G0 | VK_NC, + VK_LO12 = VK_ABS | VK_PAGEOFF | VK_NC, + VK_GOT_LO12 = VK_GOT | VK_PAGEOFF | VK_NC, + VK_GOT_PAGE = VK_GOT | VK_PAGE, + VK_DTPREL_G2 = VK_DTPREL | VK_G2, + VK_DTPREL_G1 = VK_DTPREL | VK_G1, + VK_DTPREL_G1_NC = VK_DTPREL | VK_G1 | VK_NC, + VK_DTPREL_G0 = VK_DTPREL | VK_G0, + VK_DTPREL_G0_NC = VK_DTPREL | VK_G0 | VK_NC, + VK_DTPREL_LO12 = VK_DTPREL | VK_PAGEOFF, + VK_DTPREL_LO12_NC = VK_DTPREL | VK_PAGEOFF | VK_NC, + VK_GOTTPREL_PAGE = VK_GOTTPREL | VK_PAGE, + VK_GOTTPREL_LO12_NC = VK_GOTTPREL | VK_PAGEOFF | VK_NC, + VK_GOTTPREL_G1 = VK_GOTTPREL | VK_G1, + VK_GOTTPREL_G0_NC = VK_GOTTPREL | VK_G0 | VK_NC, + VK_TPREL_G2 = VK_TPREL | VK_G2, + VK_TPREL_G1 = VK_TPREL | VK_G1, + VK_TPREL_G1_NC = VK_TPREL | VK_G1 | VK_NC, + VK_TPREL_G0 = VK_TPREL | VK_G0, + VK_TPREL_G0_NC = VK_TPREL | VK_G0 | VK_NC, + VK_TPREL_LO12 = VK_TPREL | VK_PAGEOFF, + VK_TPREL_LO12_NC = VK_TPREL | VK_PAGEOFF | VK_NC, + VK_TLSDESC_LO12 = VK_TLSDESC | VK_PAGEOFF | VK_NC, + VK_TLSDESC_PAGE = VK_TLSDESC | VK_PAGE, + + VK_INVALID = 0xfff + }; + +private: + const MCExpr *Expr; + const VariantKind Kind; + + explicit ARM64MCExpr(const MCExpr *Expr, VariantKind Kind) + : Expr(Expr), Kind(Kind) {} + +public: + /// @name Construction + /// @{ + + static const ARM64MCExpr *Create(const MCExpr *Expr, VariantKind Kind, + MCContext &Ctx); + + /// @} + /// @name Accessors + /// @{ + + /// Get the kind of this expression. + VariantKind getKind() const { return static_cast<VariantKind>(Kind); } + + /// Get the expression this modifier applies to. + const MCExpr *getSubExpr() const { return Expr; } + + /// @} + /// @name VariantKind information extractors. 
+ /// @{ + + static VariantKind getSymbolLoc(VariantKind Kind) { + return static_cast<VariantKind>(Kind & VK_SymLocBits); + } + + static VariantKind getAddressFrag(VariantKind Kind) { + return static_cast<VariantKind>(Kind & VK_AddressFragBits); + } + + static bool isNotChecked(VariantKind Kind) { return Kind & VK_NC; } + + /// @} + + /// Convert the variant kind into an ELF-appropriate modifier + /// (e.g. ":got:", ":lo12:"). + StringRef getVariantKindName() const; + + void PrintImpl(raw_ostream &OS) const; + + void AddValueSymbols(MCAssembler *) const; + + const MCSection *FindAssociatedSection() const; + + bool EvaluateAsRelocatableImpl(MCValue &Res, + const MCAsmLayout *Layout) const; + + void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const; + + static bool classof(const MCExpr *E) { + return E->getKind() == MCExpr::Target; + } + + static bool classof(const ARM64MCExpr *) { return true; } + +}; +} // end namespace llvm + +#endif diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp b/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp new file mode 100644 index 0000000..8d54412 --- /dev/null +++ b/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp @@ -0,0 +1,167 @@ +//===-- ARM64MCTargetDesc.cpp - ARM64 Target Descriptions -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides ARM64 specific target descriptions. 
+// +//===----------------------------------------------------------------------===// + +#include "ARM64MCTargetDesc.h" +#include "ARM64ELFStreamer.h" +#include "ARM64MCAsmInfo.h" +#include "InstPrinter/ARM64InstPrinter.h" +#include "llvm/MC/MCCodeGenInfo.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/TargetRegistry.h" + +#define GET_INSTRINFO_MC_DESC +#include "ARM64GenInstrInfo.inc" + +#define GET_SUBTARGETINFO_MC_DESC +#include "ARM64GenSubtargetInfo.inc" + +#define GET_REGINFO_MC_DESC +#include "ARM64GenRegisterInfo.inc" + +using namespace llvm; + +static MCInstrInfo *createARM64MCInstrInfo() { + MCInstrInfo *X = new MCInstrInfo(); + InitARM64MCInstrInfo(X); + return X; +} + +static MCSubtargetInfo *createARM64MCSubtargetInfo(StringRef TT, StringRef CPU, + StringRef FS) { + MCSubtargetInfo *X = new MCSubtargetInfo(); + InitARM64MCSubtargetInfo(X, TT, CPU, FS); + return X; +} + +static MCRegisterInfo *createARM64MCRegisterInfo(StringRef Triple) { + MCRegisterInfo *X = new MCRegisterInfo(); + InitARM64MCRegisterInfo(X, ARM64::LR); + return X; +} + +static MCAsmInfo *createARM64MCAsmInfo(const MCRegisterInfo &MRI, + StringRef TT) { + Triple TheTriple(TT); + + MCAsmInfo *MAI; + if (TheTriple.isOSDarwin()) + MAI = new ARM64MCAsmInfoDarwin(); + else { + assert(TheTriple.isOSBinFormatELF() && "Only expect Darwin or ELF"); + MAI = new ARM64MCAsmInfoELF(); + } + + // Initial state of the frame pointer is SP. 
+ unsigned Reg = MRI.getDwarfRegNum(ARM64::SP, true); + MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, Reg, 0); + MAI->addInitialFrameState(Inst); + + return MAI; +} + +static MCCodeGenInfo *createARM64MCCodeGenInfo(StringRef TT, Reloc::Model RM, + CodeModel::Model CM, + CodeGenOpt::Level OL) { + Triple TheTriple(TT); + assert((TheTriple.isOSBinFormatELF() || TheTriple.isOSBinFormatMachO()) && + "Only expect Darwin and ELF targets"); + + if (CM == CodeModel::Default) + CM = CodeModel::Small; + // The default MCJIT memory managers make no guarantees about where they can + // find an executable page; JITed code needs to be able to refer to globals + // no matter how far away they are. + else if (CM == CodeModel::JITDefault) + CM = CodeModel::Large; + else if (CM != CodeModel::Small && CM != CodeModel::Large) + report_fatal_error("Only small and large code models are allowed on ARM64"); + + // ARM64 Darwin is always PIC. + if (TheTriple.isOSDarwin()) + RM = Reloc::PIC_; + // On ELF platforms the default static relocation model has a smart enough + // linker to cope with referencing external symbols defined in a shared + // library. Hence DynamicNoPIC doesn't need to be promoted to PIC. 
+ else if (RM == Reloc::Default || RM == Reloc::DynamicNoPIC) + RM = Reloc::Static; + + MCCodeGenInfo *X = new MCCodeGenInfo(); + X->InitMCCodeGenInfo(RM, CM, OL); + return X; +} + +static MCInstPrinter *createARM64MCInstPrinter(const Target &T, + unsigned SyntaxVariant, + const MCAsmInfo &MAI, + const MCInstrInfo &MII, + const MCRegisterInfo &MRI, + const MCSubtargetInfo &STI) { + if (SyntaxVariant == 0) + return new ARM64InstPrinter(MAI, MII, MRI, STI); + if (SyntaxVariant == 1) + return new ARM64AppleInstPrinter(MAI, MII, MRI, STI); + + return 0; +} + +static MCStreamer *createMCStreamer(const Target &T, StringRef TT, + MCContext &Ctx, MCAsmBackend &TAB, + raw_ostream &OS, MCCodeEmitter *Emitter, + const MCSubtargetInfo &STI, bool RelaxAll, + bool NoExecStack) { + Triple TheTriple(TT); + + if (TheTriple.isOSDarwin()) + return createMachOStreamer(Ctx, TAB, OS, Emitter, RelaxAll, + /*LabelSections*/ true); + + return createARM64ELFStreamer(Ctx, TAB, OS, Emitter, RelaxAll, NoExecStack); +} + +// Force static initialization. +extern "C" void LLVMInitializeARM64TargetMC() { + // Register the MC asm info. + RegisterMCAsmInfoFn X(TheARM64Target, createARM64MCAsmInfo); + + // Register the MC codegen info. + TargetRegistry::RegisterMCCodeGenInfo(TheARM64Target, + createARM64MCCodeGenInfo); + + // Register the MC instruction info. + TargetRegistry::RegisterMCInstrInfo(TheARM64Target, createARM64MCInstrInfo); + + // Register the MC register info. + TargetRegistry::RegisterMCRegInfo(TheARM64Target, createARM64MCRegisterInfo); + + // Register the MC subtarget info. + TargetRegistry::RegisterMCSubtargetInfo(TheARM64Target, + createARM64MCSubtargetInfo); + + // Register the asm backend. + TargetRegistry::RegisterMCAsmBackend(TheARM64Target, createARM64AsmBackend); + + // Register the MC Code Emitter + TargetRegistry::RegisterMCCodeEmitter(TheARM64Target, + createARM64MCCodeEmitter); + + // Register the object streamer. 
+ TargetRegistry::RegisterMCObjectStreamer(TheARM64Target, createMCStreamer); + + // Register the MCInstPrinter. + TargetRegistry::RegisterMCInstPrinter(TheARM64Target, + createARM64MCInstPrinter); +} diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.h b/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.h new file mode 100644 index 0000000..0db2b22 --- /dev/null +++ b/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.h @@ -0,0 +1,62 @@ +//===-- ARM64MCTargetDesc.h - ARM64 Target Descriptions ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides ARM64 specific target descriptions. +// +//===----------------------------------------------------------------------===// + +#ifndef ARM64MCTARGETDESC_H +#define ARM64MCTARGETDESC_H + +#include "llvm/Support/DataTypes.h" +#include <string> + +namespace llvm { +class MCAsmBackend; +class MCCodeEmitter; +class MCContext; +class MCInstrInfo; +class MCRegisterInfo; +class MCObjectWriter; +class MCSubtargetInfo; +class StringRef; +class Target; +class raw_ostream; + +extern Target TheARM64Target; + +MCCodeEmitter *createARM64MCCodeEmitter(const MCInstrInfo &MCII, + const MCRegisterInfo &MRI, + const MCSubtargetInfo &STI, + MCContext &Ctx); +MCAsmBackend *createARM64AsmBackend(const Target &T, const MCRegisterInfo &MRI, + StringRef TT, StringRef CPU); + +MCObjectWriter *createARM64ELFObjectWriter(raw_ostream &OS, uint8_t OSABI); + +MCObjectWriter *createARM64MachObjectWriter(raw_ostream &OS, uint32_t CPUType, + uint32_t CPUSubtype); + +} // End llvm namespace + +// Defines symbolic names for ARM64 registers. This defines a mapping from +// register name to register number. 
+//
+#define GET_REGINFO_ENUM
+#include "ARM64GenRegisterInfo.inc"
+
+// Defines symbolic names for the ARM64 instructions.
+//
+#define GET_INSTRINFO_ENUM
+#include "ARM64GenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_ENUM
+#include "ARM64GenSubtargetInfo.inc"
+
+#endif
diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp b/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp
new file mode 100644
index 0000000..1733dc5
--- /dev/null
+++ b/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp
@@ -0,0 +1,396 @@
+//===-- ARM64MachObjectWriter.cpp - ARM64 Mach Object Writer --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/ARM64FixupKinds.h"
+#include "MCTargetDesc/ARM64MCTargetDesc.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCMachObjectWriter.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachO.h"
+using namespace llvm;
+
+namespace {
+class ARM64MachObjectWriter : public MCMachObjectTargetWriter {
+ bool getARM64FixupKindMachOInfo(const MCFixup &Fixup, unsigned &RelocType,
+ const MCSymbolRefExpr *Sym,
+ unsigned &Log2Size, const MCAssembler &Asm);
+
+public:
+ ARM64MachObjectWriter(uint32_t CPUType, uint32_t CPUSubtype)
+ : MCMachObjectTargetWriter(true /* is64Bit */, CPUType, CPUSubtype,
+ /*UseAggressiveSymbolFolding=*/true) {}
+
+ void RecordRelocation(MachObjectWriter *Writer, const MCAssembler &Asm,
+ const MCAsmLayout &Layout, const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue);
+};
+}
+
+bool 
ARM64MachObjectWriter::getARM64FixupKindMachOInfo( + const MCFixup &Fixup, unsigned &RelocType, const MCSymbolRefExpr *Sym, + unsigned &Log2Size, const MCAssembler &Asm) { + RelocType = unsigned(MachO::ARM64_RELOC_UNSIGNED); + Log2Size = ~0U; + + switch ((unsigned)Fixup.getKind()) { + default: + return false; + + case FK_Data_1: + Log2Size = llvm::Log2_32(1); + return true; + case FK_Data_2: + Log2Size = llvm::Log2_32(2); + return true; + case FK_Data_4: + Log2Size = llvm::Log2_32(4); + if (Sym->getKind() == MCSymbolRefExpr::VK_GOT) + RelocType = unsigned(MachO::ARM64_RELOC_POINTER_TO_GOT); + return true; + case FK_Data_8: + Log2Size = llvm::Log2_32(8); + if (Sym->getKind() == MCSymbolRefExpr::VK_GOT) + RelocType = unsigned(MachO::ARM64_RELOC_POINTER_TO_GOT); + return true; + case ARM64::fixup_arm64_add_imm12: + case ARM64::fixup_arm64_ldst_imm12_scale1: + case ARM64::fixup_arm64_ldst_imm12_scale2: + case ARM64::fixup_arm64_ldst_imm12_scale4: + case ARM64::fixup_arm64_ldst_imm12_scale8: + case ARM64::fixup_arm64_ldst_imm12_scale16: + Log2Size = llvm::Log2_32(4); + switch (Sym->getKind()) { + default: + assert(0 && "Unexpected symbol reference variant kind!"); + case MCSymbolRefExpr::VK_PAGEOFF: + RelocType = unsigned(MachO::ARM64_RELOC_PAGEOFF12); + return true; + case MCSymbolRefExpr::VK_GOTPAGEOFF: + RelocType = unsigned(MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12); + return true; + case MCSymbolRefExpr::VK_TLVPPAGEOFF: + RelocType = unsigned(MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12); + return true; + } + case ARM64::fixup_arm64_pcrel_adrp_imm21: + Log2Size = llvm::Log2_32(4); + // This encompasses the relocation for the whole 21-bit value. 
+ switch (Sym->getKind()) {
+ default:
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "ADR/ADRP relocations must be GOT relative");
+ case MCSymbolRefExpr::VK_PAGE:
+ RelocType = unsigned(MachO::ARM64_RELOC_PAGE21);
+ return true;
+ case MCSymbolRefExpr::VK_GOTPAGE:
+ RelocType = unsigned(MachO::ARM64_RELOC_GOT_LOAD_PAGE21);
+ return true;
+ case MCSymbolRefExpr::VK_TLVPPAGE:
+ RelocType = unsigned(MachO::ARM64_RELOC_TLVP_LOAD_PAGE21);
+ return true;
+ }
+ return true;
+ case ARM64::fixup_arm64_pcrel_branch26:
+ case ARM64::fixup_arm64_pcrel_call26:
+ Log2Size = llvm::Log2_32(4);
+ RelocType = unsigned(MachO::ARM64_RELOC_BRANCH26);
+ return true;
+ }
+}
+
+void ARM64MachObjectWriter::RecordRelocation(
+ MachObjectWriter *Writer, const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
+ unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
+
+ // See <reloc.h>.
+ uint32_t FixupOffset = Layout.getFragmentOffset(Fragment);
+ unsigned Log2Size = 0;
+ int64_t Value = 0;
+ unsigned Index = 0;
+ unsigned IsExtern = 0;
+ unsigned Type = 0;
+ unsigned Kind = Fixup.getKind();
+
+ FixupOffset += Fixup.getOffset();
+
+ // ARM64 pcrel relocation addends do not include the section offset.
+ if (IsPCRel)
+ FixedValue += FixupOffset;
+
+ // ADRP fixups use relocations for the whole symbol value and only
+ // put the addend in the instruction itself. Clear out any value the
+ // generic code figured out from the symbol definition.
+ if (Kind == ARM64::fixup_arm64_pcrel_adrp_imm21 ||
+ Kind == ARM64::fixup_arm64_pcrel_imm19)
+ FixedValue = 0;
+
+ // imm19 relocations are for conditional branches, which require
+ // assembler local symbols. If we got here, that's not what we have,
+ // so complain loudly.
+ if (Kind == ARM64::fixup_arm64_pcrel_imm19) {
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "conditional branch requires assembler-local"
+ " label. 
'" + + Target.getSymA()->getSymbol().getName() + + "' is external."); + return; + } + + // 14-bit branch relocations should only target internal labels, and so + // should never get here. + if (Kind == ARM64::fixup_arm64_pcrel_branch14) { + Asm.getContext().FatalError(Fixup.getLoc(), + "Invalid relocation on conditional branch!"); + return; + } + + if (!getARM64FixupKindMachOInfo(Fixup, Type, Target.getSymA(), Log2Size, + Asm)) { + Asm.getContext().FatalError(Fixup.getLoc(), "unknown ARM64 fixup kind!"); + return; + } + + Value = Target.getConstant(); + + if (Target.isAbsolute()) { // constant + // FIXME: Should this always be extern? + // SymbolNum of 0 indicates the absolute section. + Type = MachO::ARM64_RELOC_UNSIGNED; + Index = 0; + + if (IsPCRel) { + IsExtern = 1; + Asm.getContext().FatalError(Fixup.getLoc(), + "PC relative absolute relocation!"); + + // FIXME: x86_64 sets the type to a branch reloc here. Should we do + // something similar? + } + } else if (Target.getSymB()) { // A - B + constant + const MCSymbol *A = &Target.getSymA()->getSymbol(); + MCSymbolData &A_SD = Asm.getSymbolData(*A); + const MCSymbolData *A_Base = Asm.getAtom(&A_SD); + + const MCSymbol *B = &Target.getSymB()->getSymbol(); + MCSymbolData &B_SD = Asm.getSymbolData(*B); + const MCSymbolData *B_Base = Asm.getAtom(&B_SD); + + // Check for "_foo@got - .", which comes through here as: + // Ltmp0: + // ... _foo@got - Ltmp0 + if (Target.getSymA()->getKind() == MCSymbolRefExpr::VK_GOT && + Target.getSymB()->getKind() == MCSymbolRefExpr::VK_None && + Layout.getSymbolOffset(&B_SD) == + Layout.getFragmentOffset(Fragment) + Fixup.getOffset()) { + // SymB is the PC, so use a PC-rel pointer-to-GOT relocation. 
+ Index = A_Base->getIndex();
+ IsExtern = 1;
+ Type = MachO::ARM64_RELOC_POINTER_TO_GOT;
+ IsPCRel = 1;
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
+ (IsExtern << 27) | (Type << 28));
+ Writer->addRelocation(Fragment->getParent(), MRE);
+ return;
+ } else if (Target.getSymA()->getKind() != MCSymbolRefExpr::VK_None ||
+ Target.getSymB()->getKind() != MCSymbolRefExpr::VK_None)
+ // Otherwise, neither symbol can be modified.
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "unsupported relocation of modified symbol");
+
+ // We don't support PCrel relocations of differences.
+ if (IsPCRel)
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "unsupported pc-relative relocation of "
+ "difference");
+
+ // ARM64 always uses external relocations. If there is no symbol to use as
+ // a base address (a local symbol with no preceding non-local symbol),
+ // error out.
+ //
+ // FIXME: We should probably just synthesize an external symbol and use
+ // that.
+ if (!A_Base)
+ Asm.getContext().FatalError(
+ Fixup.getLoc(),
+ "unsupported relocation of local symbol '" + A->getName() +
+ "'. Must have non-local symbol earlier in section.");
+ if (!B_Base)
+ Asm.getContext().FatalError(
+ Fixup.getLoc(),
+ "unsupported relocation of local symbol '" + B->getName() +
+ "'. Must have non-local symbol earlier in section.");
+
+ if (A_Base == B_Base && A_Base)
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "unsupported relocation with identical base");
+
+ Value += (A_SD.getFragment() == NULL ? 0 : Writer->getSymbolAddress(
+ &A_SD, Layout)) -
+ (A_Base == NULL || A_Base->getFragment() == NULL
+ ? 0
+ : Writer->getSymbolAddress(A_Base, Layout));
+ Value -= (B_SD.getFragment() == NULL ? 0 : Writer->getSymbolAddress(
+ &B_SD, Layout)) -
+ (B_Base == NULL || B_Base->getFragment() == NULL
+ ? 
0 + : Writer->getSymbolAddress(B_Base, Layout)); + + Index = A_Base->getIndex(); + IsExtern = 1; + Type = MachO::ARM64_RELOC_UNSIGNED; + + MachO::any_relocation_info MRE; + MRE.r_word0 = FixupOffset; + MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) | + (IsExtern << 27) | (Type << 28)); + Writer->addRelocation(Fragment->getParent(), MRE); + + Index = B_Base->getIndex(); + IsExtern = 1; + Type = MachO::ARM64_RELOC_SUBTRACTOR; + } else { // A + constant + const MCSymbol *Symbol = &Target.getSymA()->getSymbol(); + MCSymbolData &SD = Asm.getSymbolData(*Symbol); + const MCSymbolData *Base = Asm.getAtom(&SD); + const MCSectionMachO &Section = static_cast<const MCSectionMachO &>( + Fragment->getParent()->getSection()); + + // If the symbol is a variable and we weren't able to get a Base for it + // (i.e., it's not in the symbol table associated with a section) resolve + // the relocation based its expansion instead. + if (Symbol->isVariable() && !Base) { + // If the evaluation is an absolute value, just use that directly + // to keep things easy. + int64_t Res; + if (SD.getSymbol().getVariableValue()->EvaluateAsAbsolute( + Res, Layout, Writer->getSectionAddressMap())) { + FixedValue = Res; + return; + } + + // FIXME: Will the Target we already have ever have any data in it + // we need to preserve and merge with the new Target? How about + // the FixedValue? + if (!Symbol->getVariableValue()->EvaluateAsRelocatable(Target, &Layout)) + Asm.getContext().FatalError(Fixup.getLoc(), + "unable to resolve variable '" + + Symbol->getName() + "'"); + return RecordRelocation(Writer, Asm, Layout, Fragment, Fixup, Target, + FixedValue); + } + + // Relocations inside debug sections always use local relocations when + // possible. This seems to be done because the debugger doesn't fully + // understand relocation entries and expects to find values that + // have already been fixed up. 
+ if (Symbol->isInSection()) { + if (Section.hasAttribute(MachO::S_ATTR_DEBUG)) + Base = 0; + } + + // ARM64 uses external relocations as much as possible. For debug sections, + // and for pointer-sized relocations (.quad), we allow section relocations. + // It's code sections that run into trouble. + if (Base) { + Index = Base->getIndex(); + IsExtern = 1; + + // Add the local offset, if needed. + if (Base != &SD) + Value += Layout.getSymbolOffset(&SD) - Layout.getSymbolOffset(Base); + } else if (Symbol->isInSection()) { + // Pointer-sized relocations can use a local relocation. Otherwise, + // we have to be in a debug info section. + if (!Section.hasAttribute(MachO::S_ATTR_DEBUG) && Log2Size != 3) + Asm.getContext().FatalError( + Fixup.getLoc(), + "unsupported relocation of local symbol '" + Symbol->getName() + + "'. Must have non-local symbol earlier in section."); + // Adjust the relocation to be section-relative. + // The index is the section ordinal (1-based). + const MCSectionData &SymSD = + Asm.getSectionData(SD.getSymbol().getSection()); + Index = SymSD.getOrdinal() + 1; + IsExtern = 0; + Value += Writer->getSymbolAddress(&SD, Layout); + + if (IsPCRel) + Value -= Writer->getFragmentAddress(Fragment, Layout) + + Fixup.getOffset() + (1ULL << Log2Size); + } else { + // Resolve constant variables. + if (SD.getSymbol().isVariable()) { + int64_t Res; + if (SD.getSymbol().getVariableValue()->EvaluateAsAbsolute( + Res, Layout, Writer->getSectionAddressMap())) { + FixedValue = Res; + return; + } + } + Asm.getContext().FatalError(Fixup.getLoc(), + "unsupported relocation of variable '" + + Symbol->getName() + "'"); + } + } + + // If the relocation kind is Branch26, Page21, or Pageoff12, any addend + // is represented via an Addend relocation, not encoded directly into + // the instruction. 
+ if ((Type == MachO::ARM64_RELOC_BRANCH26 || + Type == MachO::ARM64_RELOC_PAGE21 || + Type == MachO::ARM64_RELOC_PAGEOFF12) && + Value) { + assert((Value & 0xff000000) == 0 && "Added relocation out of range!"); + + MachO::any_relocation_info MRE; + MRE.r_word0 = FixupOffset; + MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) | + (IsExtern << 27) | (Type << 28)); + Writer->addRelocation(Fragment->getParent(), MRE); + + // Now set up the Addend relocation. + Type = MachO::ARM64_RELOC_ADDEND; + Index = Value; + IsPCRel = 0; + Log2Size = 2; + IsExtern = 0; + + // Put zero into the instruction itself. The addend is in the relocation. + Value = 0; + } + + // If there's any addend left to handle, encode it in the instruction. + FixedValue = Value; + + // struct relocation_info (8 bytes) + MachO::any_relocation_info MRE; + MRE.r_word0 = FixupOffset; + MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) | + (IsExtern << 27) | (Type << 28)); + Writer->addRelocation(Fragment->getParent(), MRE); +} + +MCObjectWriter *llvm::createARM64MachObjectWriter(raw_ostream &OS, + uint32_t CPUType, + uint32_t CPUSubtype) { + return createMachObjectWriter(new ARM64MachObjectWriter(CPUType, CPUSubtype), + OS, /*IsLittleEndian=*/true); +} diff --git a/lib/Target/ARM64/MCTargetDesc/CMakeLists.txt b/lib/Target/ARM64/MCTargetDesc/CMakeLists.txt new file mode 100644 index 0000000..f8665bc --- /dev/null +++ b/lib/Target/ARM64/MCTargetDesc/CMakeLists.txt @@ -0,0 +1,14 @@ +add_llvm_library(LLVMARM64Desc + ARM64AsmBackend.cpp + ARM64ELFObjectWriter.cpp + ARM64ELFStreamer.cpp + ARM64MCAsmInfo.cpp + ARM64MCCodeEmitter.cpp + ARM64MCExpr.cpp + ARM64MCTargetDesc.cpp + ARM64MachObjectWriter.cpp +) +add_dependencies(LLVMARM64Desc ARM64CommonTableGen) + +# Hack: we need to include 'main' target directory to grab private headers +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/.. ${CMAKE_CURRENT_BINARY_DIR}/..) 
diff --git a/lib/Target/ARM64/MCTargetDesc/LLVMBuild.txt b/lib/Target/ARM64/MCTargetDesc/LLVMBuild.txt
new file mode 100644
index 0000000..e4c74d2
--- /dev/null
+++ b/lib/Target/ARM64/MCTargetDesc/LLVMBuild.txt
@@ -0,0 +1,24 @@
+;===- ./lib/Target/ARM64/MCTargetDesc/LLVMBuild.txt ------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = ARM64Desc
+parent = ARM64
+required_libraries = ARM64AsmPrinter ARM64Info MC Support
+add_to_library_groups = ARM64
+
diff --git a/lib/Target/ARM64/MCTargetDesc/Makefile b/lib/Target/ARM64/MCTargetDesc/Makefile
new file mode 100644
index 0000000..013cc63
--- /dev/null
+++ b/lib/Target/ARM64/MCTargetDesc/Makefile
@@ -0,0 +1,16 @@
+##===- lib/Target/ARM64/MCTargetDesc/Makefile --------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME = LLVMARM64Desc
+
+# Hack: we need to include 'main' target directory to grab private headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common |
