Diffstat (limited to 'lib/Target/AArch64/MCTargetDesc')
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h     738
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp       1009
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp   428
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp        49
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h           7
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h          161
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp          67
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h            23
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp     914
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp            212
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h              237
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp      196
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h         38
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp  396
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/Android.mk                      1
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/CMakeLists.txt                  7
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/LLVMBuild.txt                   2
17 files changed, 2804 insertions, 1681 deletions
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h b/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
new file mode 100644
index 0000000..8b1e44e
--- /dev/null
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
@@ -0,0 +1,738 @@
+//===- AArch64AddressingModes.h - AArch64 Addressing Modes ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains helpers for the AArch64 addressing modes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_AArch64_AArch64ADDRESSINGMODES_H
+#define LLVM_TARGET_AArch64_AArch64ADDRESSINGMODES_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+
+namespace llvm {
+
+/// AArch64_AM - AArch64 addressing mode utilities.
+namespace AArch64_AM {
+
+//===----------------------------------------------------------------------===//
+// Shifts
+//
+
+enum ShiftExtendType {
+ InvalidShiftExtend = -1,
+ LSL = 0,
+ LSR,
+ ASR,
+ ROR,
+ MSL,
+
+ UXTB,
+ UXTH,
+ UXTW,
+ UXTX,
+
+ SXTB,
+ SXTH,
+ SXTW,
+ SXTX,
+};
+
+/// getShiftExtendName - Get the string encoding for the shift or extend type.
+static inline const char *getShiftExtendName(AArch64_AM::ShiftExtendType ST) {
+ switch (ST) {
+ default: assert(false && "unhandled shift type!");
+ case AArch64_AM::LSL: return "lsl";
+ case AArch64_AM::LSR: return "lsr";
+ case AArch64_AM::ASR: return "asr";
+ case AArch64_AM::ROR: return "ror";
+ case AArch64_AM::MSL: return "msl";
+ case AArch64_AM::UXTB: return "uxtb";
+ case AArch64_AM::UXTH: return "uxth";
+ case AArch64_AM::UXTW: return "uxtw";
+ case AArch64_AM::UXTX: return "uxtx";
+ case AArch64_AM::SXTB: return "sxtb";
+ case AArch64_AM::SXTH: return "sxth";
+ case AArch64_AM::SXTW: return "sxtw";
+ case AArch64_AM::SXTX: return "sxtx";
+ }
+ return nullptr;
+}
+
+/// getShiftType - Extract the shift type.
+static inline AArch64_AM::ShiftExtendType getShiftType(unsigned Imm) {
+ switch ((Imm >> 6) & 0x7) {
+ default: return AArch64_AM::InvalidShiftExtend;
+ case 0: return AArch64_AM::LSL;
+ case 1: return AArch64_AM::LSR;
+ case 2: return AArch64_AM::ASR;
+ case 3: return AArch64_AM::ROR;
+ case 4: return AArch64_AM::MSL;
+ }
+}
+
+/// getShiftValue - Extract the shift value.
+static inline unsigned getShiftValue(unsigned Imm) {
+ return Imm & 0x3f;
+}
+
+/// getShifterImm - Encode the shift type and amount:
+/// imm: 6-bit shift amount
+/// shifter: 000 ==> lsl
+/// 001 ==> lsr
+/// 010 ==> asr
+/// 011 ==> ror
+/// 100 ==> msl
+/// {8-6} = shifter
+/// {5-0} = imm
+static inline unsigned getShifterImm(AArch64_AM::ShiftExtendType ST,
+ unsigned Imm) {
+ assert((Imm & 0x3f) == Imm && "Illegal shifted immediate value!");
+ unsigned STEnc = 0;
+ switch (ST) {
+ default: llvm_unreachable("Invalid shift requested");
+ case AArch64_AM::LSL: STEnc = 0; break;
+ case AArch64_AM::LSR: STEnc = 1; break;
+ case AArch64_AM::ASR: STEnc = 2; break;
+ case AArch64_AM::ROR: STEnc = 3; break;
+ case AArch64_AM::MSL: STEnc = 4; break;
+ }
+ return (STEnc << 6) | (Imm & 0x3f);
+}
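+
+// A minimal usage sketch (illustrative; not part of the original patch):
+// encoding "lsl #12" packs the LSL selector above the 6-bit amount.
+//
+//   unsigned Enc = AArch64_AM::getShifterImm(AArch64_AM::LSL, 12);
+//   // Enc == (0 << 6) | 12 == 12
+//   assert(AArch64_AM::getShiftType(Enc) == AArch64_AM::LSL);
+//   assert(AArch64_AM::getShiftValue(Enc) == 12);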
+
+//===----------------------------------------------------------------------===//
+// Extends
+//
+
+/// getArithShiftValue - Get the arithmetic shift value.
+static inline unsigned getArithShiftValue(unsigned Imm) {
+ return Imm & 0x7;
+}
+
+/// getExtendType - Extract the extend type for operands of arithmetic ops.
+static inline AArch64_AM::ShiftExtendType getExtendType(unsigned Imm) {
+ assert((Imm & 0x7) == Imm && "invalid immediate!");
+ switch (Imm) {
+ default: llvm_unreachable("Compiler bug!");
+ case 0: return AArch64_AM::UXTB;
+ case 1: return AArch64_AM::UXTH;
+ case 2: return AArch64_AM::UXTW;
+ case 3: return AArch64_AM::UXTX;
+ case 4: return AArch64_AM::SXTB;
+ case 5: return AArch64_AM::SXTH;
+ case 6: return AArch64_AM::SXTW;
+ case 7: return AArch64_AM::SXTX;
+ }
+}
+
+static inline AArch64_AM::ShiftExtendType getArithExtendType(unsigned Imm) {
+ return getExtendType((Imm >> 3) & 0x7);
+}
+
+/// Mapping from extend bits to required operation:
+/// shifter: 000 ==> uxtb
+/// 001 ==> uxth
+/// 010 ==> uxtw
+/// 011 ==> uxtx
+/// 100 ==> sxtb
+/// 101 ==> sxth
+/// 110 ==> sxtw
+/// 111 ==> sxtx
+inline unsigned getExtendEncoding(AArch64_AM::ShiftExtendType ET) {
+ switch (ET) {
+ default: llvm_unreachable("Invalid extend type requested");
+ case AArch64_AM::UXTB: return 0;
+ case AArch64_AM::UXTH: return 1;
+ case AArch64_AM::UXTW: return 2;
+ case AArch64_AM::UXTX: return 3;
+ case AArch64_AM::SXTB: return 4;
+ case AArch64_AM::SXTH: return 5;
+ case AArch64_AM::SXTW: return 6;
+ case AArch64_AM::SXTX: return 7;
+ }
+}
+
+/// getArithExtendImm - Encode the extend type and shift amount for an
+/// arithmetic instruction:
+/// imm: 3-bit extend amount
+/// {5-3} = shifter
+/// {2-0} = imm3
+static inline unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET,
+ unsigned Imm) {
+ assert((Imm & 0x7) == Imm && "Illegal shifted immediate value!");
+ return (getExtendEncoding(ET) << 3) | (Imm & 0x7);
+}
+
+/// getMemDoShift - Extract the "do shift" flag value for load/store
+/// instructions.
+static inline bool getMemDoShift(unsigned Imm) {
+ return (Imm & 0x1) != 0;
+}
+
+/// getMemExtendType - Extract the extend type for the offset operand of
+/// loads/stores.
+static inline AArch64_AM::ShiftExtendType getMemExtendType(unsigned Imm) {
+ return getExtendType((Imm >> 1) & 0x7);
+}
+
+/// getMemExtendImm - Encode the extend type and amount for a load/store inst:
+/// doshift: should the offset be scaled by the access size
+/// shifter: 000 ==> uxtb
+/// 001 ==> uxth
+/// 010 ==> uxtw
+/// 011 ==> uxtx
+/// 100 ==> sxtb
+/// 101 ==> sxth
+/// 110 ==> sxtw
+/// 111 ==> sxtx
+/// {3-1} = shifter
+/// {0} = doshift
+static inline unsigned getMemExtendImm(AArch64_AM::ShiftExtendType ET,
+ bool DoShift) {
+ return (getExtendEncoding(ET) << 1) | unsigned(DoShift);
+}
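+
+// A minimal round-trip sketch (illustrative; not part of the original
+// patch): a "uxtw" offset that is scaled by the access size sets DoShift.
+//
+//   unsigned Enc = AArch64_AM::getMemExtendImm(AArch64_AM::UXTW, true);
+//   // Enc == (2 << 1) | 1 == 5
+//   assert(AArch64_AM::getMemExtendType(Enc) == AArch64_AM::UXTW);
+//   assert(AArch64_AM::getMemDoShift(Enc));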
+
+static inline uint64_t ror(uint64_t elt, unsigned size) {
+ return ((elt & 1) << (size-1)) | (elt >> 1);
+}
+
+/// processLogicalImmediate - Determine if an immediate value can be encoded
+/// as the immediate operand of a logical instruction for the given register
+/// size. If so, return true with "encoding" set to the encoded value in
+/// the form N:immr:imms.
+static inline bool processLogicalImmediate(uint64_t imm, unsigned regSize,
+ uint64_t &encoding) {
+ if (imm == 0ULL || imm == ~0ULL ||
+ (regSize != 64 && (imm >> regSize != 0 || imm == ~0U)))
+ return false;
+
+ unsigned size = 2;
+ uint64_t eltVal = imm;
+
+ // First, determine the element size.
+ while (size < regSize) {
+ unsigned numElts = regSize / size;
+ unsigned mask = (1ULL << size) - 1;
+ uint64_t lowestEltVal = imm & mask;
+
+ bool allMatched = true;
+ for (unsigned i = 1; i < numElts; ++i) {
+ uint64_t currEltVal = (imm >> (i*size)) & mask;
+ if (currEltVal != lowestEltVal) {
+ allMatched = false;
+ break;
+ }
+ }
+
+ if (allMatched) {
+ eltVal = lowestEltVal;
+ break;
+ }
+
+ size *= 2;
+ }
+
+ // Second, determine the rotation to make the element be: 0^m 1^n.
+ for (unsigned i = 0; i < size; ++i) {
+ eltVal = ror(eltVal, size);
+ uint32_t clz = countLeadingZeros(eltVal) - (64 - size);
+ uint32_t cto = CountTrailingOnes_64(eltVal);
+
+ if (clz + cto == size) {
+ // Encode in immr the number of RORs it would take to get *from* this
+ // element value to our target value, where i+1 is the number of RORs
+ // to go the opposite direction.
+ unsigned immr = size - (i + 1);
+
+ // If size has a 1 in the n'th bit, create a value that has zeroes in
+ // bits [0, n] and ones above that.
+ uint64_t nimms = ~(size-1) << 1;
+
+ // OR the CTO value into the low bits, which must be below the Nth
+ // bit mentioned above.
+ nimms |= (cto-1);
+
+ // Extract the seventh bit and toggle it to create the N field.
+ unsigned N = ((nimms >> 6) & 1) ^ 1;
+
+ encoding = (N << 12) | (immr << 6) | (nimms & 0x3f);
+ return true;
+ }
+ }
+
+ return false;
+}
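+
+// Worked example (illustrative; not part of the original patch):
+//   processLogicalImmediate(0x00ff00ff00ff00ffULL, 64, enc)
+// settles on a repeating 16-bit element 0x00ff (eight ones, no rotation
+// needed), so enc == 0x027, i.e. N = 0, immr = 0b000000, imms = 0b100111.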
+
+/// isLogicalImmediate - Return true if the immediate is valid for a logical
+/// immediate instruction of the given register size. Return false otherwise.
+static inline bool isLogicalImmediate(uint64_t imm, unsigned regSize) {
+ uint64_t encoding;
+ return processLogicalImmediate(imm, regSize, encoding);
+}
+
+/// encodeLogicalImmediate - Return the encoded immediate value for a logical
+/// immediate instruction of the given register size.
+static inline uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize) {
+ uint64_t encoding = 0;
+ bool res = processLogicalImmediate(imm, regSize, encoding);
+ assert(res && "invalid logical immediate");
+ (void)res;
+ return encoding;
+}
+
+/// decodeLogicalImmediate - Decode a logical immediate value in the form
+/// "N:immr:imms" (where the immr and imms fields are each 6 bits) into the
+/// integer value it represents with regSize bits.
+static inline uint64_t decodeLogicalImmediate(uint64_t val, unsigned regSize) {
+ // Extract the N, imms, and immr fields.
+ unsigned N = (val >> 12) & 1;
+ unsigned immr = (val >> 6) & 0x3f;
+ unsigned imms = val & 0x3f;
+
+ assert((regSize == 64 || N == 0) && "undefined logical immediate encoding");
+ int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f));
+ assert(len >= 0 && "undefined logical immediate encoding");
+ unsigned size = (1 << len);
+ unsigned R = immr & (size - 1);
+ unsigned S = imms & (size - 1);
+ assert(S != size - 1 && "undefined logical immediate encoding");
+ uint64_t pattern = (1ULL << (S + 1)) - 1;
+ for (unsigned i = 0; i < R; ++i)
+ pattern = ror(pattern, size);
+
+ // Replicate the pattern to fill the regSize.
+ while (size != regSize) {
+ pattern |= (pattern << size);
+ size *= 2;
+ }
+ return pattern;
+}
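+
+// Illustrative inverse of the example above (not part of the original
+// patch): decodeLogicalImmediate(0x027, 64) yields 0x00ff00ff00ff00ffULL.
+// len = 4 gives a 16-bit element, S = 7 builds the pattern 0x00ff, R = 0
+// leaves it unrotated, and replication fills all 64 bits.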
+
+/// isValidDecodeLogicalImmediate - Check to see if the logical immediate value
+/// in the form "N:immr:imms" (where the immr and imms fields are each 6 bits)
+/// is a valid encoding for an integer value with regSize bits.
+static inline bool isValidDecodeLogicalImmediate(uint64_t val,
+ unsigned regSize) {
+ // Extract the N and imms fields needed for checking.
+ unsigned N = (val >> 12) & 1;
+ unsigned imms = val & 0x3f;
+
+ if (regSize == 32 && N != 0) // undefined logical immediate encoding
+ return false;
+ int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f));
+ if (len < 0) // undefined logical immediate encoding
+ return false;
+ unsigned size = (1 << len);
+ unsigned S = imms & (size - 1);
+ if (S == size - 1) // undefined logical immediate encoding
+ return false;
+
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Floating-point Immediates
+//
+static inline float getFPImmFloat(unsigned Imm) {
+ // We expect an 8-bit binary encoding of a floating-point number here.
+ union {
+ uint32_t I;
+ float F;
+ } FPUnion;
+
+ uint8_t Sign = (Imm >> 7) & 0x1;
+ uint8_t Exp = (Imm >> 4) & 0x7;
+ uint8_t Mantissa = Imm & 0xf;
+
+ // 8-bit FP IEEE Float Encoding
+ // abcd efgh aBbbbbbc defgh000 00000000 00000000
+ //
+ // where B = NOT(b);
+
+ FPUnion.I = 0;
+ FPUnion.I |= Sign << 31;
+ FPUnion.I |= ((Exp & 0x4) != 0 ? 0 : 1) << 30;
+ FPUnion.I |= ((Exp & 0x4) != 0 ? 0x1f : 0) << 25;
+ FPUnion.I |= (Exp & 0x3) << 23;
+ FPUnion.I |= Mantissa << 19;
+ return FPUnion.F;
+}
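+
+// Sanity-check sketch (illustrative; not part of the original patch): the
+// FPImm8 value 0x70 has sign 0, exponent field 0b111 and mantissa 0b0000,
+// which expands to the IEEE-754 single 0x3f800000:
+//
+//   assert(AArch64_AM::getFPImmFloat(0x70) == 1.0f);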
+
+/// getFP32Imm - Return an 8-bit floating-point version of the 32-bit
+/// floating-point value. If the value cannot be represented as an 8-bit
+/// floating-point value, then return -1.
+static inline int getFP32Imm(const APInt &Imm) {
+ uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
+ int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127
+ int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits
+
+ // We can handle 4 bits of mantissa.
+ // mantissa = (16+UInt(e:f:g:h))/16.
+ if (Mantissa & 0x7ffff)
+ return -1;
+ Mantissa >>= 19;
+ if ((Mantissa & 0xf) != Mantissa)
+ return -1;
+
+ // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
+ if (Exp < -3 || Exp > 4)
+ return -1;
+ Exp = ((Exp+3) & 0x7) ^ 4;
+
+ return ((int)Sign << 7) | (Exp << 4) | Mantissa;
+}
+
+static inline int getFP32Imm(const APFloat &FPImm) {
+ return getFP32Imm(FPImm.bitcastToAPInt());
+}
+
+/// getFP64Imm - Return an 8-bit floating-point version of the 64-bit
+/// floating-point value. If the value cannot be represented as an 8-bit
+/// floating-point value, then return -1.
+static inline int getFP64Imm(const APInt &Imm) {
+ uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
+ int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023
+ uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffULL;
+
+ // We can handle 4 bits of mantissa.
+ // mantissa = (16+UInt(e:f:g:h))/16.
+ if (Mantissa & 0xffffffffffffULL)
+ return -1;
+ Mantissa >>= 48;
+ if ((Mantissa & 0xf) != Mantissa)
+ return -1;
+
+ // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
+ if (Exp < -3 || Exp > 4)
+ return -1;
+ Exp = ((Exp+3) & 0x7) ^ 4;
+
+ return ((int)Sign << 7) | (Exp << 4) | Mantissa;
+}
+
+static inline int getFP64Imm(const APFloat &FPImm) {
+ return getFP64Imm(FPImm.bitcastToAPInt());
+}
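+
+// Illustrative round-trip with getFPImmFloat above (not part of the
+// original patch):
+//
+//   int Imm = AArch64_AM::getFP32Imm(APFloat(1.0f)); // == 0x70
+//   assert(AArch64_AM::getFPImmFloat(Imm) == 1.0f);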
+
+//===--------------------------------------------------------------------===//
+// AdvSIMD Modified Immediates
+//===--------------------------------------------------------------------===//
+
+// 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh
+static inline bool isAdvSIMDModImmType1(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xffffff00ffffff00ULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType1(uint64_t Imm) {
+ return (Imm & 0xffULL);
+}
+
+static inline uint64_t decodeAdvSIMDModImmType1(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 32) | EncVal;
+}
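+
+// Illustrative round-trip for type 1 (not part of the original patch):
+//
+//   uint64_t V = 0x000000ab000000abULL;
+//   assert(AArch64_AM::isAdvSIMDModImmType1(V));
+//   assert(AArch64_AM::decodeAdvSIMDModImmType1(
+//              AArch64_AM::encodeAdvSIMDModImmType1(V)) == V);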
+
+// 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00
+static inline bool isAdvSIMDModImmType2(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xffff00ffffff00ffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType2(uint64_t Imm) {
+ return (Imm & 0xff00ULL) >> 8;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType2(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 40) | (EncVal << 8);
+}
+
+// 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00
+static inline bool isAdvSIMDModImmType3(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xff00ffffff00ffffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType3(uint64_t Imm) {
+ return (Imm & 0xff0000ULL) >> 16;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType3(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 48) | (EncVal << 16);
+}
+
+// abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00
+static inline bool isAdvSIMDModImmType4(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0x00ffffff00ffffffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType4(uint64_t Imm) {
+ return (Imm & 0xff000000ULL) >> 24;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType4(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 56) | (EncVal << 24);
+}
+
+// 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh
+static inline bool isAdvSIMDModImmType5(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ (((Imm & 0x00ff0000ULL) >> 16) == (Imm & 0x000000ffULL)) &&
+ ((Imm & 0xff00ff00ff00ff00ULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType5(uint64_t Imm) {
+ return (Imm & 0xffULL);
+}
+
+static inline uint64_t decodeAdvSIMDModImmType5(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 48) | (EncVal << 32) | (EncVal << 16) | EncVal;
+}
+
+// abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00
+static inline bool isAdvSIMDModImmType6(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ (((Imm & 0xff000000ULL) >> 16) == (Imm & 0x0000ff00ULL)) &&
+ ((Imm & 0x00ff00ff00ff00ffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType6(uint64_t Imm) {
+ return (Imm & 0xff00ULL) >> 8;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType6(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 56) | (EncVal << 40) | (EncVal << 24) | (EncVal << 8);
+}
+
+// 0x00 0x00 abcdefgh 0xFF 0x00 0x00 abcdefgh 0xFF
+static inline bool isAdvSIMDModImmType7(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xffff00ffffff00ffULL) == 0x000000ff000000ffULL);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType7(uint64_t Imm) {
+ return (Imm & 0xff00ULL) >> 8;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType7(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 40) | (EncVal << 8) | 0x000000ff000000ffULL;
+}
+
+// 0x00 abcdefgh 0xFF 0xFF 0x00 abcdefgh 0xFF 0xFF
+static inline bool isAdvSIMDModImmType8(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xff00ffffff00ffffULL) == 0x0000ffff0000ffffULL);
+}
+
+static inline uint64_t decodeAdvSIMDModImmType8(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 48) | (EncVal << 16) | 0x0000ffff0000ffffULL;
+}
+
+static inline uint8_t encodeAdvSIMDModImmType8(uint64_t Imm) {
+ return (Imm & 0x00ff0000ULL) >> 16;
+}
+
+// abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh
+static inline bool isAdvSIMDModImmType9(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm >> 48) == (Imm & 0x0000ffffULL)) &&
+ ((Imm >> 56) == (Imm & 0x000000ffULL));
+}
+
+static inline uint8_t encodeAdvSIMDModImmType9(uint64_t Imm) {
+ return (Imm & 0xffULL);
+}
+
+static inline uint64_t decodeAdvSIMDModImmType9(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ EncVal |= (EncVal << 8);
+ EncVal |= (EncVal << 16);
+ EncVal |= (EncVal << 32);
+ return EncVal;
+}
+
+// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
+// cmode: 1110, op: 1
+static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
+ uint64_t ByteA = Imm & 0xff00000000000000ULL;
+ uint64_t ByteB = Imm & 0x00ff000000000000ULL;
+ uint64_t ByteC = Imm & 0x0000ff0000000000ULL;
+ uint64_t ByteD = Imm & 0x000000ff00000000ULL;
+ uint64_t ByteE = Imm & 0x00000000ff000000ULL;
+ uint64_t ByteF = Imm & 0x0000000000ff0000ULL;
+ uint64_t ByteG = Imm & 0x000000000000ff00ULL;
+ uint64_t ByteH = Imm & 0x00000000000000ffULL;
+
+ return (ByteA == 0ULL || ByteA == 0xff00000000000000ULL) &&
+ (ByteB == 0ULL || ByteB == 0x00ff000000000000ULL) &&
+ (ByteC == 0ULL || ByteC == 0x0000ff0000000000ULL) &&
+ (ByteD == 0ULL || ByteD == 0x000000ff00000000ULL) &&
+ (ByteE == 0ULL || ByteE == 0x00000000ff000000ULL) &&
+ (ByteF == 0ULL || ByteF == 0x0000000000ff0000ULL) &&
+ (ByteG == 0ULL || ByteG == 0x000000000000ff00ULL) &&
+ (ByteH == 0ULL || ByteH == 0x00000000000000ffULL);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType10(uint64_t Imm) {
+ uint8_t BitA = (Imm & 0xff00000000000000ULL) != 0;
+ uint8_t BitB = (Imm & 0x00ff000000000000ULL) != 0;
+ uint8_t BitC = (Imm & 0x0000ff0000000000ULL) != 0;
+ uint8_t BitD = (Imm & 0x000000ff00000000ULL) != 0;
+ uint8_t BitE = (Imm & 0x00000000ff000000ULL) != 0;
+ uint8_t BitF = (Imm & 0x0000000000ff0000ULL) != 0;
+ uint8_t BitG = (Imm & 0x000000000000ff00ULL) != 0;
+ uint8_t BitH = (Imm & 0x00000000000000ffULL) != 0;
+
+ uint8_t EncVal = BitA;
+ EncVal <<= 1;
+ EncVal |= BitB;
+ EncVal <<= 1;
+ EncVal |= BitC;
+ EncVal <<= 1;
+ EncVal |= BitD;
+ EncVal <<= 1;
+ EncVal |= BitE;
+ EncVal <<= 1;
+ EncVal |= BitF;
+ EncVal <<= 1;
+ EncVal |= BitG;
+ EncVal <<= 1;
+ EncVal |= BitH;
+ return EncVal;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType10(uint8_t Imm) {
+ uint64_t EncVal = 0;
+ if (Imm & 0x80) EncVal |= 0xff00000000000000ULL;
+ if (Imm & 0x40) EncVal |= 0x00ff000000000000ULL;
+ if (Imm & 0x20) EncVal |= 0x0000ff0000000000ULL;
+ if (Imm & 0x10) EncVal |= 0x000000ff00000000ULL;
+ if (Imm & 0x08) EncVal |= 0x00000000ff000000ULL;
+ if (Imm & 0x04) EncVal |= 0x0000000000ff0000ULL;
+ if (Imm & 0x02) EncVal |= 0x000000000000ff00ULL;
+ if (Imm & 0x01) EncVal |= 0x00000000000000ffULL;
+ return EncVal;
+}
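+
+// Type 10 expands each of the eight immediate bits into a full byte. An
+// illustrative case (not part of the original patch): Imm == 0xA5 selects
+// bytes A, C, F and H, so decodeAdvSIMDModImmType10(0xA5) returns
+// 0xff00ff0000ff00ffULL.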
+
+// aBbbbbbc defgh000 0x00 0x00 aBbbbbbc defgh000 0x00 0x00
+static inline bool isAdvSIMDModImmType11(uint64_t Imm) {
+ uint64_t BString = (Imm & 0x7E000000ULL) >> 25;
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ (BString == 0x1f || BString == 0x20) &&
+ ((Imm & 0x0007ffff0007ffffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType11(uint64_t Imm) {
+ uint8_t BitA = (Imm & 0x80000000ULL) != 0;
+ uint8_t BitB = (Imm & 0x20000000ULL) != 0;
+ uint8_t BitC = (Imm & 0x01000000ULL) != 0;
+ uint8_t BitD = (Imm & 0x00800000ULL) != 0;
+ uint8_t BitE = (Imm & 0x00400000ULL) != 0;
+ uint8_t BitF = (Imm & 0x00200000ULL) != 0;
+ uint8_t BitG = (Imm & 0x00100000ULL) != 0;
+ uint8_t BitH = (Imm & 0x00080000ULL) != 0;
+
+ uint8_t EncVal = BitA;
+ EncVal <<= 1;
+ EncVal |= BitB;
+ EncVal <<= 1;
+ EncVal |= BitC;
+ EncVal <<= 1;
+ EncVal |= BitD;
+ EncVal <<= 1;
+ EncVal |= BitE;
+ EncVal <<= 1;
+ EncVal |= BitF;
+ EncVal <<= 1;
+ EncVal |= BitG;
+ EncVal <<= 1;
+ EncVal |= BitH;
+ return EncVal;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType11(uint8_t Imm) {
+ uint64_t EncVal = 0;
+ if (Imm & 0x80) EncVal |= 0x80000000ULL;
+ if (Imm & 0x40) EncVal |= 0x3e000000ULL;
+ else EncVal |= 0x40000000ULL;
+ if (Imm & 0x20) EncVal |= 0x01000000ULL;
+ if (Imm & 0x10) EncVal |= 0x00800000ULL;
+ if (Imm & 0x08) EncVal |= 0x00400000ULL;
+ if (Imm & 0x04) EncVal |= 0x00200000ULL;
+ if (Imm & 0x02) EncVal |= 0x00100000ULL;
+ if (Imm & 0x01) EncVal |= 0x00080000ULL;
+ return (EncVal << 32) | EncVal;
+}
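+
+// Illustrative decode (not part of the original patch): type 11 carries
+// the same 8-bit FP encoding as getFPImmFloat, replicated per 32-bit lane,
+// so decodeAdvSIMDModImmType11(0x70) == 0x3f8000003f800000ULL (1.0f in
+// each lane).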
+
+// aBbbbbbb bbcdefgh 0x00 0x00 0x00 0x00 0x00 0x00
+static inline bool isAdvSIMDModImmType12(uint64_t Imm) {
+ uint64_t BString = (Imm & 0x7fc0000000000000ULL) >> 54;
+ return ((BString == 0xff || BString == 0x100) &&
+ ((Imm & 0x0000ffffffffffffULL) == 0));
+}
+
+static inline uint8_t encodeAdvSIMDModImmType12(uint64_t Imm) {
+ uint8_t BitA = (Imm & 0x8000000000000000ULL) != 0;
+ uint8_t BitB = (Imm & 0x0040000000000000ULL) != 0;
+ uint8_t BitC = (Imm & 0x0020000000000000ULL) != 0;
+ uint8_t BitD = (Imm & 0x0010000000000000ULL) != 0;
+ uint8_t BitE = (Imm & 0x0008000000000000ULL) != 0;
+ uint8_t BitF = (Imm & 0x0004000000000000ULL) != 0;
+ uint8_t BitG = (Imm & 0x0002000000000000ULL) != 0;
+ uint8_t BitH = (Imm & 0x0001000000000000ULL) != 0;
+
+ uint8_t EncVal = BitA;
+ EncVal <<= 1;
+ EncVal |= BitB;
+ EncVal <<= 1;
+ EncVal |= BitC;
+ EncVal <<= 1;
+ EncVal |= BitD;
+ EncVal <<= 1;
+ EncVal |= BitE;
+ EncVal <<= 1;
+ EncVal |= BitF;
+ EncVal <<= 1;
+ EncVal |= BitG;
+ EncVal <<= 1;
+ EncVal |= BitH;
+ return EncVal;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType12(uint8_t Imm) {
+ uint64_t EncVal = 0;
+ if (Imm & 0x80) EncVal |= 0x8000000000000000ULL;
+ if (Imm & 0x40) EncVal |= 0x3fc0000000000000ULL;
+ else EncVal |= 0x4000000000000000ULL;
+ if (Imm & 0x20) EncVal |= 0x0020000000000000ULL;
+ if (Imm & 0x10) EncVal |= 0x0010000000000000ULL;
+ if (Imm & 0x08) EncVal |= 0x0008000000000000ULL;
+ if (Imm & 0x04) EncVal |= 0x0004000000000000ULL;
+ if (Imm & 0x02) EncVal |= 0x0002000000000000ULL;
+ if (Imm & 0x01) EncVal |= 0x0001000000000000ULL;
+ return (EncVal << 32) | EncVal;
+}
+
+} // end namespace AArch64_AM
+
+} // end namespace llvm
+
+#endif
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index f1452ab..d8900d4 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -6,168 +6,57 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file contains the AArch64 implementation of the MCAsmBackend class,
-// which is principally concerned with relaxation of the various fixup kinds.
-//
-//===----------------------------------------------------------------------===//
+#include "AArch64.h"
+#include "AArch64RegisterInfo.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCAsmBackend.h"
-#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/ELF.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/MachO.h"
using namespace llvm;
namespace {
-class AArch64AsmBackend : public MCAsmBackend {
- const MCSubtargetInfo* STI;
-public:
- AArch64AsmBackend(const Target &T, const StringRef TT)
- : MCAsmBackend(),
- STI(AArch64_MC::createAArch64MCSubtargetInfo(TT, "", ""))
- {}
-
-
- ~AArch64AsmBackend() {
- delete STI;
- }
-
- bool writeNopData(uint64_t Count, MCObjectWriter *OW) const;
-
- virtual void processFixupValue(const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFixup &Fixup, const MCFragment *DF,
- const MCValue &Target, uint64_t &Value,
- bool &IsResolved);
-};
-} // end anonymous namespace
-
-void AArch64AsmBackend::processFixupValue(const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFixup &Fixup,
- const MCFragment *DF,
- const MCValue &Target,
- uint64_t &Value, bool &IsResolved) {
- // The ADRP instruction adds some multiple of 0x1000 to the current PC &
- // ~0xfff. This means that the required offset to reach a symbol can vary by
- // up to one step depending on where the ADRP is in memory. For example:
- //
- // ADRP x0, there
- // there:
- //
- // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
- // we'll need that as an offset. At any other address "there" will be in the
- // same page as the ADRP and the instruction should encode 0x0. Assuming the
- // section isn't 0x1000-aligned, we therefore need to delegate this decision
- // to the linker -- a relocation!
- if ((uint32_t)Fixup.getKind() == AArch64::fixup_a64_adr_prel_page ||
- (uint32_t)Fixup.getKind() == AArch64::fixup_a64_adr_prel_got_page ||
- (uint32_t)Fixup.getKind() == AArch64::fixup_a64_adr_gottprel_page ||
- (uint32_t)Fixup.getKind() == AArch64::fixup_a64_tlsdesc_adr_page)
- IsResolved = false;
-}
-
-static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value);
-
-namespace {
+class AArch64AsmBackend : public MCAsmBackend {
+ static const unsigned PCRelFlagVal =
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;
-class ELFAArch64AsmBackend : public AArch64AsmBackend {
- uint8_t OSABI;
- bool IsLittle; // Big or little endian
public:
- ELFAArch64AsmBackend(const Target &T, const StringRef TT,
- uint8_t _OSABI, bool isLittle)
- : AArch64AsmBackend(T, TT), OSABI(_OSABI), IsLittle(isLittle) { }
+ AArch64AsmBackend(const Target &T) : MCAsmBackend() {}
- bool fixupNeedsRelaxation(const MCFixup &Fixup,
- uint64_t Value,
- const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout) const;
-
- unsigned int getNumFixupKinds() const {
+ unsigned getNumFixupKinds() const override {
return AArch64::NumTargetFixupKinds;
}
- const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
-// This table *must* be in the order that the fixup_* kinds are defined in
-// AArch64FixupKinds.h.
-//
-// Name Offset (bits) Size (bits) Flags
-{ "fixup_a64_ld_prel", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_adr_prel", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_adr_prel_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_add_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst8_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst16_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst32_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst64_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst128_lo12", 0, 32, 0 },
-{ "fixup_a64_tstbr", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_condbr", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_uncondbr", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_call", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_movw_uabs_g0", 0, 32, 0 },
-{ "fixup_a64_movw_uabs_g0_nc", 0, 32, 0 },
-{ "fixup_a64_movw_uabs_g1", 0, 32, 0 },
-{ "fixup_a64_movw_uabs_g1_nc", 0, 32, 0 },
-{ "fixup_a64_movw_uabs_g2", 0, 32, 0 },
-{ "fixup_a64_movw_uabs_g2_nc", 0, 32, 0 },
-{ "fixup_a64_movw_uabs_g3", 0, 32, 0 },
-{ "fixup_a64_movw_sabs_g0", 0, 32, 0 },
-{ "fixup_a64_movw_sabs_g1", 0, 32, 0 },
-{ "fixup_a64_movw_sabs_g2", 0, 32, 0 },
-{ "fixup_a64_adr_prel_got_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_ld64_got_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_movw_dtprel_g2", 0, 32, 0 },
-{ "fixup_a64_movw_dtprel_g1", 0, 32, 0 },
-{ "fixup_a64_movw_dtprel_g1_nc", 0, 32, 0 },
-{ "fixup_a64_movw_dtprel_g0", 0, 32, 0 },
-{ "fixup_a64_movw_dtprel_g0_nc", 0, 32, 0 },
-{ "fixup_a64_add_dtprel_hi12", 0, 32, 0 },
-{ "fixup_a64_add_dtprel_lo12", 0, 32, 0 },
-{ "fixup_a64_add_dtprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst8_dtprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst8_dtprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst16_dtprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst16_dtprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst32_dtprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst32_dtprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst64_dtprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst64_dtprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_movw_gottprel_g1", 0, 32, 0 },
-{ "fixup_a64_movw_gottprel_g0_nc", 0, 32, 0 },
-{ "fixup_a64_adr_gottprel_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_ld64_gottprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ld_gottprel_prel19", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_movw_tprel_g2", 0, 32, 0 },
-{ "fixup_a64_movw_tprel_g1", 0, 32, 0 },
-{ "fixup_a64_movw_tprel_g1_nc", 0, 32, 0 },
-{ "fixup_a64_movw_tprel_g0", 0, 32, 0 },
-{ "fixup_a64_movw_tprel_g0_nc", 0, 32, 0 },
-{ "fixup_a64_add_tprel_hi12", 0, 32, 0 },
-{ "fixup_a64_add_tprel_lo12", 0, 32, 0 },
-{ "fixup_a64_add_tprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst8_tprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst8_tprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst16_tprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst16_tprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst32_tprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst32_tprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst64_tprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst64_tprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_tlsdesc_adr_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_tlsdesc_ld64_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_tlsdesc_add_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_tlsdesc_call", 0, 0, 0 }
+ // This table *must* be in the order that the fixup_* kinds are defined in
+ // AArch64FixupKinds.h.
+ //
+ // Name Offset (bits) Size (bits) Flags
+ { "fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal },
+ { "fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal },
+ { "fixup_aarch64_add_imm12", 10, 12, 0 },
+ { "fixup_aarch64_ldst_imm12_scale1", 10, 12, 0 },
+ { "fixup_aarch64_ldst_imm12_scale2", 10, 12, 0 },
+ { "fixup_aarch64_ldst_imm12_scale4", 10, 12, 0 },
+ { "fixup_aarch64_ldst_imm12_scale8", 10, 12, 0 },
+ { "fixup_aarch64_ldst_imm12_scale16", 10, 12, 0 },
+ { "fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal },
+ { "fixup_aarch64_movw", 5, 16, 0 },
+ { "fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal },
+ { "fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal },
+ { "fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal },
+ { "fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal },
+ { "fixup_aarch64_tlsdesc_call", 0, 0, 0 }
};
+
if (Kind < FirstTargetFixupKind)
return MCAsmBackend::getFixupKindInfo(Kind);
@@ -177,417 +66,501 @@ public:
}
void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
- uint64_t Value, bool IsPCRel) const {
- unsigned NumBytes = getFixupKindInfo(Fixup.getKind()).TargetSize / 8;
- Value = adjustFixupValue(Fixup.getKind(), Value);
- if (!Value) return; // Doesn't change encoding.
-
- unsigned Offset = Fixup.getOffset();
- assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");
-
- // For each byte of the fragment that the fixup touches, mask in the bits
- // from the fixup value.
- for (unsigned i = 0; i != NumBytes; ++i) {
- Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
- }
- }
+ uint64_t Value, bool IsPCRel) const override;
- bool mayNeedRelaxation(const MCInst&) const {
- return false;
- }
+ bool mayNeedRelaxation(const MCInst &Inst) const override;
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const override;
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const override;
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
- void relaxInstruction(const MCInst&, llvm::MCInst&) const {
- llvm_unreachable("Cannot relax instructions");
- }
+ void HandleAssemblerFlag(MCAssemblerFlag Flag) {}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createAArch64ELFObjectWriter(OS, OSABI, IsLittle);
- }
+ unsigned getPointerSize() const { return 8; }
};
} // end anonymous namespace
-bool
-ELFAArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
- uint64_t Value,
- const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout) const {
- // Correct for now. With all instructions 32-bit only very low-level
- // considerations could make you select something which may fail.
- return false;
-}
+/// \brief The number of bytes the fixup may change.
+static unsigned getFixupKindNumBytes(unsigned Kind) {
+ switch (Kind) {
+ default:
+ assert(0 && "Unknown fixup kind!");
+ case AArch64::fixup_aarch64_tlsdesc_call:
+ return 0;
-bool AArch64AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
- // Can't emit NOP with size not multiple of 32-bits
- if (Count % 4 != 0)
- return false;
+ case FK_Data_1:
+ return 1;
- uint64_t NumNops = Count / 4;
- for (uint64_t i = 0; i != NumNops; ++i)
- OW->Write32(0xd503201f);
+ case FK_Data_2:
+ case AArch64::fixup_aarch64_movw:
+ return 2;
+
+ case AArch64::fixup_aarch64_pcrel_branch14:
+ case AArch64::fixup_aarch64_add_imm12:
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ case AArch64::fixup_aarch64_ldr_pcrel_imm19:
+ case AArch64::fixup_aarch64_pcrel_branch19:
+ return 3;
+
+ case AArch64::fixup_aarch64_pcrel_adr_imm21:
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ case AArch64::fixup_aarch64_pcrel_call26:
+ case FK_Data_4:
+ return 4;
- return true;
+ case FK_Data_8:
+ return 8;
+ }
}
-static unsigned ADRImmBits(unsigned Value) {
+static unsigned AdrImmBits(unsigned Value) {
unsigned lo2 = Value & 0x3;
- unsigned hi19 = (Value & 0x1fffff) >> 2;
-
+ unsigned hi19 = (Value & 0x1ffffc) >> 2;
return (hi19 << 5) | (lo2 << 29);
}
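+
+// Illustrative packing (not part of the original patch): an ADR offset of
+// +5 splits into lo2 == 0b01 (placed at bits 30:29) and hi19 == 1 (placed
+// at bits 23:5), so AdrImmBits(5) == (1 << 5) | (1 << 29) == 0x20000020.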
static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) {
+ int64_t SignedValue = static_cast<int64_t>(Value);
switch (Kind) {
default:
- llvm_unreachable("Unknown fixup kind!");
- case FK_Data_2:
- assert((int64_t)Value >= -32768 &&
- (int64_t)Value <= 65536 &&
- "Out of range ABS16 fixup");
+ assert(false && "Unknown fixup kind!");
+ case AArch64::fixup_aarch64_pcrel_adr_imm21:
+ if (SignedValue > 2097151 || SignedValue < -2097152)
+ report_fatal_error("fixup value out of range");
+ return AdrImmBits(Value & 0x1fffffULL);
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
+ case AArch64::fixup_aarch64_ldr_pcrel_imm19:
+ case AArch64::fixup_aarch64_pcrel_branch19:
+ // Signed 21-bit immediate
+ if (SignedValue > 2097151 || SignedValue < -2097152)
+ report_fatal_error("fixup value out of range");
+ // Low two bits are not encoded.
+ return (Value >> 2) & 0x7ffff;
+ case AArch64::fixup_aarch64_add_imm12:
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ // Unsigned 12-bit immediate
+ if (Value >= 0x1000)
+ report_fatal_error("invalid imm12 fixup value");
return Value;
- case FK_Data_4:
- assert((int64_t)Value >= -(1LL << 31) &&
- (int64_t)Value <= (1LL << 32) - 1 &&
- "Out of range ABS32 fixup");
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ // Unsigned 12-bit immediate which gets multiplied by 2
+ if (Value & 1 || Value >= 0x2000)
+ report_fatal_error("invalid imm12 fixup value");
+ return Value >> 1;
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ // Unsigned 12-bit immediate which gets multiplied by 4
+ if (Value & 3 || Value >= 0x4000)
+ report_fatal_error("invalid imm12 fixup value");
+ return Value >> 2;
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ // Unsigned 12-bit immediate which gets multiplied by 8
+ if (Value & 7 || Value >= 0x8000)
+ report_fatal_error("invalid imm12 fixup value");
+ return Value >> 3;
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ // Unsigned 12-bit immediate which gets multiplied by 16
+ if (Value & 15 || Value >= 0x10000)
+ report_fatal_error("invalid imm12 fixup value");
+ return Value >> 4;
+ case AArch64::fixup_aarch64_movw:
+ report_fatal_error("no resolvable MOVZ/MOVK fixups supported yet");
return Value;
+ case AArch64::fixup_aarch64_pcrel_branch14:
+ // Signed 16-bit immediate
+ if (SignedValue > 32767 || SignedValue < -32768)
+ report_fatal_error("fixup value out of range");
+ // Low two bits are not encoded (4-byte alignment assumed).
+ if (Value & 0x3)
+ report_fatal_error("fixup not sufficiently aligned");
+ return (Value >> 2) & 0x3fff;
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ case AArch64::fixup_aarch64_pcrel_call26:
+ // Signed 28-bit immediate
+ if (SignedValue > 134217727 || SignedValue < -134217728)
+ report_fatal_error("fixup value out of range");
+ // Low two bits are not encoded (4-byte alignment assumed).
+ if (Value & 0x3)
+ report_fatal_error("fixup not sufficiently aligned");
+ return (Value >> 2) & 0x3ffffff;
+ case FK_Data_1:
+ case FK_Data_2:
+ case FK_Data_4:
case FK_Data_8:
return Value;
+ }
+}
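+
+// Illustrative scale-8 case (not part of the original patch): a 0x40-byte
+// offset on a 64-bit load/store is 8-byte aligned and in range, so
+// adjustFixupValue(AArch64::fixup_aarch64_ldst_imm12_scale8, 0x40) == 8.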
- case AArch64::fixup_a64_ld_gottprel_prel19:
- // R_AARCH64_LD_GOTTPREL_PREL19: Set a load-literal immediate to bits 1F
- // FFFC of G(TPREL(S+A)) - P; check -2^20 <= X < 2^20.
- case AArch64::fixup_a64_ld_prel:
- // R_AARCH64_LD_PREL_LO19: Sets a load-literal (immediate) value to bits
- // 1F FFFC of S+A-P, checking that -2^20 <= S+A-P < 2^20.
- assert((int64_t)Value >= -(1LL << 20) &&
- (int64_t)Value < (1LL << 20) && "Out of range LDR (lit) fixup");
- return (Value & 0x1ffffc) << 3;
-
- case AArch64::fixup_a64_adr_prel:
- // R_AARCH64_ADR_PREL_LO21: Sets an ADR immediate value to bits 1F FFFF of
- // the result of S+A-P, checking that -2^20 <= S+A-P < 2^20.
- assert((int64_t)Value >= -(1LL << 20) &&
- (int64_t)Value < (1LL << 20) && "Out of range ADR fixup");
- return ADRImmBits(Value & 0x1fffff);
-
- case AArch64::fixup_a64_adr_prel_page:
- // R_AARCH64_ADR_PREL_PG_HI21: Sets an ADRP immediate value to bits 1 FFFF
- // F000 of the result of the operation, checking that -2^32 <= result <
- // 2^32.
- assert((int64_t)Value >= -(1LL << 32) &&
- (int64_t)Value < (1LL << 32) && "Out of range ADRP fixup");
- return ADRImmBits((Value & 0x1fffff000ULL) >> 12);
-
- case AArch64::fixup_a64_add_dtprel_hi12:
- // R_AARCH64_TLSLD_ADD_DTPREL_LO12: Set an ADD immediate field to bits
- // FF F000 of DTPREL(S+A), check 0 <= X < 2^24.
- case AArch64::fixup_a64_add_tprel_hi12:
- // R_AARCH64_TLSLD_ADD_TPREL_LO12: Set an ADD immediate field to bits
- // FF F000 of TPREL(S+A), check 0 <= X < 2^24.
- assert((int64_t)Value >= 0 &&
- (int64_t)Value < (1LL << 24) && "Out of range ADD fixup");
- return (Value & 0xfff000) >> 2;
-
- case AArch64::fixup_a64_add_dtprel_lo12:
- // R_AARCH64_TLSLD_ADD_DTPREL_LO12: Set an ADD immediate field to bits
- // FFF of DTPREL(S+A), check 0 <= X < 2^12.
- case AArch64::fixup_a64_add_tprel_lo12:
- // R_AARCH64_TLSLD_ADD_TPREL_LO12: Set an ADD immediate field to bits
- // FFF of TPREL(S+A), check 0 <= X < 2^12.
- assert((int64_t)Value >= 0 &&
- (int64_t)Value < (1LL << 12) && "Out of range ADD fixup");
- // ... fallthrough to no-checking versions ...
- case AArch64::fixup_a64_add_dtprel_lo12_nc:
- // R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: Set an ADD immediate field to bits
- // FFF of DTPREL(S+A) with no overflow check.
- case AArch64::fixup_a64_add_tprel_lo12_nc:
- // R_AARCH64_TLSLD_ADD_TPREL_LO12_NC: Set an ADD immediate field to bits
- // FFF of TPREL(S+A) with no overflow check.
- case AArch64::fixup_a64_tlsdesc_add_lo12_nc:
- // R_AARCH64_TLSDESC_ADD_LO12_NC: Set an ADD immediate field to bits
- // FFF of G(TLSDESC(S+A)), with no overflow check.
- case AArch64::fixup_a64_add_lo12:
- // R_AARCH64_ADD_ABS_LO12_NC: Sets an ADD immediate value to bits FFF of
- // S+A, with no overflow check.
- return (Value & 0xfff) << 10;
-
- case AArch64::fixup_a64_ldst8_dtprel_lo12:
- // R_AARCH64_TLSLD_LDST8_DTPREL_LO12: Set an LD/ST offset field to bits FFF
- // of DTPREL(S+A), check 0 <= X < 2^12.
- case AArch64::fixup_a64_ldst8_tprel_lo12:
- // R_AARCH64_TLSLE_LDST8_TPREL_LO12: Set an LD/ST offset field to bits FFF
- // of DTPREL(S+A), check 0 <= X < 2^12.
- assert((int64_t) Value >= 0 &&
- (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup");
- // ... fallthrough to no-checking versions ...
- case AArch64::fixup_a64_ldst8_dtprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST8_DTPREL_LO12: Set an LD/ST offset field to bits FFF
- // of DTPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst8_tprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST8_TPREL_LO12: Set an LD/ST offset field to bits FFF
- // of TPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst8_lo12:
- // R_AARCH64_LDST8_ABS_LO12_NC: Sets an LD/ST immediate value to bits FFF
- // of S+A, with no overflow check.
- return (Value & 0xfff) << 10;
-
- case AArch64::fixup_a64_ldst16_dtprel_lo12:
- // R_AARCH64_TLSLD_LDST16_DTPREL_LO12: Set an LD/ST offset field to bits FFE
- // of DTPREL(S+A), check 0 <= X < 2^12.
- case AArch64::fixup_a64_ldst16_tprel_lo12:
- // R_AARCH64_TLSLE_LDST16_TPREL_LO12: Set an LD/ST offset field to bits FFE
- // of DTPREL(S+A), check 0 <= X < 2^12.
- assert((int64_t) Value >= 0 &&
- (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup");
- // ... fallthrough to no-checking versions ...
- case AArch64::fixup_a64_ldst16_dtprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST16_DTPREL_LO12: Set an LD/ST offset field to bits FFE
- // of DTPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst16_tprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST16_TPREL_LO12: Set an LD/ST offset field to bits FFE
- // of TPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst16_lo12:
- // R_AARCH64_LDST16_ABS_LO12_NC: Sets an LD/ST immediate value to bits FFE
- // of S+A, with no overflow check.
- return (Value & 0xffe) << 9;
-
- case AArch64::fixup_a64_ldst32_dtprel_lo12:
- // R_AARCH64_TLSLD_LDST32_DTPREL_LO12: Set an LD/ST offset field to bits FFC
- // of DTPREL(S+A), check 0 <= X < 2^12.
- case AArch64::fixup_a64_ldst32_tprel_lo12:
- // R_AARCH64_TLSLE_LDST32_TPREL_LO12: Set an LD/ST offset field to bits FFC
- // of DTPREL(S+A), check 0 <= X < 2^12.
- assert((int64_t) Value >= 0 &&
- (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup");
- // ... fallthrough to no-checking versions ...
- case AArch64::fixup_a64_ldst32_dtprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST32_DTPREL_LO12: Set an LD/ST offset field to bits FFC
- // of DTPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst32_tprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST32_TPREL_LO12: Set an LD/ST offset field to bits FFC
- // of TPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst32_lo12:
- // R_AARCH64_LDST32_ABS_LO12_NC: Sets an LD/ST immediate value to bits FFC
- // of S+A, with no overflow check.
- return (Value & 0xffc) << 8;
-
- case AArch64::fixup_a64_ldst64_dtprel_lo12:
- // R_AARCH64_TLSLD_LDST64_DTPREL_LO12: Set an LD/ST offset field to bits FF8
- // of DTPREL(S+A), check 0 <= X < 2^12.
- case AArch64::fixup_a64_ldst64_tprel_lo12:
- // R_AARCH64_TLSLE_LDST64_TPREL_LO12: Set an LD/ST offset field to bits FF8
- // of DTPREL(S+A), check 0 <= X < 2^12.
- assert((int64_t) Value >= 0 &&
- (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup");
- // ... fallthrough to no-checking versions ...
- case AArch64::fixup_a64_ldst64_dtprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST64_DTPREL_LO12: Set an LD/ST offset field to bits FF8
- // of DTPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst64_tprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST64_TPREL_LO12: Set an LD/ST offset field to bits FF8
- // of TPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst64_lo12:
- // R_AARCH64_LDST64_ABS_LO12_NC: Sets an LD/ST immediate value to bits FF8
- // of S+A, with no overflow check.
- return (Value & 0xff8) << 7;
-
- case AArch64::fixup_a64_ldst128_lo12:
- // R_AARCH64_LDST128_ABS_LO12_NC: Sets an LD/ST immediate value to bits FF0
- // of S+A, with no overflow check.
- return (Value & 0xff0) << 6;
-
- case AArch64::fixup_a64_movw_uabs_g0:
- // R_AARCH64_MOVW_UABS_G0: Sets a MOVZ immediate field to bits FFFF of S+A
- // with a check that S+A < 2^16
- assert(Value <= 0xffff && "Out of range move wide fixup");
- return (Value & 0xffff) << 5;
-
- case AArch64::fixup_a64_movw_dtprel_g0_nc:
- // R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: Sets a MOVK immediate field to bits
- // FFFF of DTPREL(S+A) with no overflow check.
- case AArch64::fixup_a64_movw_gottprel_g0_nc:
- // R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: Sets a MOVK immediate field to bits
- // FFFF of G(TPREL(S+A)) - GOT with no overflow check.
- case AArch64::fixup_a64_movw_tprel_g0_nc:
- // R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: Sets a MOVK immediate field to bits
- // FFFF of TPREL(S+A) with no overflow check.
- case AArch64::fixup_a64_movw_uabs_g0_nc:
- // R_AARCH64_MOVW_UABS_G0_NC: Sets a MOVK immediate field to bits FFFF of
- // S+A with no overflow check.
- return (Value & 0xffff) << 5;
-
- case AArch64::fixup_a64_movw_uabs_g1:
- // R_AARCH64_MOVW_UABS_G1: Sets a MOVZ immediate field to bits FFFF0000 of
- // S+A with a check that S+A < 2^32
- assert(Value <= 0xffffffffull && "Out of range move wide fixup");
- return ((Value >> 16) & 0xffff) << 5;
-
- case AArch64::fixup_a64_movw_dtprel_g1_nc:
- // R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC: Set a MOVK immediate field
- // to bits FFFF0000 of DTPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_movw_tprel_g1_nc:
- // R_AARCH64_TLSLD_MOVW_TPREL_G1_NC: Set a MOVK immediate field
- // to bits FFFF0000 of TPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_movw_uabs_g1_nc:
- // R_AARCH64_MOVW_UABS_G1_NC: Sets a MOVK immediate field to bits
- // FFFF0000 of S+A with no overflow check.
- return ((Value >> 16) & 0xffff) << 5;
-
- case AArch64::fixup_a64_movw_uabs_g2:
- // R_AARCH64_MOVW_UABS_G2: Sets a MOVZ immediate field to bits FFFF 0000
- // 0000 of S+A with a check that S+A < 2^48
- assert(Value <= 0xffffffffffffull && "Out of range move wide fixup");
- return ((Value >> 32) & 0xffff) << 5;
-
- case AArch64::fixup_a64_movw_uabs_g2_nc:
- // R_AARCH64_MOVW_UABS_G2: Sets a MOVK immediate field to bits FFFF 0000
- // 0000 of S+A with no overflow check.
- return ((Value >> 32) & 0xffff) << 5;
-
- case AArch64::fixup_a64_movw_uabs_g3:
- // R_AARCH64_MOVW_UABS_G3: Sets a MOVZ immediate field to bits FFFF 0000
- // 0000 0000 of S+A (no overflow check needed)
- return ((Value >> 48) & 0xffff) << 5;
-
- case AArch64::fixup_a64_movw_dtprel_g0:
- // R_AARCH64_TLSLD_MOVW_DTPREL_G0: Set a MOV[NZ] immediate field
- // to bits FFFF of DTPREL(S+A).
- case AArch64::fixup_a64_movw_tprel_g0:
- // R_AARCH64_TLSLE_MOVW_TPREL_G0: Set a MOV[NZ] immediate field to
- // bits FFFF of TPREL(S+A).
- case AArch64::fixup_a64_movw_sabs_g0: {
- // R_AARCH64_MOVW_SABS_G0: Sets MOV[NZ] immediate field using bits FFFF of
- // S+A (see notes below); check -2^16 <= S+A < 2^16. (notes say that we
- // should convert between MOVN and MOVZ to achieve our goals).
- int64_t Signed = Value;
- assert(Signed >= -(1LL << 16) && Signed < (1LL << 16)
- && "Out of range move wide fixup");
- if (Signed >= 0) {
- Value = (Value & 0xffff) << 5;
- // Bit 30 converts the MOVN encoding into a MOVZ
- Value |= 1 << 30;
- } else {
- // MCCodeEmitter should have encoded a MOVN, which is fine.
- Value = (~Value & 0xffff) << 5;
- }
- return Value;
+void AArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
+ unsigned DataSize, uint64_t Value,
+ bool IsPCRel) const {
+ unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
+ if (!Value)
+ return; // Doesn't change encoding.
+ MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
+ // Apply any target-specific value adjustments.
+ Value = adjustFixupValue(Fixup.getKind(), Value);
+
+ // Shift the value into position.
+ Value <<= Info.TargetOffset;
+
+ unsigned Offset = Fixup.getOffset();
+ assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");
+
+ // For each byte of the fragment that the fixup touches, mask in the
+ // bits from the fixup value.
+ for (unsigned i = 0; i != NumBytes; ++i)
+ Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
+}
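+
+// For example (illustrative; not part of the original patch), a
+// fixup_aarch64_add_imm12 with Value 0x123 passes the range check, is
+// shifted left by its TargetOffset of 10, and is then OR'd byte by byte
+// into the three bytes starting at Fixup.getOffset().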
+
+bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
+ return false;
+}
+
+bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
+ uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const {
+ // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
+ // into the targets for now.
+ //
+ // Relax if the value is too big for a (signed) i8.
+ return int64_t(Value) != int64_t(int8_t(Value));
+}
+
+void AArch64AsmBackend::relaxInstruction(const MCInst &Inst,
+ MCInst &Res) const {
+ assert(false && "AArch64AsmBackend::relaxInstruction() unimplemented");
+}
+
+bool AArch64AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
+ // If the count is not 4-byte aligned, we must be writing data into the text
+ // section (otherwise we have unaligned instructions, and thus have far
+ // bigger problems), so just write zeros instead.
+ if ((Count & 3) != 0) {
+ for (uint64_t i = 0, e = (Count & 3); i != e; ++i)
+ OW->Write8(0);
}
- case AArch64::fixup_a64_movw_dtprel_g1:
- // R_AARCH64_TLSLD_MOVW_DTPREL_G1: Set a MOV[NZ] immediate field
- // to bits FFFF0000 of DTPREL(S+A).
- case AArch64::fixup_a64_movw_gottprel_g1:
- // R_AARCH64_TLSIE_MOVW_GOTTPREL_G1: Set a MOV[NZ] immediate field
- // to bits FFFF0000 of G(TPREL(S+A)) - GOT.
- case AArch64::fixup_a64_movw_tprel_g1:
- // R_AARCH64_TLSLE_MOVW_TPREL_G1: Set a MOV[NZ] immediate field to
- // bits FFFF0000 of TPREL(S+A).
- case AArch64::fixup_a64_movw_sabs_g1: {
- // R_AARCH64_MOVW_SABS_G1: Sets MOV[NZ] immediate field using bits FFFF 0000
- // of S+A (see notes below); check -2^32 <= S+A < 2^32. (notes say that we
- // should convert between MOVN and MOVZ to achieve our goals).
- int64_t Signed = Value;
- assert(Signed >= -(1LL << 32) && Signed < (1LL << 32)
- && "Out of range move wide fixup");
- if (Signed >= 0) {
- Value = ((Value >> 16) & 0xffff) << 5;
- // Bit 30 converts the MOVN encoding into a MOVZ
- Value |= 1 << 30;
- } else {
- Value = ((~Value >> 16) & 0xffff) << 5;
- }
- return Value;
+ // We are properly aligned, so write NOPs as requested.
+ Count /= 4;
+ for (uint64_t i = 0; i != Count; ++i)
+ OW->Write32(0xd503201f);
+ return true;
+}
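+
+// Illustrative behaviour (not part of the original patch): a 10-byte
+// request emits two zero bytes of padding followed by two NOPs
+// (0xd503201f each), covering all ten bytes.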
+
+namespace {
+
+namespace CU {
+
+/// \brief Compact unwind encoding values.
+enum CompactUnwindEncodings {
+ /// \brief A "frameless" leaf function, where no non-volatile registers are
+ /// saved. The return address remains in LR throughout the function.
+ UNWIND_AArch64_MODE_FRAMELESS = 0x02000000,
+
+ /// \brief No compact unwind encoding available. Instead the low 23 bits of
+ /// the compact unwind encoding hold the offset of the DWARF FDE in the
+ /// __eh_frame section. This mode is never used in object files. It is only
+ /// generated by the linker in final linked images, which have only DWARF info
+ /// for a function.
+ UNWIND_AArch64_MODE_DWARF = 0x03000000,
+
+ /// \brief This is a standard arm64 prologue where FP/LR are immediately
+ /// pushed on the stack, then SP is copied to FP. If there are any
+ /// non-volatile registers saved, they are copied into the stack frame in
+ /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
+ /// of the five X pairs and four D pairs can be saved, but the memory layout
+ /// must be in register number order.
+ UNWIND_AArch64_MODE_FRAME = 0x04000000,
+
+ /// \brief Frame register pair encodings.
+ UNWIND_AArch64_FRAME_X19_X20_PAIR = 0x00000001,
+ UNWIND_AArch64_FRAME_X21_X22_PAIR = 0x00000002,
+ UNWIND_AArch64_FRAME_X23_X24_PAIR = 0x00000004,
+ UNWIND_AArch64_FRAME_X25_X26_PAIR = 0x00000008,
+ UNWIND_AArch64_FRAME_X27_X28_PAIR = 0x00000010,
+ UNWIND_AArch64_FRAME_D8_D9_PAIR = 0x00000100,
+ UNWIND_AArch64_FRAME_D10_D11_PAIR = 0x00000200,
+ UNWIND_AArch64_FRAME_D12_D13_PAIR = 0x00000400,
+ UNWIND_AArch64_FRAME_D14_D15_PAIR = 0x00000800
+};
+
+} // end CU namespace
+
+// FIXME: This should be in a separate file.
+class DarwinAArch64AsmBackend : public AArch64AsmBackend {
+ const MCRegisterInfo &MRI;
+
+ /// \brief Encode compact unwind stack adjustment for frameless functions.
+ /// See UNWIND_AArch64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
+ /// The stack size always needs to be 16-byte aligned.
+ uint32_t encodeStackAdjustment(uint32_t StackSize) const {
+ return (StackSize / 16) << 12;
+ }
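+
+ // Illustrative (not part of the original patch): a 48-byte frameless
+ // stack adjustment encodes as encodeStackAdjustment(48) == 3 << 12 ==
+ // 0x3000, assuming the caller has already 16-byte aligned the size.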
+
+public:
+ DarwinAArch64AsmBackend(const Target &T, const MCRegisterInfo &MRI)
+ : AArch64AsmBackend(T), MRI(MRI) {}
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
+ return createAArch64MachObjectWriter(OS, MachO::CPU_TYPE_ARM64,
+ MachO::CPU_SUBTYPE_ARM64_ALL);
+ }
+
+ bool doesSectionRequireSymbols(const MCSection &Section) const override {
+ // Any section for which the linker breaks things into atoms needs to
+ // preserve symbols, including assembler local symbols, to identify
+ // those atoms. These sections are:
+ // Sections of type:
+ //
+ // S_CSTRING_LITERALS (e.g. __cstring)
+ // S_LITERAL_POINTERS (e.g. objc selector pointers)
+ // S_16BYTE_LITERALS, S_8BYTE_LITERALS, S_4BYTE_LITERALS
+ //
+ // Sections named:
+ //
+ // __TEXT,__eh_frame
+ // __TEXT,__ustring
+ // __DATA,__cfstring
+ // __DATA,__objc_classrefs
+ // __DATA,__objc_catlist
+ //
+ // FIXME: It would be better if the compiler used actual linker local
+ // symbols for each of these sections rather than preserving what
+ // are ostensibly assembler local symbols.
+ const MCSectionMachO &SMO = static_cast<const MCSectionMachO &>(Section);
+ return (SMO.getType() == MachO::S_CSTRING_LITERALS ||
+ SMO.getType() == MachO::S_4BYTE_LITERALS ||
+ SMO.getType() == MachO::S_8BYTE_LITERALS ||
+ SMO.getType() == MachO::S_16BYTE_LITERALS ||
+ SMO.getType() == MachO::S_LITERAL_POINTERS ||
+ (SMO.getSegmentName() == "__TEXT" &&
+ (SMO.getSectionName() == "__eh_frame" ||
+ SMO.getSectionName() == "__ustring")) ||
+ (SMO.getSegmentName() == "__DATA" &&
+ (SMO.getSectionName() == "__cfstring" ||
+ SMO.getSectionName() == "__objc_classrefs" ||
+ SMO.getSectionName() == "__objc_catlist")));
}
- case AArch64::fixup_a64_movw_dtprel_g2:
- // R_AARCH64_TLSLD_MOVW_DTPREL_G2: Set a MOV[NZ] immediate field
- // to bits FFFF 0000 0000 of DTPREL(S+A).
- case AArch64::fixup_a64_movw_tprel_g2:
- // R_AARCH64_TLSLE_MOVW_TPREL_G2: Set a MOV[NZ] immediate field to
- // bits FFFF 0000 0000 of TPREL(S+A).
- case AArch64::fixup_a64_movw_sabs_g2: {
- // R_AARCH64_MOVW_SABS_G2: Sets MOV[NZ] immediate field using bits FFFF 0000
- // 0000 of S+A (see notes below); check -2^48 <= S+A < 2^48. (notes say that
- // we should convert between MOVN and MOVZ to achieve our goals).
- int64_t Signed = Value;
- assert(Signed >= -(1LL << 48) && Signed < (1LL << 48)
- && "Out of range move wide fixup");
- if (Signed >= 0) {
- Value = ((Value >> 32) & 0xffff) << 5;
- // Bit 30 converts the MOVN encoding into a MOVZ
- Value |= 1 << 30;
- } else {
- Value = ((~Value >> 32) & 0xffff) << 5;
+ /// \brief Generate the compact unwind encoding from the CFI directives.
+ uint32_t generateCompactUnwindEncoding(
+ ArrayRef<MCCFIInstruction> Instrs) const override {
+ if (Instrs.empty())
+ return CU::UNWIND_AArch64_MODE_FRAMELESS;
+
+ bool HasFP = false;
+ unsigned StackSize = 0;
+
+ uint32_t CompactUnwindEncoding = 0;
+ for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
+ const MCCFIInstruction &Inst = Instrs[i];
+
+ switch (Inst.getOperation()) {
+ default:
+ // Cannot handle this directive: bail out.
+ return CU::UNWIND_AArch64_MODE_DWARF;
+ case MCCFIInstruction::OpDefCfa: {
+ // Defines a frame pointer.
+ assert(getXRegFromWReg(MRI.getLLVMRegNum(Inst.getRegister(), true)) ==
+ AArch64::FP &&
+ "Invalid frame pointer!");
+ assert(i + 2 < e && "Insufficient CFI instructions to define a frame!");
+
+ const MCCFIInstruction &LRPush = Instrs[++i];
+ assert(LRPush.getOperation() == MCCFIInstruction::OpOffset &&
+ "Link register not pushed!");
+ const MCCFIInstruction &FPPush = Instrs[++i];
+ assert(FPPush.getOperation() == MCCFIInstruction::OpOffset &&
+ "Frame pointer not pushed!");
+
+ unsigned LRReg = MRI.getLLVMRegNum(LRPush.getRegister(), true);
+ unsigned FPReg = MRI.getLLVMRegNum(FPPush.getRegister(), true);
+
+ LRReg = getXRegFromWReg(LRReg);
+ FPReg = getXRegFromWReg(FPReg);
+
+ assert(LRReg == AArch64::LR && FPReg == AArch64::FP &&
+ "Pushing invalid registers for frame!");
+
+ // Indicate that the function has a frame.
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_MODE_FRAME;
+ HasFP = true;
+ break;
+ }
+ case MCCFIInstruction::OpDefCfaOffset: {
+ assert(StackSize == 0 && "We already have the CFA offset!");
+ StackSize = std::abs(Inst.getOffset());
+ break;
+ }
+ case MCCFIInstruction::OpOffset: {
+ // Registers are saved in pairs. We expect there to be two consecutive
+ // `.cfi_offset' instructions with the appropriate registers specified.
+ unsigned Reg1 = MRI.getLLVMRegNum(Inst.getRegister(), true);
+ if (i + 1 == e)
+ return CU::UNWIND_AArch64_MODE_DWARF;
+
+ const MCCFIInstruction &Inst2 = Instrs[++i];
+ if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
+ return CU::UNWIND_AArch64_MODE_DWARF;
+ unsigned Reg2 = MRI.getLLVMRegNum(Inst2.getRegister(), true);
+
+      // N.B. The encodings must be in register number order, with the X
+      // registers before the D registers.
+
+ // X19/X20 pair = 0x00000001,
+ // X21/X22 pair = 0x00000002,
+ // X23/X24 pair = 0x00000004,
+ // X25/X26 pair = 0x00000008,
+ // X27/X28 pair = 0x00000010
+ Reg1 = getXRegFromWReg(Reg1);
+ Reg2 = getXRegFromWReg(Reg2);
+
+ if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
+ (CompactUnwindEncoding & 0xF1E) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X19_X20_PAIR;
+ else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
+ (CompactUnwindEncoding & 0xF1C) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X21_X22_PAIR;
+ else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
+ (CompactUnwindEncoding & 0xF18) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X23_X24_PAIR;
+ else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
+ (CompactUnwindEncoding & 0xF10) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X25_X26_PAIR;
+ else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
+ (CompactUnwindEncoding & 0xF00) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X27_X28_PAIR;
+ else {
+ Reg1 = getDRegFromBReg(Reg1);
+ Reg2 = getDRegFromBReg(Reg2);
+
+ // D8/D9 pair = 0x00000100,
+ // D10/D11 pair = 0x00000200,
+ // D12/D13 pair = 0x00000400,
+ // D14/D15 pair = 0x00000800
+ if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
+ (CompactUnwindEncoding & 0xE00) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D8_D9_PAIR;
+ else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
+ (CompactUnwindEncoding & 0xC00) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D10_D11_PAIR;
+ else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
+ (CompactUnwindEncoding & 0x800) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D12_D13_PAIR;
+ else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D14_D15_PAIR;
+ else
+ // A pair was pushed which we cannot handle.
+ return CU::UNWIND_AArch64_MODE_DWARF;
+ }
+
+ break;
+ }
+ }
}
- return Value;
+
+ if (!HasFP) {
+ // With compact unwind info we can only represent stack adjustments of up
+ // to 65520 bytes.
+ if (StackSize > 65520)
+ return CU::UNWIND_AArch64_MODE_DWARF;
+
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_MODE_FRAMELESS;
+ CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
+ }
+
+ return CompactUnwindEncoding;
}
+};
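For frameless functions the mode bits and the scaled stack size share the same word; a minimal sketch using the encodeStackAdjustment formula above:

    // Sketch only: frameless encoding for a 64-byte (16-byte aligned) stack.
    uint32_t StackSize = 64;
    uint32_t Enc = 0x02000000                 // UNWIND_AArch64_MODE_FRAMELESS
                 | ((StackSize / 16) << 12);  // encodeStackAdjustment(64)
    // Enc == 0x02004000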
- case AArch64::fixup_a64_tstbr:
- // R_AARCH64_TSTBR14: Sets the immediate field of a TBZ/TBNZ instruction to
- // bits FFFC of S+A-P, checking -2^15 <= S+A-P < 2^15.
- assert((int64_t)Value >= -(1LL << 15) &&
- (int64_t)Value < (1LL << 15) && "Out of range TBZ/TBNZ fixup");
- return (Value & 0xfffc) << (5 - 2);
-
- case AArch64::fixup_a64_condbr:
- // R_AARCH64_CONDBR19: Sets the immediate field of a conditional branch
- // instruction to bits 1FFFFC of S+A-P, checking -2^20 <= S+A-P < 2^20.
- assert((int64_t)Value >= -(1LL << 20) &&
- (int64_t)Value < (1LL << 20) && "Out of range B.cond fixup");
- return (Value & 0x1ffffc) << (5 - 2);
-
- case AArch64::fixup_a64_uncondbr:
- // R_AARCH64_JUMP26 same as below (except to a linker, possibly).
- case AArch64::fixup_a64_call:
- // R_AARCH64_CALL26: Sets a CALL immediate field to bits FFFFFFC of S+A-P,
- // checking that -2^27 <= S+A-P < 2^27.
- assert((int64_t)Value >= -(1LL << 27) &&
- (int64_t)Value < (1LL << 27) && "Out of range branch fixup");
- return (Value & 0xffffffc) >> 2;
-
- case AArch64::fixup_a64_adr_gottprel_page:
- // R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: Set an ADRP immediate field to bits
- // 1FFFFF000 of Page(G(TPREL(S+A))) - Page(P); check -2^32 <= X < 2^32.
- case AArch64::fixup_a64_tlsdesc_adr_page:
- // R_AARCH64_TLSDESC_ADR_PAGE: Set an ADRP immediate field to bits 1FFFFF000
- // of Page(G(TLSDESC(S+A))) - Page(P); check -2^32 <= X < 2^32.
- case AArch64::fixup_a64_adr_prel_got_page:
- // R_AARCH64_ADR_GOT_PAGE: Sets the immediate value of an ADRP to bits
- // 1FFFFF000 of the operation, checking that -2^32 < Page(G(S))-Page(GOT) <
- // 2^32.
- assert((int64_t)Value >= -(1LL << 32) &&
- (int64_t)Value < (1LL << 32) && "Out of range ADRP fixup");
- return ADRImmBits((Value & 0x1fffff000ULL) >> 12);
-
- case AArch64::fixup_a64_ld64_gottprel_lo12_nc:
- // R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: Set an LD offset field to bits FF8
- // of X, with no overflow check. Check that X & 7 == 0.
- case AArch64::fixup_a64_tlsdesc_ld64_lo12_nc:
- // R_AARCH64_TLSDESC_LD64_LO12_NC: Set an LD offset field to bits FF8 of
- // G(TLSDESC(S+A)), with no overflow check. Check that X & 7 == 0.
- case AArch64::fixup_a64_ld64_got_lo12_nc:
- // R_AARCH64_LD64_GOT_LO12_NC: Sets the LD/ST immediate field to bits FF8 of
- // G(S) with no overflow check. Check X & 7 == 0
- assert(((int64_t)Value & 7) == 0 && "Misaligned fixup");
- return (Value & 0xff8) << 7;
-
- case AArch64::fixup_a64_tlsdesc_call:
- // R_AARCH64_TLSDESC_CALL: For relaxation only.
- return 0;
+} // end anonymous namespace
+
+namespace {
+
+class ELFAArch64AsmBackend : public AArch64AsmBackend {
+public:
+ uint8_t OSABI;
+ bool IsLittleEndian;
+
+ ELFAArch64AsmBackend(const Target &T, uint8_t OSABI, bool IsLittleEndian)
+ : AArch64AsmBackend(T), OSABI(OSABI), IsLittleEndian(IsLittleEndian) {}
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
+ return createAArch64ELFObjectWriter(OS, OSABI, IsLittleEndian);
}
+
+ void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFixup &Fixup, const MCFragment *DF,
+ const MCValue &Target, uint64_t &Value,
+ bool &IsResolved) override;
+
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t Value, bool IsPCRel) const override;
+};
+
+void ELFAArch64AsmBackend::processFixupValue(
+ const MCAssembler &Asm, const MCAsmLayout &Layout, const MCFixup &Fixup,
+ const MCFragment *DF, const MCValue &Target, uint64_t &Value,
+ bool &IsResolved) {
+ // The ADRP instruction adds some multiple of 0x1000 to the current PC &
+ // ~0xfff. This means that the required offset to reach a symbol can vary by
+ // up to one step depending on where the ADRP is in memory. For example:
+ //
+ // ADRP x0, there
+ // there:
+ //
+ // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
+ // we'll need that as an offset. At any other address "there" will be in the
+ // same page as the ADRP and the instruction should encode 0x0. Assuming the
+ // section isn't 0x1000-aligned, we therefore need to delegate this decision
+ // to the linker -- a relocation!
+ if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21)
+ IsResolved = false;
+}
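The page arithmetic behind that comment checks out directly; Page() below is a hypothetical helper standing in for "address & ~0xfff":

    // Sketch only: the ADRP page delta depends on where the ADRP sits.
    uint64_t Page(uint64_t A) { return A & ~UINT64_C(0xfff); }
    // ADRP at 0xffc, "there" at 0x1000: Page(0x1000) - Page(0xffc) == 0x1000
    // ADRP at 0x1000, "there" at 0x1004: Page(0x1004) - Page(0x1000) == 0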
+
+void ELFAArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
+ unsigned DataSize, uint64_t Value,
+ bool IsPCRel) const {
+  // Store fixups in the .eh_frame section in big-endian order.
+ if (!IsLittleEndian && Fixup.getKind() == FK_Data_4) {
+ const MCSection *Sec = Fixup.getValue()->FindAssociatedSection();
+ const MCSectionELF *SecELF = static_cast<const MCSectionELF *>(Sec);
+ if (SecELF->getSectionName() == ".eh_frame")
+ Value = ByteSwap_32(unsigned(Value));
+ }
+  AArch64AsmBackend::applyFixup(Fixup, Data, DataSize, Value, IsPCRel);
+}
}
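The swap is a plain 32-bit byte reversal, applied only to FK_Data_4 fixups that resolve into .eh_frame on big-endian targets; a sketch of the equivalent operation:

    // Sketch only: mirrors llvm::ByteSwap_32 on a FK_Data_4 value V.
    uint32_t Swapped = (V << 24) | ((V << 8) & 0x00ff0000) |
                       ((V >> 8) & 0x0000ff00) | (V >> 24);
    // V == 0x12345678 gives Swapped == 0x78563412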
-MCAsmBackend *
-llvm::createAArch64leAsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU) {
+MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU) {
Triple TheTriple(TT);
- return new ELFAArch64AsmBackend(T, TT, TheTriple.getOS(), /*isLittle*/ true);
+
+ if (TheTriple.isOSDarwin())
+ return new DarwinAArch64AsmBackend(T, MRI);
+
+ assert(TheTriple.isOSBinFormatELF() && "Expect either MachO or ELF target");
+ return new ELFAArch64AsmBackend(T, TheTriple.getOS(), /*IsLittleEndian=*/true);
}
-MCAsmBackend *
-llvm::createAArch64beAsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU) {
+MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU) {
Triple TheTriple(TT);
- return new ELFAArch64AsmBackend(T, TT, TheTriple.getOS(), /*isLittle*/ false);
+
+ assert(TheTriple.isOSBinFormatELF() &&
+ "Big endian is only supported for ELF targets!");
+ return new ELFAArch64AsmBackend(T, TheTriple.getOS(),
+ /*IsLittleEndian=*/false);
}
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
index a5fe914..e05191e 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AArch64FixupKinds.h"
+#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCValue.h"
@@ -35,257 +36,222 @@ private:
};
}
-AArch64ELFObjectWriter::AArch64ELFObjectWriter(uint8_t OSABI, bool IsLittleEndian)
- : MCELFObjectTargetWriter(/*Is64Bit*/ true, OSABI, ELF::EM_AARCH64,
- /*HasRelocationAddend*/ true)
-{}
+AArch64ELFObjectWriter::AArch64ELFObjectWriter(uint8_t OSABI,
+ bool IsLittleEndian)
+ : MCELFObjectTargetWriter(/*Is64Bit*/ true, OSABI, ELF::EM_AARCH64,
+ /*HasRelocationAddend*/ true) {}
-AArch64ELFObjectWriter::~AArch64ELFObjectWriter()
-{}
+AArch64ELFObjectWriter::~AArch64ELFObjectWriter() {}
unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- unsigned Type;
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ AArch64MCExpr::VariantKind RefKind =
+ static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+ AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
+ bool IsNC = AArch64MCExpr::isNotChecked(RefKind);
+
+ assert((!Target.getSymA() ||
+ Target.getSymA()->getKind() == MCSymbolRefExpr::VK_None) &&
+ "Should only be expression-level modifiers here");
+
+ assert((!Target.getSymB() ||
+ Target.getSymB()->getKind() == MCSymbolRefExpr::VK_None) &&
+ "Should only be expression-level modifiers here");
+
if (IsPCRel) {
switch ((unsigned)Fixup.getKind()) {
- default:
- llvm_unreachable("Unimplemented fixup -> relocation");
- case FK_Data_8:
- return ELF::R_AARCH64_PREL64;
- case FK_Data_4:
- return ELF::R_AARCH64_PREL32;
case FK_Data_2:
return ELF::R_AARCH64_PREL16;
- case AArch64::fixup_a64_ld_prel:
- Type = ELF::R_AARCH64_LD_PREL_LO19;
- break;
- case AArch64::fixup_a64_adr_prel:
- Type = ELF::R_AARCH64_ADR_PREL_LO21;
- break;
- case AArch64::fixup_a64_adr_prel_page:
- Type = ELF::R_AARCH64_ADR_PREL_PG_HI21;
- break;
- case AArch64::fixup_a64_adr_prel_got_page:
- Type = ELF::R_AARCH64_ADR_GOT_PAGE;
- break;
- case AArch64::fixup_a64_tstbr:
- Type = ELF::R_AARCH64_TSTBR14;
- break;
- case AArch64::fixup_a64_condbr:
- Type = ELF::R_AARCH64_CONDBR19;
- break;
- case AArch64::fixup_a64_uncondbr:
- Type = ELF::R_AARCH64_JUMP26;
- break;
- case AArch64::fixup_a64_call:
- Type = ELF::R_AARCH64_CALL26;
- break;
- case AArch64::fixup_a64_adr_gottprel_page:
- Type = ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
- break;
- case AArch64::fixup_a64_ld_gottprel_prel19:
- Type = ELF::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19;
- break;
- case AArch64::fixup_a64_tlsdesc_adr_page:
- Type = ELF::R_AARCH64_TLSDESC_ADR_PAGE;
- break;
+ case FK_Data_4:
+ return ELF::R_AARCH64_PREL32;
+ case FK_Data_8:
+ return ELF::R_AARCH64_PREL64;
+ case AArch64::fixup_aarch64_pcrel_adr_imm21:
+ assert(SymLoc == AArch64MCExpr::VK_NONE && "unexpected ADR relocation");
+ return ELF::R_AARCH64_ADR_PREL_LO21;
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ if (SymLoc == AArch64MCExpr::VK_ABS && !IsNC)
+ return ELF::R_AARCH64_ADR_PREL_PG_HI21;
+ if (SymLoc == AArch64MCExpr::VK_GOT && !IsNC)
+ return ELF::R_AARCH64_ADR_GOT_PAGE;
+ if (SymLoc == AArch64MCExpr::VK_GOTTPREL && !IsNC)
+ return ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
+ if (SymLoc == AArch64MCExpr::VK_TLSDESC && !IsNC)
+ return ELF::R_AARCH64_TLSDESC_ADR_PAGE;
+ llvm_unreachable("invalid symbol kind for ADRP relocation");
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ return ELF::R_AARCH64_JUMP26;
+ case AArch64::fixup_aarch64_pcrel_call26:
+ return ELF::R_AARCH64_CALL26;
+ case AArch64::fixup_aarch64_ldr_pcrel_imm19:
+ if (SymLoc == AArch64MCExpr::VK_GOTTPREL)
+ return ELF::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19;
+ return ELF::R_AARCH64_LD_PREL_LO19;
+ case AArch64::fixup_aarch64_pcrel_branch14:
+ return ELF::R_AARCH64_TSTBR14;
+ case AArch64::fixup_aarch64_pcrel_branch19:
+ return ELF::R_AARCH64_CONDBR19;
+ default:
+ llvm_unreachable("Unsupported pc-relative fixup kind");
}
} else {
switch ((unsigned)Fixup.getKind()) {
- default:
- llvm_unreachable("Unimplemented fixup -> relocation");
- case FK_Data_8:
- return ELF::R_AARCH64_ABS64;
- case FK_Data_4:
- return ELF::R_AARCH64_ABS32;
case FK_Data_2:
return ELF::R_AARCH64_ABS16;
- case AArch64::fixup_a64_add_lo12:
- Type = ELF::R_AARCH64_ADD_ABS_LO12_NC;
- break;
- case AArch64::fixup_a64_ld64_got_lo12_nc:
- Type = ELF::R_AARCH64_LD64_GOT_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst8_lo12:
- Type = ELF::R_AARCH64_LDST8_ABS_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst16_lo12:
- Type = ELF::R_AARCH64_LDST16_ABS_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst32_lo12:
- Type = ELF::R_AARCH64_LDST32_ABS_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst64_lo12:
- Type = ELF::R_AARCH64_LDST64_ABS_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst128_lo12:
- Type = ELF::R_AARCH64_LDST128_ABS_LO12_NC;
- break;
- case AArch64::fixup_a64_movw_uabs_g0:
- Type = ELF::R_AARCH64_MOVW_UABS_G0;
- break;
- case AArch64::fixup_a64_movw_uabs_g0_nc:
- Type = ELF::R_AARCH64_MOVW_UABS_G0_NC;
- break;
- case AArch64::fixup_a64_movw_uabs_g1:
- Type = ELF::R_AARCH64_MOVW_UABS_G1;
- break;
- case AArch64::fixup_a64_movw_uabs_g1_nc:
- Type = ELF::R_AARCH64_MOVW_UABS_G1_NC;
- break;
- case AArch64::fixup_a64_movw_uabs_g2:
- Type = ELF::R_AARCH64_MOVW_UABS_G2;
- break;
- case AArch64::fixup_a64_movw_uabs_g2_nc:
- Type = ELF::R_AARCH64_MOVW_UABS_G2_NC;
- break;
- case AArch64::fixup_a64_movw_uabs_g3:
- Type = ELF::R_AARCH64_MOVW_UABS_G3;
- break;
- case AArch64::fixup_a64_movw_sabs_g0:
- Type = ELF::R_AARCH64_MOVW_SABS_G0;
- break;
- case AArch64::fixup_a64_movw_sabs_g1:
- Type = ELF::R_AARCH64_MOVW_SABS_G1;
- break;
- case AArch64::fixup_a64_movw_sabs_g2:
- Type = ELF::R_AARCH64_MOVW_SABS_G2;
- break;
+ case FK_Data_4:
+ return ELF::R_AARCH64_ABS32;
+ case FK_Data_8:
+ return ELF::R_AARCH64_ABS64;
+ case AArch64::fixup_aarch64_add_imm12:
+ if (RefKind == AArch64MCExpr::VK_DTPREL_HI12)
+ return ELF::R_AARCH64_TLSLD_ADD_DTPREL_HI12;
+ if (RefKind == AArch64MCExpr::VK_TPREL_HI12)
+ return ELF::R_AARCH64_TLSLE_ADD_TPREL_HI12;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_LO12_NC)
+ return ELF::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_LO12)
+ return ELF::R_AARCH64_TLSLD_ADD_DTPREL_LO12;
+ if (RefKind == AArch64MCExpr::VK_TPREL_LO12_NC)
+ return ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC;
+ if (RefKind == AArch64MCExpr::VK_TPREL_LO12)
+ return ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12;
+ if (RefKind == AArch64MCExpr::VK_TLSDESC_LO12)
+ return ELF::R_AARCH64_TLSDESC_ADD_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return ELF::R_AARCH64_ADD_ABS_LO12_NC;
- // TLS Local-dynamic block
- case AArch64::fixup_a64_movw_dtprel_g2:
- Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G2;
- break;
- case AArch64::fixup_a64_movw_dtprel_g1:
- Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1;
- break;
- case AArch64::fixup_a64_movw_dtprel_g1_nc:
- Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC;
- break;
- case AArch64::fixup_a64_movw_dtprel_g0:
- Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G0;
- break;
- case AArch64::fixup_a64_movw_dtprel_g0_nc:
- Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC;
- break;
- case AArch64::fixup_a64_add_dtprel_hi12:
- Type = ELF::R_AARCH64_TLSLD_ADD_DTPREL_HI12;
- break;
- case AArch64::fixup_a64_add_dtprel_lo12:
- Type = ELF::R_AARCH64_TLSLD_ADD_DTPREL_LO12;
- break;
- case AArch64::fixup_a64_add_dtprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst8_dtprel_lo12:
- Type = ELF::R_AARCH64_TLSLD_LDST8_DTPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst8_dtprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst16_dtprel_lo12:
- Type = ELF::R_AARCH64_TLSLD_LDST16_DTPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst16_dtprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst32_dtprel_lo12:
- Type = ELF::R_AARCH64_TLSLD_LDST32_DTPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst32_dtprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst64_dtprel_lo12:
- Type = ELF::R_AARCH64_TLSLD_LDST64_DTPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst64_dtprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC;
- break;
+ report_fatal_error("invalid fixup for add (uimm12) instruction");
+ return 0;
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return ELF::R_AARCH64_LDST8_ABS_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST8_DTPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC;
- // TLS initial-exec block
- case AArch64::fixup_a64_movw_gottprel_g1:
- Type = ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
- break;
- case AArch64::fixup_a64_movw_gottprel_g0_nc:
- Type = ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
- break;
- case AArch64::fixup_a64_ld64_gottprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
- break;
+ report_fatal_error("invalid fixup for 8-bit load/store instruction");
+ return 0;
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return ELF::R_AARCH64_LDST16_ABS_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST16_DTPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC;
- // TLS local-exec block
- case AArch64::fixup_a64_movw_tprel_g2:
- Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G2;
- break;
- case AArch64::fixup_a64_movw_tprel_g1:
- Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1;
- break;
- case AArch64::fixup_a64_movw_tprel_g1_nc:
- Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC;
- break;
- case AArch64::fixup_a64_movw_tprel_g0:
- Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0;
- break;
- case AArch64::fixup_a64_movw_tprel_g0_nc:
- Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC;
- break;
- case AArch64::fixup_a64_add_tprel_hi12:
- Type = ELF::R_AARCH64_TLSLE_ADD_TPREL_HI12;
- break;
- case AArch64::fixup_a64_add_tprel_lo12:
- Type = ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12;
- break;
- case AArch64::fixup_a64_add_tprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst8_tprel_lo12:
- Type = ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst8_tprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst16_tprel_lo12:
- Type = ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst16_tprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst32_tprel_lo12:
- Type = ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst32_tprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst64_tprel_lo12:
- Type = ELF::R_AARCH64_TLSLE_LDST64_TPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst64_tprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC;
- break;
+ report_fatal_error("invalid fixup for 16-bit load/store instruction");
+ return 0;
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return ELF::R_AARCH64_LDST32_ABS_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST32_DTPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC;
- // TLS general-dynamic block
- case AArch64::fixup_a64_tlsdesc_adr_page:
- Type = ELF::R_AARCH64_TLSDESC_ADR_PAGE;
- break;
- case AArch64::fixup_a64_tlsdesc_ld64_lo12_nc:
- Type = ELF::R_AARCH64_TLSDESC_LD64_LO12_NC;
- break;
- case AArch64::fixup_a64_tlsdesc_add_lo12_nc:
- Type = ELF::R_AARCH64_TLSDESC_ADD_LO12_NC;
- break;
- case AArch64::fixup_a64_tlsdesc_call:
- Type = ELF::R_AARCH64_TLSDESC_CALL;
- break;
+ report_fatal_error("invalid fixup for 32-bit load/store instruction");
+ return 0;
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return ELF::R_AARCH64_LDST64_ABS_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_GOT && IsNC)
+ return ELF::R_AARCH64_LD64_GOT_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST64_DTPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST64_TPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_GOTTPREL && IsNC)
+ return ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_TLSDESC && IsNC)
+ return ELF::R_AARCH64_TLSDESC_LD64_LO12_NC;
+
+ report_fatal_error("invalid fixup for 64-bit load/store instruction");
+ return 0;
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return ELF::R_AARCH64_LDST128_ABS_LO12_NC;
+
+ report_fatal_error("invalid fixup for 128-bit load/store instruction");
+ return 0;
+ case AArch64::fixup_aarch64_movw:
+ if (RefKind == AArch64MCExpr::VK_ABS_G3)
+ return ELF::R_AARCH64_MOVW_UABS_G3;
+ if (RefKind == AArch64MCExpr::VK_ABS_G2)
+ return ELF::R_AARCH64_MOVW_UABS_G2;
+ if (RefKind == AArch64MCExpr::VK_ABS_G2_S)
+ return ELF::R_AARCH64_MOVW_SABS_G2;
+ if (RefKind == AArch64MCExpr::VK_ABS_G2_NC)
+ return ELF::R_AARCH64_MOVW_UABS_G2_NC;
+ if (RefKind == AArch64MCExpr::VK_ABS_G1)
+ return ELF::R_AARCH64_MOVW_UABS_G1;
+ if (RefKind == AArch64MCExpr::VK_ABS_G1_S)
+ return ELF::R_AARCH64_MOVW_SABS_G1;
+ if (RefKind == AArch64MCExpr::VK_ABS_G1_NC)
+ return ELF::R_AARCH64_MOVW_UABS_G1_NC;
+ if (RefKind == AArch64MCExpr::VK_ABS_G0)
+ return ELF::R_AARCH64_MOVW_UABS_G0;
+ if (RefKind == AArch64MCExpr::VK_ABS_G0_S)
+ return ELF::R_AARCH64_MOVW_SABS_G0;
+ if (RefKind == AArch64MCExpr::VK_ABS_G0_NC)
+ return ELF::R_AARCH64_MOVW_UABS_G0_NC;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G2)
+ return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G2;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G1)
+ return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G1_NC)
+ return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G0)
+ return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G0;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G0_NC)
+ return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC;
+ if (RefKind == AArch64MCExpr::VK_TPREL_G2)
+ return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G2;
+ if (RefKind == AArch64MCExpr::VK_TPREL_G1)
+ return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1;
+ if (RefKind == AArch64MCExpr::VK_TPREL_G1_NC)
+ return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC;
+ if (RefKind == AArch64MCExpr::VK_TPREL_G0)
+ return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0;
+ if (RefKind == AArch64MCExpr::VK_TPREL_G0_NC)
+ return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC;
+ if (RefKind == AArch64MCExpr::VK_GOTTPREL_G1)
+ return ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
+ if (RefKind == AArch64MCExpr::VK_GOTTPREL_G0_NC)
+ return ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
+ report_fatal_error("invalid fixup for movz/movk instruction");
+ return 0;
+ case AArch64::fixup_aarch64_tlsdesc_call:
+ return ELF::R_AARCH64_TLSDESC_CALL;
+ default:
+ llvm_unreachable("Unknown ELF relocation type");
}
}
- return Type;
+ llvm_unreachable("Unimplemented fixup -> relocation");
}
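A worked example with hypothetical operands: the single scale-8 fixup kind now fans out to different relocations purely on the expression modifier.

    // Sketch only: one fixup kind, three relocations.
    //   ldr x0, [x1, :lo12:var]          -> R_AARCH64_LDST64_ABS_LO12_NC
    //   ldr x0, [x1, :got_lo12:var]      -> R_AARCH64_LD64_GOT_LO12_NC
    //   ldr x0, [x1, :tlsdesc_lo12:var]  -> R_AARCH64_TLSDESC_LD64_LO12_NC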
MCObjectWriter *llvm::createAArch64ELFObjectWriter(raw_ostream &OS,
- uint8_t OSABI,
- bool IsLittleEndian) {
- MCELFObjectTargetWriter *MOTW = new AArch64ELFObjectWriter(OSABI, IsLittleEndian);
- return createELFObjectWriter(MOTW, OS, IsLittleEndian);
+ uint8_t OSABI,
+ bool IsLittleEndian) {
+ MCELFObjectTargetWriter *MOTW =
+ new AArch64ELFObjectWriter(OSABI, IsLittleEndian);
+ return createELFObjectWriter(MOTW, OS, IsLittleEndian);
}
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
index 473b7dd..a79406d 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
@@ -56,14 +56,14 @@ namespace {
class AArch64ELFStreamer : public MCELFStreamer {
public:
AArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB, raw_ostream &OS,
- MCCodeEmitter *Emitter)
+ MCCodeEmitter *Emitter)
: MCELFStreamer(Context, TAB, OS, Emitter), MappingSymbolCounter(0),
LastEMS(EMS_None) {}
~AArch64ELFStreamer() {}
- virtual void ChangeSection(const MCSection *Section,
- const MCExpr *Subsection) {
+ void ChangeSection(const MCSection *Section,
+ const MCExpr *Subsection) override {
// We have to keep track of the mapping symbol state of any sections we
// use. Each one should start off as EMS_None, which is provided as the
// default constructor by DenseMap::lookup.
@@ -76,7 +76,8 @@ public:
/// This function is the one used to emit instruction data into the ELF
/// streamer. We override it to add the appropriate mapping symbol if
/// necessary.
- virtual void EmitInstruction(const MCInst& Inst, const MCSubtargetInfo &STI) {
+ void EmitInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI) override {
EmitA64MappingSymbol();
MCELFStreamer::EmitInstruction(Inst, STI);
}
@@ -84,7 +85,7 @@ public:
/// This is one of the functions used to emit data into an ELF section, so the
/// AArch64 streamer overrides it to add the appropriate mapping symbol ($d)
/// if necessary.
- virtual void EmitBytes(StringRef Data) {
+ void EmitBytes(StringRef Data) override {
EmitDataMappingSymbol();
MCELFStreamer::EmitBytes(Data);
}
@@ -92,7 +93,8 @@ public:
/// This is one of the functions used to emit data into an ELF section, so the
/// AArch64 streamer overrides it to add the appropriate mapping symbol ($d)
/// if necessary.
- virtual void EmitValueImpl(const MCExpr *Value, unsigned Size) {
+ void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ const SMLoc &Loc) override {
EmitDataMappingSymbol();
MCELFStreamer::EmitValueImpl(Value, Size);
}
@@ -105,13 +107,15 @@ private:
};
void EmitDataMappingSymbol() {
- if (LastEMS == EMS_Data) return;
+ if (LastEMS == EMS_Data)
+ return;
EmitMappingSymbol("$d");
LastEMS = EMS_Data;
}
void EmitA64MappingSymbol() {
- if (LastEMS == EMS_A64) return;
+ if (LastEMS == EMS_A64)
+ return;
EmitMappingSymbol("$x");
LastEMS = EMS_A64;
}
@@ -120,15 +124,14 @@ private:
MCSymbol *Start = getContext().CreateTempSymbol();
EmitLabel(Start);
- MCSymbol *Symbol =
- getContext().GetOrCreateSymbol(Name + "." +
- Twine(MappingSymbolCounter++));
+ MCSymbol *Symbol = getContext().GetOrCreateSymbol(
+ Name + "." + Twine(MappingSymbolCounter++));
MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
MCELF::SetType(SD, ELF::STT_NOTYPE);
MCELF::SetBinding(SD, ELF::STB_LOCAL);
SD.setExternal(false);
- AssignSection(Symbol, getCurrentSection().first);
+ Symbol->setSection(*getCurrentSection().first);
const MCExpr *Value = MCSymbolRefExpr::Create(Start, getContext());
Symbol->setVariableValue(Value);
@@ -144,16 +147,14 @@ private:
}
namespace llvm {
- MCELFStreamer* createAArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB,
- raw_ostream &OS, MCCodeEmitter *Emitter,
- bool RelaxAll, bool NoExecStack) {
- AArch64ELFStreamer *S = new AArch64ELFStreamer(Context, TAB, OS, Emitter);
- if (RelaxAll)
- S->getAssembler().setRelaxAll(true);
- if (NoExecStack)
- S->getAssembler().setNoExecStack(true);
- return S;
- }
+MCELFStreamer *createAArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter,
+ bool RelaxAll, bool NoExecStack) {
+ AArch64ELFStreamer *S = new AArch64ELFStreamer(Context, TAB, OS, Emitter);
+ if (RelaxAll)
+ S->getAssembler().setRelaxAll(true);
+ if (NoExecStack)
+ S->getAssembler().setNoExecStack(true);
+ return S;
+}
}
-
-
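A sketch of the observable behavior on hypothetical input: the streamer tracks the last mapping-symbol state per section and emits a new symbol only on code/data transitions.

    // Sketch only: assembly fed through AArch64ELFStreamer.
    //   add  x0, x0, #1    // first instruction in section -> "$x.0" emitted
    //   .word 0xdeadbeef   // switch to data               -> "$d.1" emitted
    //   ret                // back to code                 -> "$x.2" emitted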
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h b/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h
index 5a89ca5..bc6973b 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h
@@ -18,10 +18,9 @@
namespace llvm {
- MCELFStreamer* createAArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB,
- raw_ostream &OS,
- MCCodeEmitter *Emitter,
- bool RelaxAll, bool NoExecStack);
+MCELFStreamer *createAArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter,
+ bool RelaxAll, bool NoExecStack);
}
#endif // AArch64_ELF_STREAMER_H
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h b/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h
index eeb122d..bf405fb 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h
@@ -1,4 +1,4 @@
-//=- AArch64/AArch64FixupKinds.h - AArch64 Specific Fixup Entries -*- C++ -*-=//
+//===-- AArch64FixupKinds.h - AArch64 Specific Fixup Entries ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -6,108 +6,71 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file describes the LLVM fixups applied to MCInsts in the AArch64
-// backend.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_AARCH64_AARCH64FIXUPKINDS_H
-#define LLVM_AARCH64_AARCH64FIXUPKINDS_H
+#ifndef LLVM_AArch64FIXUPKINDS_H
+#define LLVM_AArch64FIXUPKINDS_H
#include "llvm/MC/MCFixup.h"
namespace llvm {
- namespace AArch64 {
- enum Fixups {
- fixup_a64_ld_prel = FirstTargetFixupKind,
- fixup_a64_adr_prel,
- fixup_a64_adr_prel_page,
-
- fixup_a64_add_lo12,
-
- fixup_a64_ldst8_lo12,
- fixup_a64_ldst16_lo12,
- fixup_a64_ldst32_lo12,
- fixup_a64_ldst64_lo12,
- fixup_a64_ldst128_lo12,
-
- fixup_a64_tstbr,
- fixup_a64_condbr,
- fixup_a64_uncondbr,
- fixup_a64_call,
-
- fixup_a64_movw_uabs_g0,
- fixup_a64_movw_uabs_g0_nc,
- fixup_a64_movw_uabs_g1,
- fixup_a64_movw_uabs_g1_nc,
- fixup_a64_movw_uabs_g2,
- fixup_a64_movw_uabs_g2_nc,
- fixup_a64_movw_uabs_g3,
-
- fixup_a64_movw_sabs_g0,
- fixup_a64_movw_sabs_g1,
- fixup_a64_movw_sabs_g2,
-
- fixup_a64_adr_prel_got_page,
- fixup_a64_ld64_got_lo12_nc,
-
- // Produce offsets relative to the module's dynamic TLS area.
- fixup_a64_movw_dtprel_g2,
- fixup_a64_movw_dtprel_g1,
- fixup_a64_movw_dtprel_g1_nc,
- fixup_a64_movw_dtprel_g0,
- fixup_a64_movw_dtprel_g0_nc,
- fixup_a64_add_dtprel_hi12,
- fixup_a64_add_dtprel_lo12,
- fixup_a64_add_dtprel_lo12_nc,
- fixup_a64_ldst8_dtprel_lo12,
- fixup_a64_ldst8_dtprel_lo12_nc,
- fixup_a64_ldst16_dtprel_lo12,
- fixup_a64_ldst16_dtprel_lo12_nc,
- fixup_a64_ldst32_dtprel_lo12,
- fixup_a64_ldst32_dtprel_lo12_nc,
- fixup_a64_ldst64_dtprel_lo12,
- fixup_a64_ldst64_dtprel_lo12_nc,
-
- // Produce the GOT entry containing a variable's address in TLS's
- // initial-exec mode.
- fixup_a64_movw_gottprel_g1,
- fixup_a64_movw_gottprel_g0_nc,
- fixup_a64_adr_gottprel_page,
- fixup_a64_ld64_gottprel_lo12_nc,
- fixup_a64_ld_gottprel_prel19,
-
- // Produce offsets relative to the thread pointer: TPIDR_EL0.
- fixup_a64_movw_tprel_g2,
- fixup_a64_movw_tprel_g1,
- fixup_a64_movw_tprel_g1_nc,
- fixup_a64_movw_tprel_g0,
- fixup_a64_movw_tprel_g0_nc,
- fixup_a64_add_tprel_hi12,
- fixup_a64_add_tprel_lo12,
- fixup_a64_add_tprel_lo12_nc,
- fixup_a64_ldst8_tprel_lo12,
- fixup_a64_ldst8_tprel_lo12_nc,
- fixup_a64_ldst16_tprel_lo12,
- fixup_a64_ldst16_tprel_lo12_nc,
- fixup_a64_ldst32_tprel_lo12,
- fixup_a64_ldst32_tprel_lo12_nc,
- fixup_a64_ldst64_tprel_lo12,
- fixup_a64_ldst64_tprel_lo12_nc,
-
- // Produce the special fixups used by the general-dynamic TLS model.
- fixup_a64_tlsdesc_adr_page,
- fixup_a64_tlsdesc_ld64_lo12_nc,
- fixup_a64_tlsdesc_add_lo12_nc,
- fixup_a64_tlsdesc_call,
-
-
- // Marker
- LastTargetFixupKind,
- NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
- };
- }
-}
+namespace AArch64 {
+
+enum Fixups {
+ // fixup_aarch64_pcrel_adr_imm21 - A 21-bit pc-relative immediate inserted into
+ // an ADR instruction.
+ fixup_aarch64_pcrel_adr_imm21 = FirstTargetFixupKind,
+
+ // fixup_aarch64_pcrel_adrp_imm21 - A 21-bit pc-relative immediate inserted into
+ // an ADRP instruction.
+ fixup_aarch64_pcrel_adrp_imm21,
+
+  // fixup_aarch64_add_imm12 - 12-bit fixup for add/sub instructions.
+ // No alignment adjustment. All value bits are encoded.
+ fixup_aarch64_add_imm12,
+
+ // fixup_aarch64_ldst_imm12_* - unsigned 12-bit fixups for load and
+ // store instructions.
+ fixup_aarch64_ldst_imm12_scale1,
+ fixup_aarch64_ldst_imm12_scale2,
+ fixup_aarch64_ldst_imm12_scale4,
+ fixup_aarch64_ldst_imm12_scale8,
+ fixup_aarch64_ldst_imm12_scale16,
+
+  // fixup_aarch64_ldr_pcrel_imm19 - The high 19 bits of a 21-bit pc-relative
+  // immediate. Same encoding as fixup_aarch64_pcrel_branch19, except this is
+  // used by pc-relative loads and generates relocations directly when
+  // necessary.
+ fixup_aarch64_ldr_pcrel_imm19,
+
+  // fixup_aarch64_movw - A 16-bit immediate for a MOVZ/MOVK instruction. The
+  // expression's variant kind (e.g. :abs_g1:, :dtprel_g0_nc:) selects which
+  // 16-bit chunk of the value is addressed and which relocation is emitted.
+ fixup_aarch64_movw,
+
+  // fixup_aarch64_pcrel_branch14 - The high 14 bits of a 16-bit pc-relative
+  // immediate, used by TBZ/TBNZ.
+ fixup_aarch64_pcrel_branch14,
+
+  // fixup_aarch64_pcrel_branch19 - The high 19 bits of a 21-bit pc-relative
+  // immediate. Same encoding as fixup_aarch64_ldr_pcrel_imm19, except this is
+  // used by b.cc and generates relocations directly when necessary.
+ fixup_aarch64_pcrel_branch19,
+
+ // fixup_aarch64_pcrel_branch26 - The high 26 bits of a 28-bit pc-relative
+ // immediate.
+ fixup_aarch64_pcrel_branch26,
+
+ // fixup_aarch64_pcrel_call26 - The high 26 bits of a 28-bit pc-relative
+ // immediate. Distinguished from branch26 only on ELF.
+ fixup_aarch64_pcrel_call26,
+
+ // fixup_aarch64_tlsdesc_call - zero-space placeholder for the ELF
+ // R_AARCH64_TLSDESC_CALL relocation.
+ fixup_aarch64_tlsdesc_call,
+
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
+};
+
+} // end namespace AArch64
+} // end namespace llvm
#endif
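Tying the kinds to their typical users (hypothetical symbol var):

    // Sketch only: fixups attached to a common address materialization.
    //   adrp x0, var               // fixup_aarch64_pcrel_adrp_imm21
    //   add  x0, x0, :lo12:var     // fixup_aarch64_add_imm12
    //   ldr  x1, [x0, :lo12:var]   // fixup_aarch64_ldst_imm12_scale8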
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
index b090a55..dc4a8bf 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
@@ -13,26 +13,82 @@
#include "AArch64MCAsmInfo.h"
#include "llvm/ADT/Triple.h"
-
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Support/CommandLine.h"
using namespace llvm;
-AArch64ELFMCAsmInfo::AArch64ELFMCAsmInfo(StringRef TT) {
- Triple TheTriple(TT);
- if (TheTriple.getArch() == Triple::aarch64_be)
+enum AsmWriterVariantTy {
+ Default = -1,
+ Generic = 0,
+ Apple = 1
+};
+
+static cl::opt<AsmWriterVariantTy> AsmWriterVariant(
+ "aarch64-neon-syntax", cl::init(Default),
+ cl::desc("Choose style of NEON code to emit from AArch64 backend:"),
+ cl::values(clEnumValN(Generic, "generic", "Emit generic NEON assembly"),
+ clEnumValN(Apple, "apple", "Emit Apple-style NEON assembly"),
+ clEnumValEnd));
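Assuming a built llc, the new option is driven from the command line, e.g.:

    llc -mtriple=arm64-apple-ios -aarch64-neon-syntax=generic foo.ll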
+
+AArch64MCAsmInfoDarwin::AArch64MCAsmInfoDarwin() {
+ // We prefer NEON instructions to be printed in the short form.
+ AssemblerDialect = AsmWriterVariant == Default ? 1 : AsmWriterVariant;
+
+ PrivateGlobalPrefix = "L";
+ SeparatorString = "%%";
+ CommentString = ";";
+ PointerSize = CalleeSaveStackSlotSize = 8;
+
+ AlignmentIsInBytes = false;
+ UsesELFSectionDirectiveForBSS = true;
+ SupportsDebugInformation = true;
+ UseDataRegionDirectives = true;
+
+ ExceptionsType = ExceptionHandling::DwarfCFI;
+}
+
+const MCExpr *AArch64MCAsmInfoDarwin::getExprForPersonalitySymbol(
+ const MCSymbol *Sym, unsigned Encoding, MCStreamer &Streamer) const {
+ // On Darwin, we can reference dwarf symbols with foo@GOT-., which
+ // is an indirect pc-relative reference. The default implementation
+ // won't reference using the GOT, so we need this target-specific
+ // version.
+ MCContext &Context = Streamer.getContext();
+ const MCExpr *Res =
+ MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOT, Context);
+ MCSymbol *PCSym = Context.CreateTempSymbol();
+ Streamer.EmitLabel(PCSym);
+ const MCExpr *PC = MCSymbolRefExpr::Create(PCSym, Context);
+ return MCBinaryExpr::CreateSub(Res, PC, Context);
+}
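For a hypothetical personality routine this prints roughly as:

    // Sketch only: the GOT-relative difference built above.
    //   Ltmp0:
    //     .long ___gxx_personality_v0@GOT-Ltmp0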
+
+AArch64MCAsmInfoELF::AArch64MCAsmInfoELF(StringRef TT) {
+ Triple T(TT);
+ if (T.getArch() == Triple::arm64_be || T.getArch() == Triple::aarch64_be)
IsLittleEndian = false;
+ // We prefer NEON instructions to be printed in the short form.
+ AssemblerDialect = AsmWriterVariant == Default ? 0 : AsmWriterVariant;
+
PointerSize = 8;
// ".comm align is in bytes but .align is pow-2."
AlignmentIsInBytes = false;
CommentString = "//";
+ PrivateGlobalPrefix = ".L";
Code32Directive = ".code\t32";
Data16bitsDirective = "\t.hword\t";
Data32bitsDirective = "\t.word\t";
Data64bitsDirective = "\t.xword\t";
+ UseDataRegionDirectives = false;
+
+ WeakRefDirective = "\t.weak\t";
+
HasLEB128 = true;
SupportsDebugInformation = true;
@@ -41,6 +97,3 @@ AArch64ELFMCAsmInfo::AArch64ELFMCAsmInfo(StringRef TT) {
UseIntegratedAssembler = true;
}
-
-// Pin the vtable to this file.
-void AArch64ELFMCAsmInfo::anchor() {}
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
index 43c0e47..42a031d 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
@@ -1,4 +1,4 @@
-//==-- AArch64MCAsmInfo.h - AArch64 asm properties -------------*- C++ -*--===//
+//=====-- AArch64MCAsmInfo.h - AArch64 asm properties ---------*- C++ -*--====//
//
// The LLVM Compiler Infrastructure
//
@@ -11,17 +11,24 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_AARCH64TARGETASMINFO_H
-#define LLVM_AARCH64TARGETASMINFO_H
+#ifndef AArch64TARGETASMINFO_H
+#define AArch64TARGETASMINFO_H
-#include "llvm/MC/MCAsmInfoELF.h"
+#include "llvm/MC/MCAsmInfoDarwin.h"
namespace llvm {
+class Target;
+class StringRef;
+class MCStreamer;
+struct AArch64MCAsmInfoDarwin : public MCAsmInfoDarwin {
+ explicit AArch64MCAsmInfoDarwin();
+ const MCExpr *
+ getExprForPersonalitySymbol(const MCSymbol *Sym, unsigned Encoding,
+ MCStreamer &Streamer) const override;
+};
-struct AArch64ELFMCAsmInfo : public MCAsmInfoELF {
- explicit AArch64ELFMCAsmInfo(StringRef TT);
-private:
- virtual void anchor();
+struct AArch64MCAsmInfoELF : public MCAsmInfo {
+ explicit AArch64MCAsmInfoELF(StringRef TT);
};
} // namespace llvm
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
index b9a61ef..464a18c 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
@@ -1,4 +1,4 @@
-//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
+//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code-=//
//
// The LLVM Compiler Infrastructure
//
@@ -11,10 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mccodeemitter"
+#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
@@ -22,524 +21,562 @@
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"
-
using namespace llvm;
+#define DEBUG_TYPE "mccodeemitter"
+
+STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
+STATISTIC(MCNumFixups, "Number of MC fixups created.");
+
namespace {
+
class AArch64MCCodeEmitter : public MCCodeEmitter {
- AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
- void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
MCContext &Ctx;
+ AArch64MCCodeEmitter(const AArch64MCCodeEmitter &); // DO NOT IMPLEMENT
+ void operator=(const AArch64MCCodeEmitter &); // DO NOT IMPLEMENT
public:
- AArch64MCCodeEmitter(MCContext &ctx) : Ctx(ctx) {}
+ AArch64MCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
+ MCContext &ctx)
+ : Ctx(ctx) {}
~AArch64MCCodeEmitter() {}
- unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
+ // getBinaryCodeForInstr - TableGen'erated function for getting the
+ // binary encoding for an instruction.
+ uint64_t getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getMachineOpValue - Return binary encoding of operand. If the machine
+ /// operand requires relocation, record the relocation and return zero.
+ unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
- unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ /// getLdStUImm12OpValue - Return encoding info for 12-bit unsigned immediate
+ /// attached to a load, store or prfm instruction. If operand requires a
+ /// relocation, record it and return zero in that part of the encoding.
+ template <uint32_t FixupKind>
+ uint32_t getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
+ /// target.
+ uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
+ /// the 2-bit shift field.
+ uint32_t getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
- template<int MemSize>
- unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- return getOffsetUImm12OpValue(MI, OpIdx, Fixups, STI, MemSize);
- }
+ /// getCondBranchTargetOpValue - Return the encoded value for a conditional
+ /// branch target.
+ uint32_t getCondBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
- unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI,
- int MemSize) const;
+ /// getLoadLiteralOpValue - Return the encoded value for a load-literal
+ /// pc-relative address.
+ uint32_t getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
- unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
- unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
+ /// getMemExtendOpValue - Return the encoded value for a reg-extend load/store
+ /// instruction: bit 0 is whether a shift is present, bit 1 is whether the
+ /// operation is a sign extend (as opposed to a zero extend).
+ uint32_t getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
- unsigned getShiftRightImm8(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
- unsigned getShiftRightImm16(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
- unsigned getShiftRightImm32(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
- unsigned getShiftRightImm64(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
+ /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
+ /// branch target.
+ uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
- unsigned getShiftLeftImm8(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
- unsigned getShiftLeftImm16(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
- unsigned getShiftLeftImm32(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
- unsigned getShiftLeftImm64(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
+ /// getBranchTargetOpValue - Return the encoded value for an unconditional
+ /// branch target.
+ uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
- // Labels are handled mostly the same way: a symbol is needed, and
- // just gets some fixup attached.
- template<AArch64::Fixups fixupDesired>
- unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
+ /// getMoveWideImmOpValue - Return the encoded value for the immediate operand
+ /// of a MOVZ or MOVK instruction.
+ uint32_t getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
- unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
+ /// getVecShifterOpValue - Return the encoded value for the vector shifter.
+ uint32_t getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getMoveVecShifterOpValue - Return the encoded value for the vector move
+ /// shifter (MSL).
+ uint32_t getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ /// getFixedPointScaleOpValue - Return the encoded value for the
+ // FP-to-fixed-point scale factor.
+ uint32_t getFixedPointScaleOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
- unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
+ uint32_t getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ /// getSIMDShift64OpValue - Return the encoded value for the
+ // shift-by-immediate AdvSIMD instructions.
+ uint32_t getSIMDShift64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
- unsigned getAddressWithFixup(const MCOperand &MO,
- unsigned FixupKind,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
-
+ uint32_t getSIMDShift64_32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
- // getBinaryCodeForInstr - TableGen'erated function for getting the
- // binary encoding for an instruction.
- uint64_t getBinaryCodeForInstr(const MCInst &MI,
+ uint32_t getSIMDShift32OpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
- /// getMachineOpValue - Return binary encoding of operand. If the machine
- /// operand requires relocation, record the relocation and return zero.
- unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
+ uint32_t getSIMDShift16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
- void EmitByte(unsigned char C, raw_ostream &OS) const {
- OS << (char)C;
- }
+ void EmitByte(unsigned char C, raw_ostream &OS) const { OS << (char)C; }
- void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
+ void EmitConstant(uint64_t Val, unsigned Size, raw_ostream &OS) const {
// Output the constant in little endian byte order.
- for (unsigned i = 0; i != 4; ++i) {
- EmitByte(Val & 0xff, OS);
+ for (unsigned i = 0; i != Size; ++i) {
+ EmitByte(Val & 255, OS);
Val >>= 8;
}
}
-
void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
-
- template<int hasRs, int hasRt2> unsigned
- fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
- const MCSubtargetInfo &STI) const;
-
- unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
- const MCSubtargetInfo &STI) const;
+ const MCSubtargetInfo &STI) const override;
unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
const MCSubtargetInfo &STI) const;
+ template<int hasRs, int hasRt2> unsigned
+ fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
+ unsigned fixOneOperandFPComparison(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
};
} // end anonymous namespace
-unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
- unsigned FixupKind,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- if (!MO.isExpr()) {
- // This can occur for manually decoded or constructed MCInsts, but neither
- // the assembly-parser nor instruction selection will currently produce an
- // MCInst that's not a symbol reference.
- assert(MO.isImm() && "Unexpected address requested");
- return MO.getImm();
- }
+MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new AArch64MCCodeEmitter(MCII, STI, Ctx);
+}
- const MCExpr *Expr = MO.getExpr();
- MCFixupKind Kind = MCFixupKind(FixupKind);
- Fixups.push_back(MCFixup::Create(0, Expr, Kind));
+/// getMachineOpValue - Return binary encoding of operand. If the machine
+/// operand requires relocation, record the relocation and return zero.
+unsigned
+AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+  if (MO.isReg())
+    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
+
+  assert(MO.isImm() && "did not expect relocated expression");
+  return static_cast<unsigned>(MO.getImm());
}
-unsigned AArch64MCCodeEmitter::
-getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI,
- int MemSize) const {
- const MCOperand &ImmOp = MI.getOperand(OpIdx);
- if (ImmOp.isImm())
- return ImmOp.getImm();
-
- assert(ImmOp.isExpr() && "Unexpected operand type");
- const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
- unsigned FixupKind;
-
-
- switch (Expr->getKind()) {
- default: llvm_unreachable("Unexpected operand modifier");
- case AArch64MCExpr::VK_AARCH64_LO12: {
- static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
- AArch64::fixup_a64_ldst16_lo12,
- AArch64::fixup_a64_ldst32_lo12,
- AArch64::fixup_a64_ldst64_lo12,
- AArch64::fixup_a64_ldst128_lo12 };
- assert(MemSize <= 16 && "Invalid fixup for operation");
- FixupKind = FixupsBySize[Log2_32(MemSize)];
- break;
- }
- case AArch64MCExpr::VK_AARCH64_GOT_LO12:
- assert(MemSize == 8 && "Invalid fixup for operation");
- FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
- break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: {
- static const unsigned FixupsBySize[] = {
- AArch64::fixup_a64_ldst8_dtprel_lo12,
- AArch64::fixup_a64_ldst16_dtprel_lo12,
- AArch64::fixup_a64_ldst32_dtprel_lo12,
- AArch64::fixup_a64_ldst64_dtprel_lo12
- };
- assert(MemSize <= 8 && "Invalid fixup for operation");
- FixupKind = FixupsBySize[Log2_32(MemSize)];
- break;
- }
- case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
- static const unsigned FixupsBySize[] = {
- AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
- AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
- AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
- AArch64::fixup_a64_ldst64_dtprel_lo12_nc
- };
- assert(MemSize <= 8 && "Invalid fixup for operation");
- FixupKind = FixupsBySize[Log2_32(MemSize)];
- break;
- }
- case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
- assert(MemSize == 8 && "Invalid fixup for operation");
- FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
- break;
- case AArch64MCExpr::VK_AARCH64_TPREL_LO12:{
- static const unsigned FixupsBySize[] = {
- AArch64::fixup_a64_ldst8_tprel_lo12,
- AArch64::fixup_a64_ldst16_tprel_lo12,
- AArch64::fixup_a64_ldst32_tprel_lo12,
- AArch64::fixup_a64_ldst64_tprel_lo12
- };
- assert(MemSize <= 8 && "Invalid fixup for operation");
- FixupKind = FixupsBySize[Log2_32(MemSize)];
- break;
- }
- case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
- static const unsigned FixupsBySize[] = {
- AArch64::fixup_a64_ldst8_tprel_lo12_nc,
- AArch64::fixup_a64_ldst16_tprel_lo12_nc,
- AArch64::fixup_a64_ldst32_tprel_lo12_nc,
- AArch64::fixup_a64_ldst64_tprel_lo12_nc
- };
- assert(MemSize <= 8 && "Invalid fixup for operation");
- FixupKind = FixupsBySize[Log2_32(MemSize)];
- break;
- }
- case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
- assert(MemSize == 8 && "Invalid fixup for operation");
- FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
- break;
+template<unsigned FixupKind> uint32_t
+AArch64MCCodeEmitter::getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ uint32_t ImmVal = 0;
+
+ if (MO.isImm())
+ ImmVal = static_cast<uint32_t>(MO.getImm());
+ else {
+ assert(MO.isExpr() && "unable to encode load/store imm operand");
+ MCFixupKind Kind = MCFixupKind(FixupKind);
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(), Kind, MI.getLoc()));
+ ++MCNumFixups;
}
- return getAddressWithFixup(ImmOp, FixupKind, Fixups, STI);
+ return ImmVal;
}
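+
+// E.g. for "ldr x0, [x1, :lo12:sym]" ImmVal stays 0 and a fixup with the
+// template's FixupKind (presumably fixup_aarch64_ldst_imm12_scale8 for an
+// 8-byte access) is recorded instead.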
-unsigned
-AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
+/// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
+/// target.
+uint32_t
+AArch64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpIdx);
+
+ // If the destination is an immediate, we have nothing to do.
if (MO.isImm())
- return static_cast<unsigned>(MO.getImm());
+ return MO.getImm();
+ assert(MO.isExpr() && "Unexpected target type!");
+ const MCExpr *Expr = MO.getExpr();
- assert(MO.isExpr());
-
- unsigned FixupKind = 0;
- switch(cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
- default: llvm_unreachable("Invalid expression modifier");
- case AArch64MCExpr::VK_AARCH64_LO12:
- FixupKind = AArch64::fixup_a64_add_lo12; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
- FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
- FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
- FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
- FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
- FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
- FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
- case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
- FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
- }
+ MCFixupKind Kind = MI.getOpcode() == AArch64::ADR
+ ? MCFixupKind(AArch64::fixup_aarch64_pcrel_adr_imm21)
+ : MCFixupKind(AArch64::fixup_aarch64_pcrel_adrp_imm21);
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc()));
- return getAddressWithFixup(MO, FixupKind, Fixups, STI);
-}
+ MCNumFixups += 1;
-unsigned
-AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
+ // All of the information is in the fixup.
+ return 0;
+}
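+
+// E.g. "adr x0, sym" records fixup_aarch64_pcrel_adr_imm21 while
+// "adrp x0, sym" records fixup_aarch64_pcrel_adrp_imm21, as selected above.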
+/// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
+/// the 2-bit shift field. The shift field is stored in bits 12-13 of the
+/// return value.
+uint32_t
+AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // Suboperands are [imm, shifter].
const MCOperand &MO = MI.getOperand(OpIdx);
+ const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
+ assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL &&
+ "unexpected shift type for add/sub immediate");
+ unsigned ShiftVal = AArch64_AM::getShiftValue(MO1.getImm());
+ assert((ShiftVal == 0 || ShiftVal == 12) &&
+ "unexpected shift value for add/sub immediate");
if (MO.isImm())
- return static_cast<unsigned>(MO.getImm());
-
- assert(MO.isExpr());
+ return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << 12));
+ assert(MO.isExpr() && "Unable to encode MCOperand!");
+ const MCExpr *Expr = MO.getExpr();
- unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
- if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
- Modifier = Expr->getKind();
+ // Encode the 12 bits of the fixup.
+ MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_add_imm12);
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc()));
- unsigned FixupKind = 0;
- switch(Modifier) {
- case AArch64MCExpr::VK_AARCH64_None:
- FixupKind = AArch64::fixup_a64_adr_prel_page;
- break;
- case AArch64MCExpr::VK_AARCH64_GOT:
- FixupKind = AArch64::fixup_a64_adr_prel_got_page;
- break;
- case AArch64MCExpr::VK_AARCH64_GOTTPREL:
- FixupKind = AArch64::fixup_a64_adr_gottprel_page;
- break;
- case AArch64MCExpr::VK_AARCH64_TLSDESC:
- FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
- break;
- default:
- llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
- }
+ ++MCNumFixups;
- return getAddressWithFixup(MO, FixupKind, Fixups, STI);
+ return 0;
}
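+
+// E.g. "add x0, x1, #3, lsl #12" would return 3 | (1 << 12) = 0x1003: the
+// immediate in bits 0-11 and the shift selector just above it.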
-unsigned
-AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
-
+/// getCondBranchTargetOpValue - Return the encoded value for a conditional
+/// branch target.
+uint32_t AArch64MCCodeEmitter::getCondBranchTargetOpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpIdx);
- assert(MO.isImm() && "Only immediate expected for shift");
- return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
-}
+ // If the destination is an immediate, we have nothing to do.
+ if (MO.isImm())
+ return MO.getImm();
+ assert(MO.isExpr() && "Unexpected target type!");
-unsigned
-AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
+ MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch19);
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(), Kind, MI.getLoc()));
- const MCOperand &MO = MI.getOperand(OpIdx);
- assert(MO.isImm() && "Only immediate expected for shift");
+ ++MCNumFixups;
- return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
+ // All of the information is in the fixup.
+ return 0;
}
-unsigned AArch64MCCodeEmitter::getShiftRightImm8(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- return 8 - MI.getOperand(Op).getImm();
-}
+/// getLoadLiteralOpValue - Return the encoded value for a load-literal
+/// pc-relative address.
+uint32_t
+AArch64MCCodeEmitter::getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
-unsigned AArch64MCCodeEmitter::getShiftRightImm16(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- return 16 - MI.getOperand(Op).getImm();
-}
+ // If the destination is an immediate, we have nothing to do.
+ if (MO.isImm())
+ return MO.getImm();
+ assert(MO.isExpr() && "Unexpected target type!");
-unsigned AArch64MCCodeEmitter::getShiftRightImm32(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- return 32 - MI.getOperand(Op).getImm();
-}
+ MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_ldr_pcrel_imm19);
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(), Kind, MI.getLoc()));
-unsigned AArch64MCCodeEmitter::getShiftRightImm64(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- return 64 - MI.getOperand(Op).getImm();
-}
+ ++MCNumFixups;
-unsigned AArch64MCCodeEmitter::getShiftLeftImm8(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- return MI.getOperand(Op).getImm() - 8;
+ // All of the information is in the fixup.
+ return 0;
}
-unsigned AArch64MCCodeEmitter::getShiftLeftImm16(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- return MI.getOperand(Op).getImm() - 16;
+uint32_t
+AArch64MCCodeEmitter::getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ unsigned SignExtend = MI.getOperand(OpIdx).getImm();
+ unsigned DoShift = MI.getOperand(OpIdx + 1).getImm();
+ return (SignExtend << 1) | DoShift;
}
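+
+// E.g. a sign-extending scaled offset such as "[x1, w2, sxtw #3]" would give
+// SignExtend = 1 and DoShift = 1, encoding as 0b11.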
-unsigned AArch64MCCodeEmitter::getShiftLeftImm32(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- return MI.getOperand(Op).getImm() - 32;
-}
+uint32_t
+AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
-unsigned AArch64MCCodeEmitter::getShiftLeftImm64(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- return MI.getOperand(Op).getImm() - 64;
+ if (MO.isImm())
+ return MO.getImm();
+ assert(MO.isExpr() && "Unexpected movz/movk immediate");
+
+ Fixups.push_back(MCFixup::Create(
+ 0, MO.getExpr(), MCFixupKind(AArch64::fixup_aarch64_movw), MI.getLoc()));
+
+ ++MCNumFixups;
+
+ return 0;
}
-template<AArch64::Fixups fixupDesired> unsigned
-AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
- unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
+/// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
+/// branch target.
+uint32_t AArch64MCCodeEmitter::getTestBranchTargetOpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpIdx);
- if (MO.isExpr())
- return getAddressWithFixup(MO, fixupDesired, Fixups, STI);
+ // If the destination is an immediate, we have nothing to do.
+ if (MO.isImm())
+ return MO.getImm();
+ assert(MO.isExpr() && "Unexpected ADR target type!");
- assert(MO.isImm());
- return MO.getImm();
+ MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch14);
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(), Kind, MI.getLoc()));
+
+ ++MCNumFixups;
+
+ // All of the information is in the fixup.
+ return 0;
}
-unsigned
-AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
- unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
+/// getBranchTargetOpValue - Return the encoded value for an unconditional
+/// branch target.
+uint32_t
+AArch64MCCodeEmitter::getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpIdx);
+ // If the destination is an immediate, we have nothing to do.
if (MO.isImm())
return MO.getImm();
+ assert(MO.isExpr() && "Unexpected ADR target type!");
- assert(MO.isExpr());
+ MCFixupKind Kind = MI.getOpcode() == AArch64::BL
+ ? MCFixupKind(AArch64::fixup_aarch64_pcrel_call26)
+ : MCFixupKind(AArch64::fixup_aarch64_pcrel_branch26);
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(), Kind, MI.getLoc()));
- unsigned FixupKind;
- if (isa<AArch64MCExpr>(MO.getExpr())) {
- assert(dyn_cast<AArch64MCExpr>(MO.getExpr())->getKind()
- == AArch64MCExpr::VK_AARCH64_GOTTPREL
- && "Invalid symbol modifier for literal load");
- FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
- } else {
- FixupKind = AArch64::fixup_a64_ld_prel;
- }
+ ++MCNumFixups;
- return getAddressWithFixup(MO, FixupKind, Fixups, STI);
+ // All of the information is in the fixup.
+ return 0;
}
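+
+// E.g. "bl func" records fixup_aarch64_pcrel_call26 while a plain "b label"
+// records fixup_aarch64_pcrel_branch26; both leave the 26-bit offset zero
+// for the fixup to fill in.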
+/// getVecShifterOpValue - Return the encoded value for the vector shifter:
+///
+/// 00 -> 0
+/// 01 -> 8
+/// 10 -> 16
+/// 11 -> 24
+uint32_t
+AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the shift amount!");
-unsigned
-AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
- const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- if (MO.isReg()) {
- return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
- } else if (MO.isImm()) {
- return static_cast<unsigned>(MO.getImm());
+ switch (MO.getImm()) {
+ default:
+ break;
+ case 0:
+ return 0;
+ case 8:
+ return 1;
+ case 16:
+ return 2;
+ case 24:
+ return 3;
}
- llvm_unreachable("Unable to encode MCOperand!");
+ assert(false && "Invalid value for vector shift amount!");
return 0;
}
-unsigned
-AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- const MCOperand &UImm16MO = MI.getOperand(OpIdx);
- const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);
+uint32_t
+AArch64MCCodeEmitter::getSIMDShift64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return 64 - (MO.getImm());
+}
- unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;
+uint32_t AArch64MCCodeEmitter::getSIMDShift64_32OpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return 64 - (MO.getImm() | 32);
+}
- if (UImm16MO.isImm()) {
- Result |= UImm16MO.getImm();
- return Result;
- }
+uint32_t
+AArch64MCCodeEmitter::getSIMDShift32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return 32 - (MO.getImm() | 16);
+}
- const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
- AArch64::Fixups requestedFixup;
- switch (A64E->getKind()) {
- default: llvm_unreachable("unexpected expression modifier");
- case AArch64MCExpr::VK_AARCH64_ABS_G0:
- requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
- case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
- requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
- case AArch64MCExpr::VK_AARCH64_ABS_G1:
- requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
- case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
- requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
- case AArch64MCExpr::VK_AARCH64_ABS_G2:
- requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
- case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
- requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
- case AArch64MCExpr::VK_AARCH64_ABS_G3:
- requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
- case AArch64MCExpr::VK_AARCH64_SABS_G0:
- requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
- case AArch64MCExpr::VK_AARCH64_SABS_G1:
- requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
- case AArch64MCExpr::VK_AARCH64_SABS_G2:
- requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
- requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
- requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
- requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
- requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
- requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
- case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
- requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
- case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
- requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_G2:
- requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_G1:
- requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
- requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_G0:
- requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
- requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
- }
+uint32_t
+AArch64MCCodeEmitter::getSIMDShift16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return 16 - (MO.getImm() | 8);
+}
- return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups, STI);
+/// getFixedPointScaleOpValue - Return the encoded value for the
+/// FP-to-fixed-point scale factor.
+uint32_t AArch64MCCodeEmitter::getFixedPointScaleOpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return 64 - MO.getImm();
}
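+
+// E.g. a fixed-point conversion using 8 fractional bits would return
+// 64 - 8 = 56 for the scale field.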
-template<int hasRs, int hasRt2> unsigned
-AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
- unsigned EncodedValue,
+uint32_t
+AArch64MCCodeEmitter::getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
- if (!hasRs) EncodedValue |= 0x001F0000;
- if (!hasRt2) EncodedValue |= 0x00007C00;
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return 64 - MO.getImm();
+}
- return EncodedValue;
+uint32_t
+AArch64MCCodeEmitter::getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return 32 - MO.getImm();
}
-unsigned
-AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
- const MCSubtargetInfo &STI) const {
+uint32_t
+AArch64MCCodeEmitter::getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return 16 - MO.getImm();
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return 8 - MO.getImm();
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return MO.getImm() - 64;
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return MO.getImm() - 32;
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return MO.getImm() - 16;
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return MO.getImm() - 8;
+}
+
+/// getMoveVecShifterOpValue - Return the encoded value for the vector move
+/// shifter (MSL).
+uint32_t AArch64MCCodeEmitter::getMoveVecShifterOpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() &&
+ "Expected an immediate value for the move shift amount!");
+ unsigned ShiftVal = AArch64_AM::getShiftValue(MO.getImm());
+ assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!");
+ return ShiftVal == 8 ? 0 : 1;
+}
+
+unsigned AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const {
// If one of the signed fixup kinds is applied to a MOVZ instruction, the
// eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
// job to ensure that any bits possibly affected by this are 0. This means we
@@ -552,23 +589,38 @@ AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
switch (A64E->getKind()) {
- case AArch64MCExpr::VK_AARCH64_SABS_G0:
- case AArch64MCExpr::VK_AARCH64_SABS_G1:
- case AArch64MCExpr::VK_AARCH64_SABS_G2:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
- case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
- case AArch64MCExpr::VK_AARCH64_TPREL_G2:
- case AArch64MCExpr::VK_AARCH64_TPREL_G1:
- case AArch64MCExpr::VK_AARCH64_TPREL_G0:
+ case AArch64MCExpr::VK_DTPREL_G2:
+ case AArch64MCExpr::VK_DTPREL_G1:
+ case AArch64MCExpr::VK_DTPREL_G0:
+ case AArch64MCExpr::VK_GOTTPREL_G1:
+ case AArch64MCExpr::VK_TPREL_G2:
+ case AArch64MCExpr::VK_TPREL_G1:
+ case AArch64MCExpr::VK_TPREL_G0:
return EncodedValue & ~(1u << 30);
default:
// Nothing to do for an unsigned fixup.
return EncodedValue;
}
- llvm_unreachable("Should have returned by now");
+
+ return EncodedValue & ~(1u << 30);
+}
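+
+// E.g. for "movz x0, #:dtprel_g1:var" the linker may produce either MOVZ or
+// MOVN depending on the sign of the final value; the two opcodes differ only
+// in bit 30, which fixMOVZ therefore leaves clear for the relocation to set.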
+
+void AArch64MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ if (MI.getOpcode() == AArch64::TLSDESCCALL) {
+ // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
+ // following (BLR) instruction. It doesn't emit any code itself so it
+ // doesn't go through the normal TableGenerated channels.
+ MCFixupKind Fixup = MCFixupKind(AArch64::fixup_aarch64_tlsdesc_call);
+ Fixups.push_back(MCFixup::Create(0, MI.getOperand(0).getExpr(), Fixup));
+ return;
+ }
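+  // E.g. a ".tlsdesccall var" pseudo followed by "blr x1" takes the early
+  // return above: no bytes are emitted for it, only the TLSDESC_CALL fixup
+  // recorded at the offset of the following BLR.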
+
+ uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
+ EmitConstant(Binary, 4, OS);
+ ++MCNumEmitted; // Keep track of the # of mi's emitted.
}
unsigned
@@ -581,32 +633,22 @@ AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
return EncodedValue;
}
-MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
- const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI,
- MCContext &Ctx) {
- return new AArch64MCCodeEmitter(Ctx);
-}
-
-void AArch64MCCodeEmitter::
-EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- if (MI.getOpcode() == AArch64::TLSDESCCALL) {
- // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
- // following (BLR) instruction. It doesn't emit any code itself so it
- // doesn't go through the normal TableGenerated channels.
- MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
- const MCExpr *Expr;
- Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
- Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
- return;
- }
-
- uint32_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
+template<int hasRs, int hasRt2> unsigned
+AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
+ unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const {
+ if (!hasRs) EncodedValue |= 0x001F0000;
+ if (!hasRt2) EncodedValue |= 0x00007C00;
- EmitInstruction(Binary, OS);
+ return EncodedValue;
}
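+
+// E.g. LDXR has neither a status register (Rs) nor a second transfer
+// register (Rt2); fixLoadStoreExclusive fills both unused fields with ones
+// (Rs occupies bits 20-16, mask 0x001F0000; Rt2 bits 14-10, mask 0x00007C00).
+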
+unsigned AArch64MCCodeEmitter::fixOneOperandFPComparison(
+ const MCInst &MI, unsigned EncodedValue, const MCSubtargetInfo &STI) const {
+ // The Rm field of FCMP and friends is unused - it should be assembled
+ // as 0, but is ignored by the processor.
+ EncodedValue &= ~(0x1f << 16);
+ return EncodedValue;
+}
#include "AArch64GenMCCodeEmitter.inc"
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
index c7ccaee..85c3ec7 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
@@ -12,74 +12,121 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "aarch64mcexpr"
#include "AArch64MCExpr.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELF.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCValue.h"
#include "llvm/Object/ELF.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
-const AArch64MCExpr*
-AArch64MCExpr::Create(VariantKind Kind, const MCExpr *Expr,
- MCContext &Ctx) {
- return new (Ctx) AArch64MCExpr(Kind, Expr);
+#define DEBUG_TYPE "aarch64symbolrefexpr"
+
+const AArch64MCExpr *AArch64MCExpr::Create(const MCExpr *Expr, VariantKind Kind,
+ MCContext &Ctx) {
+ return new (Ctx) AArch64MCExpr(Expr, Kind);
+}
+
+StringRef AArch64MCExpr::getVariantKindName() const {
+ switch (static_cast<uint32_t>(getKind())) {
+ case VK_CALL: return "";
+ case VK_LO12: return ":lo12:";
+ case VK_ABS_G3: return ":abs_g3:";
+ case VK_ABS_G2: return ":abs_g2:";
+ case VK_ABS_G2_S: return ":abs_g2_s:";
+ case VK_ABS_G2_NC: return ":abs_g2_nc:";
+ case VK_ABS_G1: return ":abs_g1:";
+ case VK_ABS_G1_S: return ":abs_g1_s:";
+ case VK_ABS_G1_NC: return ":abs_g1_nc:";
+ case VK_ABS_G0: return ":abs_g0:";
+ case VK_ABS_G0_S: return ":abs_g0_s:";
+ case VK_ABS_G0_NC: return ":abs_g0_nc:";
+ case VK_DTPREL_G2: return ":dtprel_g2:";
+ case VK_DTPREL_G1: return ":dtprel_g1:";
+ case VK_DTPREL_G1_NC: return ":dtprel_g1_nc:";
+ case VK_DTPREL_G0: return ":dtprel_g0:";
+ case VK_DTPREL_G0_NC: return ":dtprel_g0_nc:";
+ case VK_DTPREL_HI12: return ":dtprel_hi12:";
+ case VK_DTPREL_LO12: return ":dtprel_lo12:";
+ case VK_DTPREL_LO12_NC: return ":dtprel_lo12_nc:";
+ case VK_TPREL_G2: return ":tprel_g2:";
+ case VK_TPREL_G1: return ":tprel_g1:";
+ case VK_TPREL_G1_NC: return ":tprel_g1_nc:";
+ case VK_TPREL_G0: return ":tprel_g0:";
+ case VK_TPREL_G0_NC: return ":tprel_g0_nc:";
+ case VK_TPREL_HI12: return ":tprel_hi12:";
+ case VK_TPREL_LO12: return ":tprel_lo12:";
+ case VK_TPREL_LO12_NC: return ":tprel_lo12_nc:";
+ case VK_TLSDESC_LO12: return ":tlsdesc_lo12:";
+ case VK_ABS_PAGE: return "";
+ case VK_GOT_PAGE: return ":got:";
+ case VK_GOT_LO12: return ":got_lo12:";
+ case VK_GOTTPREL_PAGE: return ":gottprel:";
+ case VK_GOTTPREL_LO12_NC: return ":gottprel_lo12:";
+ case VK_GOTTPREL_G1: return ":gottprel_g1:";
+ case VK_GOTTPREL_G0_NC: return ":gottprel_g0_nc:";
+ case VK_TLSDESC: return "";
+ case VK_TLSDESC_PAGE: return ":tlsdesc:";
+ default:
+ llvm_unreachable("Invalid ELF symbol kind");
+ }
}
void AArch64MCExpr::PrintImpl(raw_ostream &OS) const {
- switch (Kind) {
- default: llvm_unreachable("Invalid kind!");
- case VK_AARCH64_GOT: OS << ":got:"; break;
- case VK_AARCH64_GOT_LO12: OS << ":got_lo12:"; break;
- case VK_AARCH64_LO12: OS << ":lo12:"; break;
- case VK_AARCH64_ABS_G0: OS << ":abs_g0:"; break;
- case VK_AARCH64_ABS_G0_NC: OS << ":abs_g0_nc:"; break;
- case VK_AARCH64_ABS_G1: OS << ":abs_g1:"; break;
- case VK_AARCH64_ABS_G1_NC: OS << ":abs_g1_nc:"; break;
- case VK_AARCH64_ABS_G2: OS << ":abs_g2:"; break;
- case VK_AARCH64_ABS_G2_NC: OS << ":abs_g2_nc:"; break;
- case VK_AARCH64_ABS_G3: OS << ":abs_g3:"; break;
- case VK_AARCH64_SABS_G0: OS << ":abs_g0_s:"; break;
- case VK_AARCH64_SABS_G1: OS << ":abs_g1_s:"; break;
- case VK_AARCH64_SABS_G2: OS << ":abs_g2_s:"; break;
- case VK_AARCH64_DTPREL_G2: OS << ":dtprel_g2:"; break;
- case VK_AARCH64_DTPREL_G1: OS << ":dtprel_g1:"; break;
- case VK_AARCH64_DTPREL_G1_NC: OS << ":dtprel_g1_nc:"; break;
- case VK_AARCH64_DTPREL_G0: OS << ":dtprel_g0:"; break;
- case VK_AARCH64_DTPREL_G0_NC: OS << ":dtprel_g0_nc:"; break;
- case VK_AARCH64_DTPREL_HI12: OS << ":dtprel_hi12:"; break;
- case VK_AARCH64_DTPREL_LO12: OS << ":dtprel_lo12:"; break;
- case VK_AARCH64_DTPREL_LO12_NC: OS << ":dtprel_lo12_nc:"; break;
- case VK_AARCH64_GOTTPREL_G1: OS << ":gottprel_g1:"; break;
- case VK_AARCH64_GOTTPREL_G0_NC: OS << ":gottprel_g0_nc:"; break;
- case VK_AARCH64_GOTTPREL: OS << ":gottprel:"; break;
- case VK_AARCH64_GOTTPREL_LO12: OS << ":gottprel_lo12:"; break;
- case VK_AARCH64_TPREL_G2: OS << ":tprel_g2:"; break;
- case VK_AARCH64_TPREL_G1: OS << ":tprel_g1:"; break;
- case VK_AARCH64_TPREL_G1_NC: OS << ":tprel_g1_nc:"; break;
- case VK_AARCH64_TPREL_G0: OS << ":tprel_g0:"; break;
- case VK_AARCH64_TPREL_G0_NC: OS << ":tprel_g0_nc:"; break;
- case VK_AARCH64_TPREL_HI12: OS << ":tprel_hi12:"; break;
- case VK_AARCH64_TPREL_LO12: OS << ":tprel_lo12:"; break;
- case VK_AARCH64_TPREL_LO12_NC: OS << ":tprel_lo12_nc:"; break;
- case VK_AARCH64_TLSDESC: OS << ":tlsdesc:"; break;
- case VK_AARCH64_TLSDESC_LO12: OS << ":tlsdesc_lo12:"; break;
+ if (getKind() != VK_NONE)
+ OS << getVariantKindName();
+ OS << *Expr;
+}
+
+// FIXME: This basically copies MCObjectStreamer::AddValueSymbols. Perhaps
+// that method should be made public?
+// FIXME: really do above: now that two backends are using it.
+static void AddValueSymbolsImpl(const MCExpr *Value, MCAssembler *Asm) {
+ switch (Value->getKind()) {
+ case MCExpr::Target:
+ llvm_unreachable("Can't handle nested target expr!");
+ break;
+
+ case MCExpr::Constant:
+ break;
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value);
+ AddValueSymbolsImpl(BE->getLHS(), Asm);
+ AddValueSymbolsImpl(BE->getRHS(), Asm);
+ break;
+ }
+
+ case MCExpr::SymbolRef:
+ Asm->getOrCreateSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol());
+ break;
+
+ case MCExpr::Unary:
+ AddValueSymbolsImpl(cast<MCUnaryExpr>(Value)->getSubExpr(), Asm);
+ break;
}
+}
+
+void AArch64MCExpr::AddValueSymbols(MCAssembler *Asm) const {
+ AddValueSymbolsImpl(getSubExpr(), Asm);
+}
- const MCExpr *Expr = getSubExpr();
- if (Expr->getKind() != MCExpr::SymbolRef)
- OS << '(';
- Expr->print(OS);
- if (Expr->getKind() != MCExpr::SymbolRef)
- OS << ')';
+const MCSection *AArch64MCExpr::FindAssociatedSection() const {
+ llvm_unreachable("FIXME: what goes here?");
}
-bool
-AArch64MCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
- const MCAsmLayout *Layout) const {
- return getSubExpr()->EvaluateAsRelocatable(Res, Layout);
+bool AArch64MCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout) const {
+ if (!getSubExpr()->EvaluateAsRelocatable(Res, Layout))
+ return false;
+
+ Res =
+ MCValue::get(Res.getSymA(), Res.getSymB(), Res.getConstant(), getKind());
+
+ return true;
}
static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) {
@@ -113,66 +160,15 @@ static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) {
}
void AArch64MCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {
- switch (getKind()) {
+ switch (getSymbolLoc(Kind)) {
default:
return;
- case VK_AARCH64_DTPREL_G2:
- case VK_AARCH64_DTPREL_G1:
- case VK_AARCH64_DTPREL_G1_NC:
- case VK_AARCH64_DTPREL_G0:
- case VK_AARCH64_DTPREL_G0_NC:
- case VK_AARCH64_DTPREL_HI12:
- case VK_AARCH64_DTPREL_LO12:
- case VK_AARCH64_DTPREL_LO12_NC:
- case VK_AARCH64_GOTTPREL_G1:
- case VK_AARCH64_GOTTPREL_G0_NC:
- case VK_AARCH64_GOTTPREL:
- case VK_AARCH64_GOTTPREL_LO12:
- case VK_AARCH64_TPREL_G2:
- case VK_AARCH64_TPREL_G1:
- case VK_AARCH64_TPREL_G1_NC:
- case VK_AARCH64_TPREL_G0:
- case VK_AARCH64_TPREL_G0_NC:
- case VK_AARCH64_TPREL_HI12:
- case VK_AARCH64_TPREL_LO12:
- case VK_AARCH64_TPREL_LO12_NC:
- case VK_AARCH64_TLSDESC:
- case VK_AARCH64_TLSDESC_LO12:
+ case VK_DTPREL:
+ case VK_GOTTPREL:
+ case VK_TPREL:
+ case VK_TLSDESC:
break;
}
fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm);
}
-
-// FIXME: This basically copies MCObjectStreamer::AddValueSymbols. Perhaps
-// that method should be made public?
-// FIXME: really do above: now that two backends are using it.
-static void AddValueSymbolsImpl(const MCExpr *Value, MCAssembler *Asm) {
- switch (Value->getKind()) {
- case MCExpr::Target:
- llvm_unreachable("Can't handle nested target expr!");
- break;
-
- case MCExpr::Constant:
- break;
-
- case MCExpr::Binary: {
- const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value);
- AddValueSymbolsImpl(BE->getLHS(), Asm);
- AddValueSymbolsImpl(BE->getRHS(), Asm);
- break;
- }
-
- case MCExpr::SymbolRef:
- Asm->getOrCreateSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol());
- break;
-
- case MCExpr::Unary:
- AddValueSymbolsImpl(cast<MCUnaryExpr>(Value)->getSubExpr(), Asm);
- break;
- }
-}
-
-void AArch64MCExpr::AddValueSymbols(MCAssembler *Asm) const {
- AddValueSymbolsImpl(getSubExpr(), Asm);
-}
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h b/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
index d9798ae..e869ed0 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
@@ -1,4 +1,4 @@
-//==- AArch64MCExpr.h - AArch64 specific MC expression classes --*- C++ -*-===//
+//=--- AArch64MCExpr.h - AArch64 specific MC expression classes ---*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -12,168 +12,149 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_AARCH64MCEXPR_H
-#define LLVM_AARCH64MCEXPR_H
+#ifndef LLVM_AArch64MCEXPR_H
+#define LLVM_AArch64MCEXPR_H
#include "llvm/MC/MCExpr.h"
+#include "llvm/Support/ErrorHandling.h"
namespace llvm {
class AArch64MCExpr : public MCTargetExpr {
public:
enum VariantKind {
- VK_AARCH64_None,
- VK_AARCH64_GOT, // :got: modifier in assembly
- VK_AARCH64_GOT_LO12, // :got_lo12:
- VK_AARCH64_LO12, // :lo12:
-
- VK_AARCH64_ABS_G0, // :abs_g0:
- VK_AARCH64_ABS_G0_NC, // :abs_g0_nc:
- VK_AARCH64_ABS_G1,
- VK_AARCH64_ABS_G1_NC,
- VK_AARCH64_ABS_G2,
- VK_AARCH64_ABS_G2_NC,
- VK_AARCH64_ABS_G3,
-
- VK_AARCH64_SABS_G0, // :abs_g0_s:
- VK_AARCH64_SABS_G1,
- VK_AARCH64_SABS_G2,
-
- VK_AARCH64_DTPREL_G2, // :dtprel_g2:
- VK_AARCH64_DTPREL_G1,
- VK_AARCH64_DTPREL_G1_NC,
- VK_AARCH64_DTPREL_G0,
- VK_AARCH64_DTPREL_G0_NC,
- VK_AARCH64_DTPREL_HI12,
- VK_AARCH64_DTPREL_LO12,
- VK_AARCH64_DTPREL_LO12_NC,
-
- VK_AARCH64_GOTTPREL_G1, // :gottprel:
- VK_AARCH64_GOTTPREL_G0_NC,
- VK_AARCH64_GOTTPREL,
- VK_AARCH64_GOTTPREL_LO12,
-
- VK_AARCH64_TPREL_G2, // :tprel:
- VK_AARCH64_TPREL_G1,
- VK_AARCH64_TPREL_G1_NC,
- VK_AARCH64_TPREL_G0,
- VK_AARCH64_TPREL_G0_NC,
- VK_AARCH64_TPREL_HI12,
- VK_AARCH64_TPREL_LO12,
- VK_AARCH64_TPREL_LO12_NC,
-
- VK_AARCH64_TLSDESC, // :tlsdesc:
- VK_AARCH64_TLSDESC_LO12
+ VK_NONE = 0x000,
+
+ // Symbol locations specifying (roughly speaking) what calculation should be
+ // performed to construct the final address for the relocated
+ // symbol. E.g. direct, via the GOT, ...
+ VK_ABS = 0x001,
+ VK_SABS = 0x002,
+ VK_GOT = 0x003,
+ VK_DTPREL = 0x004,
+ VK_GOTTPREL = 0x005,
+ VK_TPREL = 0x006,
+ VK_TLSDESC = 0x007,
+ VK_SymLocBits = 0x00f,
+
+ // Variants specifying which part of the final address calculation is
+ // used. E.g. the low 12 bits for an ADD/LDR, the middle 16 bits for a
+ // MOVZ/MOVK.
+ VK_PAGE = 0x010,
+ VK_PAGEOFF = 0x020,
+ VK_HI12 = 0x030,
+ VK_G0 = 0x040,
+ VK_G1 = 0x050,
+ VK_G2 = 0x060,
+ VK_G3 = 0x070,
+ VK_AddressFragBits = 0x0f0,
+
+ // Whether the final relocation is a checked one (where a linker should
+ // perform a range-check on the final address) or not. Note that this field
+ // is unfortunately sometimes omitted from the assembly syntax. E.g. :lo12:
+ // on its own is a non-checked relocation. We side with ELF on being
+ // explicit about this!
+ VK_NC = 0x100,
+
+ // Convenience definitions for referring to specific textual representations
+ // of relocation specifiers. Note that this means the "_NC" is sometimes
+ // omitted in line with assembly syntax here (VK_LO12 rather than VK_LO12_NC
+ // since a user would write ":lo12:").
+ VK_CALL = VK_ABS,
+ VK_ABS_PAGE = VK_ABS | VK_PAGE,
+ VK_ABS_G3 = VK_ABS | VK_G3,
+ VK_ABS_G2 = VK_ABS | VK_G2,
+ VK_ABS_G2_S = VK_SABS | VK_G2,
+ VK_ABS_G2_NC = VK_ABS | VK_G2 | VK_NC,
+ VK_ABS_G1 = VK_ABS | VK_G1,
+ VK_ABS_G1_S = VK_SABS | VK_G1,
+ VK_ABS_G1_NC = VK_ABS | VK_G1 | VK_NC,
+ VK_ABS_G0 = VK_ABS | VK_G0,
+ VK_ABS_G0_S = VK_SABS | VK_G0,
+ VK_ABS_G0_NC = VK_ABS | VK_G0 | VK_NC,
+ VK_LO12 = VK_ABS | VK_PAGEOFF | VK_NC,
+ VK_GOT_LO12 = VK_GOT | VK_PAGEOFF | VK_NC,
+ VK_GOT_PAGE = VK_GOT | VK_PAGE,
+ VK_DTPREL_G2 = VK_DTPREL | VK_G2,
+ VK_DTPREL_G1 = VK_DTPREL | VK_G1,
+ VK_DTPREL_G1_NC = VK_DTPREL | VK_G1 | VK_NC,
+ VK_DTPREL_G0 = VK_DTPREL | VK_G0,
+ VK_DTPREL_G0_NC = VK_DTPREL | VK_G0 | VK_NC,
+ VK_DTPREL_HI12 = VK_DTPREL | VK_HI12,
+ VK_DTPREL_LO12 = VK_DTPREL | VK_PAGEOFF,
+ VK_DTPREL_LO12_NC = VK_DTPREL | VK_PAGEOFF | VK_NC,
+ VK_GOTTPREL_PAGE = VK_GOTTPREL | VK_PAGE,
+ VK_GOTTPREL_LO12_NC = VK_GOTTPREL | VK_PAGEOFF | VK_NC,
+ VK_GOTTPREL_G1 = VK_GOTTPREL | VK_G1,
+ VK_GOTTPREL_G0_NC = VK_GOTTPREL | VK_G0 | VK_NC,
+ VK_TPREL_G2 = VK_TPREL | VK_G2,
+ VK_TPREL_G1 = VK_TPREL | VK_G1,
+ VK_TPREL_G1_NC = VK_TPREL | VK_G1 | VK_NC,
+ VK_TPREL_G0 = VK_TPREL | VK_G0,
+ VK_TPREL_G0_NC = VK_TPREL | VK_G0 | VK_NC,
+ VK_TPREL_HI12 = VK_TPREL | VK_HI12,
+ VK_TPREL_LO12 = VK_TPREL | VK_PAGEOFF,
+ VK_TPREL_LO12_NC = VK_TPREL | VK_PAGEOFF | VK_NC,
+ VK_TLSDESC_LO12 = VK_TLSDESC | VK_PAGEOFF | VK_NC,
+ VK_TLSDESC_PAGE = VK_TLSDESC | VK_PAGE,
+
+ VK_INVALID = 0xfff
};
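+
+  // For example, VK_TPREL_LO12_NC decomposes as VK_TPREL (0x006) |
+  // VK_PAGEOFF (0x020) | VK_NC (0x100) = 0x126; masking with VK_SymLocBits
+  // yields VK_TPREL and masking with VK_AddressFragBits yields VK_PAGEOFF.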
private:
- const VariantKind Kind;
const MCExpr *Expr;
+ const VariantKind Kind;
- explicit AArch64MCExpr(VariantKind _Kind, const MCExpr *_Expr)
- : Kind(_Kind), Expr(_Expr) {}
+ explicit AArch64MCExpr(const MCExpr *Expr, VariantKind Kind)
+ : Expr(Expr), Kind(Kind) {}
public:
/// @name Construction
/// @{
- static const AArch64MCExpr *Create(VariantKind Kind, const MCExpr *Expr,
- MCContext &Ctx);
-
- static const AArch64MCExpr *CreateLo12(const MCExpr *Expr, MCContext &Ctx) {
- return Create(VK_AARCH64_LO12, Expr, Ctx);
- }
-
- static const AArch64MCExpr *CreateGOT(const MCExpr *Expr, MCContext &Ctx) {
- return Create(VK_AARCH64_GOT, Expr, Ctx);
- }
-
- static const AArch64MCExpr *CreateGOTLo12(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_GOT_LO12, Expr, Ctx);
- }
-
- static const AArch64MCExpr *CreateDTPREL_G1(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_DTPREL_G1, Expr, Ctx);
- }
-
- static const AArch64MCExpr *CreateDTPREL_G0_NC(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_DTPREL_G0_NC, Expr, Ctx);
- }
-
- static const AArch64MCExpr *CreateGOTTPREL(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_GOTTPREL, Expr, Ctx);
- }
-
- static const AArch64MCExpr *CreateGOTTPRELLo12(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_GOTTPREL_LO12, Expr, Ctx);
- }
-
- static const AArch64MCExpr *CreateTLSDesc(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_TLSDESC, Expr, Ctx);
- }
+ static const AArch64MCExpr *Create(const MCExpr *Expr, VariantKind Kind,
+ MCContext &Ctx);
- static const AArch64MCExpr *CreateTLSDescLo12(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_TLSDESC_LO12, Expr, Ctx);
- }
+ /// @}
+ /// @name Accessors
+ /// @{
- static const AArch64MCExpr *CreateTPREL_G1(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_TPREL_G1, Expr, Ctx);
- }
+ /// Get the kind of this expression.
+ VariantKind getKind() const { return static_cast<VariantKind>(Kind); }
- static const AArch64MCExpr *CreateTPREL_G0_NC(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_TPREL_G0_NC, Expr, Ctx);
- }
+ /// Get the expression this modifier applies to.
+ const MCExpr *getSubExpr() const { return Expr; }
- static const AArch64MCExpr *CreateABS_G3(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_ABS_G3, Expr, Ctx);
- }
+ /// @}
+ /// @name VariantKind information extractors.
+ /// @{
- static const AArch64MCExpr *CreateABS_G2_NC(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_ABS_G2_NC, Expr, Ctx);
+ static VariantKind getSymbolLoc(VariantKind Kind) {
+ return static_cast<VariantKind>(Kind & VK_SymLocBits);
}
- static const AArch64MCExpr *CreateABS_G1_NC(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_ABS_G1_NC, Expr, Ctx);
+ static VariantKind getAddressFrag(VariantKind Kind) {
+ return static_cast<VariantKind>(Kind & VK_AddressFragBits);
}
- static const AArch64MCExpr *CreateABS_G0_NC(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_ABS_G0_NC, Expr, Ctx);
- }
+ static bool isNotChecked(VariantKind Kind) { return Kind & VK_NC; }
/// @}
- /// @name Accessors
- /// @{
- /// getOpcode - Get the kind of this expression.
- VariantKind getKind() const { return Kind; }
+ /// Convert the variant kind into an ELF-appropriate modifier
+ /// (e.g. ":got:", ":lo12:").
+ StringRef getVariantKindName() const;
- /// getSubExpr - Get the child of this expression.
- const MCExpr *getSubExpr() const { return Expr; }
+ void PrintImpl(raw_ostream &OS) const override;
- /// @}
+ void AddValueSymbols(MCAssembler *) const override;
+
+ const MCSection *FindAssociatedSection() const override;
- void PrintImpl(raw_ostream &OS) const;
bool EvaluateAsRelocatableImpl(MCValue &Res,
- const MCAsmLayout *Layout) const;
- void AddValueSymbols(MCAssembler *) const;
- const MCSection *FindAssociatedSection() const {
- return getSubExpr()->FindAssociatedSection();
- }
+ const MCAsmLayout *Layout) const override;
- void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const;
+ void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override;
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Target;
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
index 3d19e42..ae698c5 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
@@ -1,4 +1,4 @@
-//===-- AArch64MCTargetDesc.cpp - AArch64 Target Descriptions -------------===//
+//===-- AArch64MCTargetDesc.cpp - AArch64 Target Descriptions ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,9 +15,7 @@
#include "AArch64ELFStreamer.h"
#include "AArch64MCAsmInfo.h"
#include "InstPrinter/AArch64InstPrinter.h"
-#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCCodeGenInfo.h"
-#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
@@ -25,8 +23,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
-#define GET_REGINFO_MC_DESC
-#include "AArch64GenRegisterInfo.inc"
+using namespace llvm;
#define GET_INSTRINFO_MC_DESC
#include "AArch64GenInstrInfo.inc"
@@ -34,26 +31,29 @@
#define GET_SUBTARGETINFO_MC_DESC
#include "AArch64GenSubtargetInfo.inc"
-using namespace llvm;
+#define GET_REGINFO_MC_DESC
+#include "AArch64GenRegisterInfo.inc"
-MCSubtargetInfo *AArch64_MC::createAArch64MCSubtargetInfo(StringRef TT,
- StringRef CPU,
- StringRef FS) {
- MCSubtargetInfo *X = new MCSubtargetInfo();
- InitAArch64MCSubtargetInfo(X, TT, CPU, FS);
+static MCInstrInfo *createAArch64MCInstrInfo() {
+ MCInstrInfo *X = new MCInstrInfo();
+ InitAArch64MCInstrInfo(X);
return X;
}
+static MCSubtargetInfo *
+createAArch64MCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS) {
+ MCSubtargetInfo *X = new MCSubtargetInfo();
-static MCInstrInfo *createAArch64MCInstrInfo() {
- MCInstrInfo *X = new MCInstrInfo();
- InitAArch64MCInstrInfo(X);
+ if (CPU.empty())
+ CPU = "generic";
+
+ InitAArch64MCSubtargetInfo(X, TT, CPU, FS);
return X;
}
static MCRegisterInfo *createAArch64MCRegisterInfo(StringRef Triple) {
MCRegisterInfo *X = new MCRegisterInfo();
- InitAArch64MCRegisterInfo(X, AArch64::X30);
+ InitAArch64MCRegisterInfo(X, AArch64::LR);
return X;
}
@@ -61,9 +61,17 @@ static MCAsmInfo *createAArch64MCAsmInfo(const MCRegisterInfo &MRI,
StringRef TT) {
Triple TheTriple(TT);
- MCAsmInfo *MAI = new AArch64ELFMCAsmInfo(TT);
- unsigned Reg = MRI.getDwarfRegNum(AArch64::XSP, true);
- MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, Reg, 0);
+ MCAsmInfo *MAI;
+ if (TheTriple.isOSDarwin())
+ MAI = new AArch64MCAsmInfoDarwin();
+ else {
+ assert(TheTriple.isOSBinFormatELF() && "Only expect Darwin or ELF");
+ MAI = new AArch64MCAsmInfoELF(TT);
+ }
+
+ // Initial state of the frame pointer is SP.
+ unsigned Reg = MRI.getDwarfRegNum(AArch64::SP, true);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 0);
MAI->addInitialFrameState(Inst);
return MAI;
@@ -72,40 +80,35 @@ static MCAsmInfo *createAArch64MCAsmInfo(const MCRegisterInfo &MRI,
static MCCodeGenInfo *createAArch64MCCodeGenInfo(StringRef TT, Reloc::Model RM,
CodeModel::Model CM,
CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
- if (RM == Reloc::Default || RM == Reloc::DynamicNoPIC) {
- // On ELF platforms the default static relocation model has a smart enough
- // linker to cope with referencing external symbols defined in a shared
- // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
- RM = Reloc::Static;
- }
+ Triple TheTriple(TT);
+ assert((TheTriple.isOSBinFormatELF() || TheTriple.isOSBinFormatMachO()) &&
+ "Only expect Darwin and ELF targets");
if (CM == CodeModel::Default)
CM = CodeModel::Small;
- else if (CM == CodeModel::JITDefault) {
- // The default MCJIT memory managers make no guarantees about where they can
- // find an executable page; JITed code needs to be able to refer to globals
- // no matter how far away they are.
+ // The default MCJIT memory managers make no guarantees about where they can
+ // find an executable page; JITed code needs to be able to refer to globals
+ // no matter how far away they are.
+ else if (CM == CodeModel::JITDefault)
CM = CodeModel::Large;
- }
+ else if (CM != CodeModel::Small && CM != CodeModel::Large)
+ report_fatal_error(
+ "Only small and large code models are allowed on AArch64");
+
+ // AArch64 Darwin is always PIC.
+ if (TheTriple.isOSDarwin())
+ RM = Reloc::PIC_;
+ // On ELF platforms the default static relocation model has a smart enough
+ // linker to cope with referencing external symbols defined in a shared
+ // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
+ else if (RM == Reloc::Default || RM == Reloc::DynamicNoPIC)
+ RM = Reloc::Static;
+ MCCodeGenInfo *X = new MCCodeGenInfo();
X->InitMCCodeGenInfo(RM, CM, OL);
return X;
}
-static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
- MCContext &Ctx, MCAsmBackend &MAB,
- raw_ostream &OS,
- MCCodeEmitter *Emitter,
- const MCSubtargetInfo &STI,
- bool RelaxAll,
- bool NoExecStack) {
- Triple TheTriple(TT);
-
- return createAArch64ELFStreamer(Ctx, MAB, OS, Emitter, RelaxAll, NoExecStack);
-}
-
-
static MCInstPrinter *createAArch64MCInstPrinter(const Target &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
@@ -114,108 +117,109 @@ static MCInstPrinter *createAArch64MCInstPrinter(const Target &T,
const MCSubtargetInfo &STI) {
if (SyntaxVariant == 0)
return new AArch64InstPrinter(MAI, MII, MRI, STI);
- return 0;
-}
-
-namespace {
-
-class AArch64MCInstrAnalysis : public MCInstrAnalysis {
-public:
- AArch64MCInstrAnalysis(const MCInstrInfo *Info) : MCInstrAnalysis(Info) {}
-
- virtual bool isUnconditionalBranch(const MCInst &Inst) const {
- if (Inst.getOpcode() == AArch64::Bcc
- && Inst.getOperand(0).getImm() == A64CC::AL)
- return true;
- return MCInstrAnalysis::isUnconditionalBranch(Inst);
- }
-
- virtual bool isConditionalBranch(const MCInst &Inst) const {
- if (Inst.getOpcode() == AArch64::Bcc
- && Inst.getOperand(0).getImm() == A64CC::AL)
- return false;
- return MCInstrAnalysis::isConditionalBranch(Inst);
- }
-
- bool evaluateBranch(const MCInst &Inst, uint64_t Addr,
- uint64_t Size, uint64_t &Target) const {
- unsigned LblOperand = Inst.getOpcode() == AArch64::Bcc ? 1 : 0;
- // FIXME: We only handle PCRel branches for now.
- if (Info->get(Inst.getOpcode()).OpInfo[LblOperand].OperandType
- != MCOI::OPERAND_PCREL)
- return false;
-
- int64_t Imm = Inst.getOperand(LblOperand).getImm();
- Target = Addr + Imm;
- return true;
- }
-};
+ if (SyntaxVariant == 1)
+ return new AArch64AppleInstPrinter(MAI, MII, MRI, STI);
+ return nullptr;
}
-static MCInstrAnalysis *createAArch64MCInstrAnalysis(const MCInstrInfo *Info) {
- return new AArch64MCInstrAnalysis(Info);
-}
+static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
+ MCContext &Ctx, MCAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter,
+ const MCSubtargetInfo &STI, bool RelaxAll,
+ bool NoExecStack) {
+ Triple TheTriple(TT);
+ if (TheTriple.isOSDarwin())
+ return createMachOStreamer(Ctx, TAB, OS, Emitter, RelaxAll,
+ /*LabelSections*/ true);
+ return createAArch64ELFStreamer(Ctx, TAB, OS, Emitter, RelaxAll, NoExecStack);
+}
+// Force static initialization.
extern "C" void LLVMInitializeAArch64TargetMC() {
// Register the MC asm info.
- RegisterMCAsmInfoFn A(TheAArch64leTarget, createAArch64MCAsmInfo);
- RegisterMCAsmInfoFn B(TheAArch64beTarget, createAArch64MCAsmInfo);
+ RegisterMCAsmInfoFn X(TheAArch64leTarget, createAArch64MCAsmInfo);
+ RegisterMCAsmInfoFn Y(TheAArch64beTarget, createAArch64MCAsmInfo);
+ RegisterMCAsmInfoFn Z(TheARM64leTarget, createAArch64MCAsmInfo);
+ RegisterMCAsmInfoFn W(TheARM64beTarget, createAArch64MCAsmInfo);
// Register the MC codegen info.
TargetRegistry::RegisterMCCodeGenInfo(TheAArch64leTarget,
createAArch64MCCodeGenInfo);
TargetRegistry::RegisterMCCodeGenInfo(TheAArch64beTarget,
createAArch64MCCodeGenInfo);
+ TargetRegistry::RegisterMCCodeGenInfo(TheARM64leTarget,
+ createAArch64MCCodeGenInfo);
+ TargetRegistry::RegisterMCCodeGenInfo(TheARM64beTarget,
+ createAArch64MCCodeGenInfo);
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(TheAArch64leTarget,
createAArch64MCInstrInfo);
TargetRegistry::RegisterMCInstrInfo(TheAArch64beTarget,
createAArch64MCInstrInfo);
+ TargetRegistry::RegisterMCInstrInfo(TheARM64leTarget,
+ createAArch64MCInstrInfo);
+ TargetRegistry::RegisterMCInstrInfo(TheARM64beTarget,
+ createAArch64MCInstrInfo);
// Register the MC register info.
TargetRegistry::RegisterMCRegInfo(TheAArch64leTarget,
createAArch64MCRegisterInfo);
TargetRegistry::RegisterMCRegInfo(TheAArch64beTarget,
createAArch64MCRegisterInfo);
+ TargetRegistry::RegisterMCRegInfo(TheARM64leTarget,
+ createAArch64MCRegisterInfo);
+ TargetRegistry::RegisterMCRegInfo(TheARM64beTarget,
+ createAArch64MCRegisterInfo);
// Register the MC subtarget info.
- using AArch64_MC::createAArch64MCSubtargetInfo;
TargetRegistry::RegisterMCSubtargetInfo(TheAArch64leTarget,
createAArch64MCSubtargetInfo);
TargetRegistry::RegisterMCSubtargetInfo(TheAArch64beTarget,
createAArch64MCSubtargetInfo);
+ TargetRegistry::RegisterMCSubtargetInfo(TheARM64leTarget,
+ createAArch64MCSubtargetInfo);
+ TargetRegistry::RegisterMCSubtargetInfo(TheARM64beTarget,
+ createAArch64MCSubtargetInfo);
- // Register the MC instruction analyzer.
- TargetRegistry::RegisterMCInstrAnalysis(TheAArch64leTarget,
- createAArch64MCInstrAnalysis);
- TargetRegistry::RegisterMCInstrAnalysis(TheAArch64beTarget,
- createAArch64MCInstrAnalysis);
+ // Register the asm backend.
+ TargetRegistry::RegisterMCAsmBackend(TheAArch64leTarget,
+ createAArch64leAsmBackend);
+ TargetRegistry::RegisterMCAsmBackend(TheAArch64beTarget,
+ createAArch64beAsmBackend);
+ TargetRegistry::RegisterMCAsmBackend(TheARM64leTarget,
+ createAArch64leAsmBackend);
+ TargetRegistry::RegisterMCAsmBackend(TheARM64beTarget,
+ createAArch64beAsmBackend);
// Register the MC Code Emitter
TargetRegistry::RegisterMCCodeEmitter(TheAArch64leTarget,
createAArch64MCCodeEmitter);
TargetRegistry::RegisterMCCodeEmitter(TheAArch64beTarget,
createAArch64MCCodeEmitter);
-
- // Register the asm backend.
- TargetRegistry::RegisterMCAsmBackend(TheAArch64leTarget,
- createAArch64leAsmBackend);
- TargetRegistry::RegisterMCAsmBackend(TheAArch64beTarget,
- createAArch64beAsmBackend);
+ TargetRegistry::RegisterMCCodeEmitter(TheARM64leTarget,
+ createAArch64MCCodeEmitter);
+ TargetRegistry::RegisterMCCodeEmitter(TheARM64beTarget,
+ createAArch64MCCodeEmitter);
// Register the object streamer.
TargetRegistry::RegisterMCObjectStreamer(TheAArch64leTarget,
createMCStreamer);
TargetRegistry::RegisterMCObjectStreamer(TheAArch64beTarget,
createMCStreamer);
+ TargetRegistry::RegisterMCObjectStreamer(TheARM64leTarget, createMCStreamer);
+ TargetRegistry::RegisterMCObjectStreamer(TheARM64beTarget, createMCStreamer);
// Register the MCInstPrinter.
TargetRegistry::RegisterMCInstPrinter(TheAArch64leTarget,
createAArch64MCInstPrinter);
TargetRegistry::RegisterMCInstPrinter(TheAArch64beTarget,
createAArch64MCInstPrinter);
+ TargetRegistry::RegisterMCInstPrinter(TheARM64leTarget,
+ createAArch64MCInstPrinter);
+ TargetRegistry::RegisterMCInstPrinter(TheARM64beTarget,
+ createAArch64MCInstPrinter);
}
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
index bd8beaf..d886ea2 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
@@ -11,18 +11,19 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_AARCH64MCTARGETDESC_H
-#define LLVM_AARCH64MCTARGETDESC_H
+#ifndef AArch64MCTARGETDESC_H
+#define AArch64MCTARGETDESC_H
#include "llvm/Support/DataTypes.h"
+#include <string>
namespace llvm {
class MCAsmBackend;
class MCCodeEmitter;
class MCContext;
class MCInstrInfo;
-class MCObjectWriter;
class MCRegisterInfo;
+class MCObjectWriter;
class MCSubtargetInfo;
class StringRef;
class Target;
@@ -30,28 +31,25 @@ class raw_ostream;
extern Target TheAArch64leTarget;
extern Target TheAArch64beTarget;
-
-namespace AArch64_MC {
- MCSubtargetInfo *createAArch64MCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS);
-}
+extern Target TheARM64leTarget;
+extern Target TheARM64beTarget;
MCCodeEmitter *createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
- const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI,
- MCContext &Ctx);
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx);
+MCAsmBackend *createAArch64leAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI, StringRef TT,
+ StringRef CPU);
+MCAsmBackend *createAArch64beAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI, StringRef TT,
+ StringRef CPU);
-MCObjectWriter *createAArch64ELFObjectWriter(raw_ostream &OS,
- uint8_t OSABI,
+MCObjectWriter *createAArch64ELFObjectWriter(raw_ostream &OS, uint8_t OSABI,
bool IsLittleEndian);
-MCAsmBackend *createAArch64leAsmBackend(const Target &T,
- const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
-
-MCAsmBackend *createAArch64beAsmBackend(const Target &T,
- const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+MCObjectWriter *createAArch64MachObjectWriter(raw_ostream &OS, uint32_t CPUType,
+ uint32_t CPUSubtype);
} // End llvm namespace
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
new file mode 100644
index 0000000..5c86189
--- /dev/null
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
@@ -0,0 +1,396 @@
+//===-- AArch64MachObjectWriter.cpp - AArch64 Mach Object Writer ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/AArch64FixupKinds.h"
+#include "MCTargetDesc/AArch64MCTargetDesc.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCMachObjectWriter.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachO.h"
+using namespace llvm;
+
+namespace {
+class AArch64MachObjectWriter : public MCMachObjectTargetWriter {
+ bool getAArch64FixupKindMachOInfo(const MCFixup &Fixup, unsigned &RelocType,
+ const MCSymbolRefExpr *Sym,
+ unsigned &Log2Size, const MCAssembler &Asm);
+
+public:
+ AArch64MachObjectWriter(uint32_t CPUType, uint32_t CPUSubtype)
+ : MCMachObjectTargetWriter(true /* is64Bit */, CPUType, CPUSubtype,
+ /*UseAggressiveSymbolFolding=*/true) {}
+
+ void RecordRelocation(MachObjectWriter *Writer, const MCAssembler &Asm,
+ const MCAsmLayout &Layout, const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) override;
+};
+}
+
+bool AArch64MachObjectWriter::getAArch64FixupKindMachOInfo(
+ const MCFixup &Fixup, unsigned &RelocType, const MCSymbolRefExpr *Sym,
+ unsigned &Log2Size, const MCAssembler &Asm) {
+ RelocType = unsigned(MachO::ARM64_RELOC_UNSIGNED);
+ Log2Size = ~0U;
+
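+  // Log2Size becomes the 2-bit r_length field of the Mach-O relocation
+  // entry: the log2 of the number of bytes being relocated, so
+  // Log2_32(4) == 2 for the 4-byte instruction words handled below.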
+ switch ((unsigned)Fixup.getKind()) {
+ default:
+ return false;
+
+ case FK_Data_1:
+ Log2Size = llvm::Log2_32(1);
+ return true;
+ case FK_Data_2:
+ Log2Size = llvm::Log2_32(2);
+ return true;
+ case FK_Data_4:
+ Log2Size = llvm::Log2_32(4);
+ if (Sym->getKind() == MCSymbolRefExpr::VK_GOT)
+ RelocType = unsigned(MachO::ARM64_RELOC_POINTER_TO_GOT);
+ return true;
+ case FK_Data_8:
+ Log2Size = llvm::Log2_32(8);
+ if (Sym->getKind() == MCSymbolRefExpr::VK_GOT)
+ RelocType = unsigned(MachO::ARM64_RELOC_POINTER_TO_GOT);
+ return true;
+ case AArch64::fixup_aarch64_add_imm12:
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ Log2Size = llvm::Log2_32(4);
+ switch (Sym->getKind()) {
+    default:
+      llvm_unreachable("Unexpected symbol reference variant kind!");
+ case MCSymbolRefExpr::VK_PAGEOFF:
+ RelocType = unsigned(MachO::ARM64_RELOC_PAGEOFF12);
+ return true;
+ case MCSymbolRefExpr::VK_GOTPAGEOFF:
+ RelocType = unsigned(MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12);
+ return true;
+ case MCSymbolRefExpr::VK_TLVPPAGEOFF:
+ RelocType = unsigned(MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12);
+ return true;
+ }
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ Log2Size = llvm::Log2_32(4);
+ // This encompasses the relocation for the whole 21-bit value.
+ switch (Sym->getKind()) {
+ default:
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "ADR/ADRP relocations must be GOT relative");
+ case MCSymbolRefExpr::VK_PAGE:
+ RelocType = unsigned(MachO::ARM64_RELOC_PAGE21);
+ return true;
+ case MCSymbolRefExpr::VK_GOTPAGE:
+ RelocType = unsigned(MachO::ARM64_RELOC_GOT_LOAD_PAGE21);
+ return true;
+ case MCSymbolRefExpr::VK_TLVPPAGE:
+ RelocType = unsigned(MachO::ARM64_RELOC_TLVP_LOAD_PAGE21);
+ return true;
+ }
+ return true;
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ case AArch64::fixup_aarch64_pcrel_call26:
+ Log2Size = llvm::Log2_32(4);
+ RelocType = unsigned(MachO::ARM64_RELOC_BRANCH26);
+ return true;
+ }
+}
+
+void AArch64MachObjectWriter::RecordRelocation(
+ MachObjectWriter *Writer, const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
+ unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
+
+ // See <reloc.h>.
+ uint32_t FixupOffset = Layout.getFragmentOffset(Fragment);
+ unsigned Log2Size = 0;
+ int64_t Value = 0;
+ unsigned Index = 0;
+ unsigned IsExtern = 0;
+ unsigned Type = 0;
+ unsigned Kind = Fixup.getKind();
+
+ FixupOffset += Fixup.getOffset();
+
+ // AArch64 pcrel relocation addends do not include the section offset.
+ if (IsPCRel)
+ FixedValue += FixupOffset;
+
+ // ADRP fixups use relocations for the whole symbol value and only
+ // put the addend in the instruction itself. Clear out any value the
+  // generic code figured out from the symbol definition.
+ if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
+ FixedValue = 0;
+
+  // imm19 relocations are for conditional branches, which require
+  // assembler-local labels. If we got here, that's not what we have,
+  // so complain loudly.
+ if (Kind == AArch64::fixup_aarch64_pcrel_branch19) {
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "conditional branch requires assembler-local"
+ " label. '" +
+ Target.getSymA()->getSymbol().getName() +
+ "' is external.");
+ return;
+ }
+
+ // 14-bit branch relocations should only target internal labels, and so
+ // should never get here.
+ if (Kind == AArch64::fixup_aarch64_pcrel_branch14) {
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "Invalid relocation on conditional branch!");
+ return;
+ }
+
+ if (!getAArch64FixupKindMachOInfo(Fixup, Type, Target.getSymA(), Log2Size,
+ Asm)) {
+ Asm.getContext().FatalError(Fixup.getLoc(), "unknown AArch64 fixup kind!");
+ return;
+ }
+
+ Value = Target.getConstant();
+
+ if (Target.isAbsolute()) { // constant
+ // FIXME: Should this always be extern?
+ // SymbolNum of 0 indicates the absolute section.
+ Type = MachO::ARM64_RELOC_UNSIGNED;
+ Index = 0;
+
+ if (IsPCRel) {
+ IsExtern = 1;
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "PC relative absolute relocation!");
+
+ // FIXME: x86_64 sets the type to a branch reloc here. Should we do
+ // something similar?
+ }
+ } else if (Target.getSymB()) { // A - B + constant
+ const MCSymbol *A = &Target.getSymA()->getSymbol();
+ const MCSymbolData &A_SD = Asm.getSymbolData(*A);
+ const MCSymbolData *A_Base = Asm.getAtom(&A_SD);
+
+ const MCSymbol *B = &Target.getSymB()->getSymbol();
+ const MCSymbolData &B_SD = Asm.getSymbolData(*B);
+ const MCSymbolData *B_Base = Asm.getAtom(&B_SD);
+
+ // Check for "_foo@got - .", which comes through here as:
+ // Ltmp0:
+ // ... _foo@got - Ltmp0
+ if (Target.getSymA()->getKind() == MCSymbolRefExpr::VK_GOT &&
+ Target.getSymB()->getKind() == MCSymbolRefExpr::VK_None &&
+ Layout.getSymbolOffset(&B_SD) ==
+ Layout.getFragmentOffset(Fragment) + Fixup.getOffset()) {
+ // SymB is the PC, so use a PC-rel pointer-to-GOT relocation.
+ Index = A_Base->getIndex();
+ IsExtern = 1;
+ Type = MachO::ARM64_RELOC_POINTER_TO_GOT;
+ IsPCRel = 1;
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
+ (IsExtern << 27) | (Type << 28));
+ Writer->addRelocation(Fragment->getParent(), MRE);
+ return;
+ } else if (Target.getSymA()->getKind() != MCSymbolRefExpr::VK_None ||
+ Target.getSymB()->getKind() != MCSymbolRefExpr::VK_None)
+ // Otherwise, neither symbol can be modified.
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "unsupported relocation of modified symbol");
+
+ // We don't support PCrel relocations of differences.
+ if (IsPCRel)
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "unsupported pc-relative relocation of "
+ "difference");
+
+ // AArch64 always uses external relocations. If there is no symbol to use as
+ // a base address (a local symbol with no preceding non-local symbol),
+ // error out.
+ //
+ // FIXME: We should probably just synthesize an external symbol and use
+ // that.
+ if (!A_Base)
+ Asm.getContext().FatalError(
+ Fixup.getLoc(),
+ "unsupported relocation of local symbol '" + A->getName() +
+ "'. Must have non-local symbol earlier in section.");
+ if (!B_Base)
+ Asm.getContext().FatalError(
+ Fixup.getLoc(),
+ "unsupported relocation of local symbol '" + B->getName() +
+ "'. Must have non-local symbol earlier in section.");
+
+ if (A_Base == B_Base && A_Base)
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "unsupported relocation with identical base");
+
+ Value += (!A_SD.getFragment() ? 0
+ : Writer->getSymbolAddress(&A_SD, Layout)) -
+ (!A_Base || !A_Base->getFragment()
+ ? 0
+ : Writer->getSymbolAddress(A_Base, Layout));
+ Value -= (!B_SD.getFragment() ? 0
+ : Writer->getSymbolAddress(&B_SD, Layout)) -
+ (!B_Base || !B_Base->getFragment()
+ ? 0
+ : Writer->getSymbolAddress(B_Base, Layout));
+
+ Index = A_Base->getIndex();
+ IsExtern = 1;
+ Type = MachO::ARM64_RELOC_UNSIGNED;
+
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
+ (IsExtern << 27) | (Type << 28));
+ Writer->addRelocation(Fragment->getParent(), MRE);
+
+ Index = B_Base->getIndex();
+ IsExtern = 1;
+ Type = MachO::ARM64_RELOC_SUBTRACTOR;
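+    // Relocation entries are written out in the reverse of the order they
+    // are added, so this SUBTRACTOR entry ends up immediately before the
+    // UNSIGNED entry emitted above, the pairing the ARM64 Mach-O format
+    // requires for an A - B expression.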
+ } else { // A + constant
+ const MCSymbol *Symbol = &Target.getSymA()->getSymbol();
+ const MCSymbolData &SD = Asm.getSymbolData(*Symbol);
+ const MCSymbolData *Base = Asm.getAtom(&SD);
+ const MCSectionMachO &Section = static_cast<const MCSectionMachO &>(
+ Fragment->getParent()->getSection());
+
+    // If the symbol is a variable and we weren't able to get a Base for it
+    // (i.e., it's not in the symbol table associated with a section), resolve
+    // the relocation based on its expansion instead.
+ if (Symbol->isVariable() && !Base) {
+ // If the evaluation is an absolute value, just use that directly
+ // to keep things easy.
+ int64_t Res;
+ if (SD.getSymbol().getVariableValue()->EvaluateAsAbsolute(
+ Res, Layout, Writer->getSectionAddressMap())) {
+ FixedValue = Res;
+ return;
+ }
+
+ // FIXME: Will the Target we already have ever have any data in it
+ // we need to preserve and merge with the new Target? How about
+ // the FixedValue?
+ if (!Symbol->getVariableValue()->EvaluateAsRelocatable(Target, &Layout))
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "unable to resolve variable '" +
+ Symbol->getName() + "'");
+ return RecordRelocation(Writer, Asm, Layout, Fragment, Fixup, Target,
+ FixedValue);
+ }
+
+ // Relocations inside debug sections always use local relocations when
+ // possible. This seems to be done because the debugger doesn't fully
+ // understand relocation entries and expects to find values that
+ // have already been fixed up.
+ if (Symbol->isInSection()) {
+ if (Section.hasAttribute(MachO::S_ATTR_DEBUG))
+ Base = nullptr;
+ }
+
+ // AArch64 uses external relocations as much as possible. For debug
+ // sections, and for pointer-sized relocations (.quad), we allow section
+ // relocations. It's code sections that run into trouble.
+ if (Base) {
+ Index = Base->getIndex();
+ IsExtern = 1;
+
+ // Add the local offset, if needed.
+ if (Base != &SD)
+ Value += Layout.getSymbolOffset(&SD) - Layout.getSymbolOffset(Base);
+ } else if (Symbol->isInSection()) {
+ // Pointer-sized relocations can use a local relocation. Otherwise,
+ // we have to be in a debug info section.
+ if (!Section.hasAttribute(MachO::S_ATTR_DEBUG) && Log2Size != 3)
+ Asm.getContext().FatalError(
+ Fixup.getLoc(),
+ "unsupported relocation of local symbol '" + Symbol->getName() +
+ "'. Must have non-local symbol earlier in section.");
+ // Adjust the relocation to be section-relative.
+ // The index is the section ordinal (1-based).
+ const MCSectionData &SymSD =
+ Asm.getSectionData(SD.getSymbol().getSection());
+ Index = SymSD.getOrdinal() + 1;
+ IsExtern = 0;
+ Value += Writer->getSymbolAddress(&SD, Layout);
+
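+      // The pcrel base is the address just past the fixed-up field, hence
+      // the extra (1ULL << Log2Size) term.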
+ if (IsPCRel)
+ Value -= Writer->getFragmentAddress(Fragment, Layout) +
+ Fixup.getOffset() + (1ULL << Log2Size);
+ } else {
+ // Resolve constant variables.
+ if (SD.getSymbol().isVariable()) {
+ int64_t Res;
+ if (SD.getSymbol().getVariableValue()->EvaluateAsAbsolute(
+ Res, Layout, Writer->getSectionAddressMap())) {
+ FixedValue = Res;
+ return;
+ }
+ }
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "unsupported relocation of variable '" +
+ Symbol->getName() + "'");
+ }
+ }
+
+ // If the relocation kind is Branch26, Page21, or Pageoff12, any addend
+ // is represented via an Addend relocation, not encoded directly into
+ // the instruction.
+ if ((Type == MachO::ARM64_RELOC_BRANCH26 ||
+ Type == MachO::ARM64_RELOC_PAGE21 ||
+ Type == MachO::ARM64_RELOC_PAGEOFF12) &&
+ Value) {
+    assert((Value & 0xff000000) == 0 && "Addend out of range!");
+
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
+ (IsExtern << 27) | (Type << 28));
+ Writer->addRelocation(Fragment->getParent(), MRE);
+
+ // Now set up the Addend relocation.
+ Type = MachO::ARM64_RELOC_ADDEND;
+ Index = Value;
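+    // The addend travels in the 24-bit r_symbolnum field of the ADDEND
+    // entry, which is why the assert above rejects values needing more
+    // than 24 bits.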
+ IsPCRel = 0;
+ Log2Size = 2;
+ IsExtern = 0;
+
+ // Put zero into the instruction itself. The addend is in the relocation.
+ Value = 0;
+ }
+
+ // If there's any addend left to handle, encode it in the instruction.
+ FixedValue = Value;
+
+ // struct relocation_info (8 bytes)
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
+ (IsExtern << 27) | (Type << 28));
+ Writer->addRelocation(Fragment->getParent(), MRE);
+}
+
+MCObjectWriter *llvm::createAArch64MachObjectWriter(raw_ostream &OS,
+ uint32_t CPUType,
+ uint32_t CPUSubtype) {
+ return createMachObjectWriter(
+ new AArch64MachObjectWriter(CPUType, CPUSubtype), OS,
+ /*IsLittleEndian=*/true);
+}
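
The three hand-packed r_word1 expressions in RecordRelocation all encode struct relocation_info from <mach-o/reloc.h>: r_symbolnum in bits 0-23, r_pcrel in bit 24, r_length in bits 25-26, r_extern in bit 27, and r_type in bits 28-31. A hypothetical helper (not part of the patch) that makes the layout explicit:

// Illustrative only; mirrors the three r_word1 expressions above.
static uint32_t packRelocWord1(unsigned Index, unsigned IsPCRel,
                               unsigned Log2Size, unsigned IsExtern,
                               unsigned Type) {
  return (Index << 0)      |  // r_symbolnum: symbol index or section ordinal
         (IsPCRel << 24)   |  // r_pcrel
         (Log2Size << 25)  |  // r_length: log2 of the relocated width
         (IsExtern << 27)  |  // r_extern
         (Type << 28);        // r_type: one of the ARM64_RELOC_* values
}
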
diff --git a/lib/Target/AArch64/MCTargetDesc/Android.mk b/lib/Target/AArch64/MCTargetDesc/Android.mk
index edcf1f2..c0cdb2b 100644
--- a/lib/Target/AArch64/MCTargetDesc/Android.mk
+++ b/lib/Target/AArch64/MCTargetDesc/Android.mk
@@ -10,6 +10,7 @@ arm64_mc_desc_SRC_FILES := \
AArch64AsmBackend.cpp \
AArch64ELFObjectWriter.cpp \
AArch64ELFStreamer.cpp \
+ AArch64MachObjectWriter.cpp \
AArch64MCAsmInfo.cpp \
AArch64MCCodeEmitter.cpp \
AArch64MCExpr.cpp \
diff --git a/lib/Target/AArch64/MCTargetDesc/CMakeLists.txt b/lib/Target/AArch64/MCTargetDesc/CMakeLists.txt
index 54c4465..7d5bced 100644
--- a/lib/Target/AArch64/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/AArch64/MCTargetDesc/CMakeLists.txt
@@ -6,4 +6,9 @@ add_llvm_library(LLVMAArch64Desc
AArch64MCCodeEmitter.cpp
AArch64MCExpr.cpp
AArch64MCTargetDesc.cpp
- )
+ AArch64MachObjectWriter.cpp
+)
+add_dependencies(LLVMAArch64Desc AArch64CommonTableGen)
+
+# Hack: we need to include 'main' target directory to grab private headers
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/.. ${CMAKE_CURRENT_BINARY_DIR}/..)
diff --git a/lib/Target/AArch64/MCTargetDesc/LLVMBuild.txt b/lib/Target/AArch64/MCTargetDesc/LLVMBuild.txt
index 37c8035..70cff0b 100644
--- a/lib/Target/AArch64/MCTargetDesc/LLVMBuild.txt
+++ b/lib/Target/AArch64/MCTargetDesc/LLVMBuild.txt
@@ -1,4 +1,4 @@
-;===- ./lib/Target/AArch64/MCTargetDesc/LLVMBuild.txt ----------*- Conf -*--===;
+;===- ./lib/Target/AArch64/MCTargetDesc/LLVMBuild.txt ------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;