Diffstat (limited to 'lib/Target/AArch64/AArch64TargetMachine.cpp')
-rw-r--r--  lib/Target/AArch64/AArch64TargetMachine.cpp | 95
1 file changed, 87 insertions(+), 8 deletions(-)
diff --git a/lib/Target/AArch64/AArch64TargetMachine.cpp b/lib/Target/AArch64/AArch64TargetMachine.cpp
index 722e5e7..beed8e0 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -12,8 +12,11 @@
#include "AArch64.h"
#include "AArch64TargetMachine.h"
-#include "llvm/PassManager.h"
+#include "AArch64TargetObjectFile.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/IR/Function.h"
+#include "llvm/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
@@ -24,6 +27,10 @@ static cl::opt<bool>
EnableCCMP("aarch64-ccmp", cl::desc("Enable the CCMP formation pass"),
cl::init(true), cl::Hidden);
+static cl::opt<bool> EnableMCR("aarch64-mcr",
+ cl::desc("Enable the machine combiner pass"),
+ cl::init(true), cl::Hidden);
+
static cl::opt<bool>
EnableStPairSuppress("aarch64-stp-suppress", cl::desc("Suppress STP for AArch64"),
cl::init(true), cl::Hidden);
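
Both new switches follow LLVM's usual hidden cl::opt pattern. A minimal standalone sketch of that pattern (the flag and function names here are illustrative, not from this patch):

    #include "llvm/Support/CommandLine.h"

    // A hidden boolean flag: settable on the command line (e.g.
    // llc -example-combine=false) but omitted from -help output.
    static llvm::cl::opt<bool> EnableExampleCombine(
        "example-combine",
        llvm::cl::desc("Enable the example combine pass"),
        llvm::cl::init(true), llvm::cl::Hidden);

    // cl::opt<bool> converts implicitly to bool at use sites.
    static bool shouldRunExampleCombine() { return EnableExampleCombine; }

With this patch applied, llc -aarch64-mcr=false would toggle off the machine combiner the same way.
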
@@ -64,19 +71,36 @@ EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
cl::desc("Run early if-conversion"),
cl::init(true));
+static cl::opt<bool>
+EnableCondOpt("aarch64-condopt",
+ cl::desc("Enable the condition optimizer pass"),
+ cl::init(true), cl::Hidden);
static cl::opt<bool>
EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
cl::desc("Work around Cortex-A53 erratum 835769"),
cl::init(false));
+static cl::opt<bool>
+EnableGEPOpt("aarch64-gep-opt", cl::Hidden,
+ cl::desc("Enable optimizations on complex GEPs"),
+ cl::init(true));
+
extern "C" void LLVMInitializeAArch64Target() {
// Register the target.
RegisterTargetMachine<AArch64leTargetMachine> X(TheAArch64leTarget);
RegisterTargetMachine<AArch64beTargetMachine> Y(TheAArch64beTarget);
+ RegisterTargetMachine<AArch64leTargetMachine> Z(TheARM64Target);
+}
- RegisterTargetMachine<AArch64leTargetMachine> Z(TheARM64leTarget);
- RegisterTargetMachine<AArch64beTargetMachine> W(TheARM64beTarget);
+//===----------------------------------------------------------------------===//
+// AArch64 Lowering public interface.
+//===----------------------------------------------------------------------===//
+static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
+ if (TT.isOSBinFormatMachO())
+ return make_unique<AArch64_MachoTargetObjectFile>();
+
+ return make_unique<AArch64_ELFTargetObjectFile>();
}
/// TargetMachine ctor - Create an AArch64 architecture model.
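
createTLOF dispatches on the triple's object-file format. A standalone illustration of the Triple queries it relies on (example triples; assumes LLVM's ADT headers):

    #include "llvm/ADT/Triple.h"
    #include <cassert>

    int main() {
      // Darwin-style triples report Mach-O and would take the Mach-O
      // lowering; everything else falls through to the ELF variant.
      assert(llvm::Triple("arm64-apple-ios").isOSBinFormatMachO());
      assert(llvm::Triple("aarch64-linux-gnu").isOSBinFormatELF());
      return 0;
    }
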
@@ -88,10 +112,39 @@ AArch64TargetMachine::AArch64TargetMachine(const Target &T, StringRef TT,
CodeGenOpt::Level OL,
bool LittleEndian)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS, *this, LittleEndian) {
+ TLOF(createTLOF(Triple(getTargetTriple()))),
+ Subtarget(TT, CPU, FS, *this, LittleEndian), isLittle(LittleEndian) {
initAsmInfo();
}
+AArch64TargetMachine::~AArch64TargetMachine() {}
+
+const AArch64Subtarget *
+AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
+ AttributeSet FnAttrs = F.getAttributes();
+ Attribute CPUAttr =
+ FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-cpu");
+ Attribute FSAttr =
+ FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-features");
+
+ std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
+ ? CPUAttr.getValueAsString().str()
+ : TargetCPU;
+ std::string FS = !FSAttr.hasAttribute(Attribute::None)
+ ? FSAttr.getValueAsString().str()
+ : TargetFS;
+
+ auto &I = SubtargetMap[CPU + FS];
+ if (!I) {
+ // This needs to be done before we create a new subtarget since any
+ // creation will depend on the TM and the code generation flags on the
+ // function that reside in TargetOptions.
+ resetTargetOptions(F);
+ I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this, isLittle);
+ }
+ return I.get();
+}
+
void AArch64leTargetMachine::anchor() { }
AArch64leTargetMachine::
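
getSubtargetImpl above makes the subtarget a per-function property, keyed by the function's target-cpu and target-features attributes. The caching pattern at its core, reduced to standalone C++ (stand-in types, not the LLVM classes):

    #include <map>
    #include <memory>
    #include <string>

    struct ExampleSubtarget {
      std::string CPU, FS;
    };

    class ExampleTargetMachine {
      // mutable: the cache may grow inside a const query.
      mutable std::map<std::string, std::unique_ptr<ExampleSubtarget>>
          SubtargetMap;

    public:
      const ExampleSubtarget *getSubtarget(const std::string &CPU,
                                           const std::string &FS) const {
        auto &I = SubtargetMap[CPU + FS]; // one entry per CPU+features pair
        if (!I)                           // built on first request, then shared
          I.reset(new ExampleSubtarget{CPU, FS});
        return I.get();
      }
    };

Note the key is the plain concatenation CPU + FS, so in principle distinct (CPU, FS) pairs can map to the same entry; that is a trade-off the simple scheme tolerates.
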
@@ -115,7 +168,10 @@ namespace {
class AArch64PassConfig : public TargetPassConfig {
public:
AArch64PassConfig(AArch64TargetMachine *TM, PassManagerBase &PM)
- : TargetPassConfig(TM, PM) {}
+ : TargetPassConfig(TM, PM) {
+ if (TM->getOptLevel() != CodeGenOpt::None)
+ substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
+ }
AArch64TargetMachine &getAArch64TargetMachine() const {
return getTM<AArch64TargetMachine>();
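
The constructor change swaps the post-RA list scheduler for the MachineScheduler-based post-RA pass whenever optimization is on. A hedged sketch of what a substitutePass-style table does (stand-in types; the real mechanism lives in TargetPassConfig):

    #include <map>

    // Stand-ins for LLVM's opaque pass identifiers.
    enum ExamplePassID { PostRAListSched, PostMISched };

    class ExamplePassConfig {
      // Whenever the standard pipeline asks to add the key pass,
      // the mapped replacement is added in its place.
      std::map<ExamplePassID, ExamplePassID> Substitutions;

    public:
      explicit ExamplePassConfig(bool Optimizing) {
        if (Optimizing)
          Substitutions[PostRAListSched] = PostMISched;
      }

      ExamplePassID resolve(ExamplePassID Requested) const {
        auto It = Substitutions.find(Requested);
        return It == Substitutions.end() ? Requested : It->second;
      }
    };
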
@@ -147,7 +203,7 @@ TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
void AArch64PassConfig::addIRPasses() {
// Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
// ourselves.
- addPass(createAtomicExpandLoadLinkedPass(TM));
+ addPass(createAtomicExpandPass(TM));
// Cmpxchg instructions are often used with a subsequent comparison to
// determine whether it succeeded. We can exploit existing control-flow in
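
The rename tracks the pass outgrowing pure load-linked expansion. For orientation, the kind of operation it expands (standard C++; the ldaxr/stlxr detail is how AArch64 of this era lowers it, not something shown in the patch):

    #include <atomic>

    // An atomicrmw, in IR terms: the expansion pass rewrites it into an
    // explicit load-exclusive/store-exclusive (ldaxr/stlxr) retry loop at
    // the IR level, where later passes can still optimize around it.
    int fetchAdd(std::atomic<int> &Counter) {
      return Counter.fetch_add(1, std::memory_order_seq_cst);
    }
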
@@ -156,6 +212,19 @@ void AArch64PassConfig::addIRPasses() {
addPass(createCFGSimplificationPass());
TargetPassConfig::addIRPasses();
+
+ if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
+ // Call SeparateConstOffsetFromGEP pass to extract constants within indices
+ // and lower a GEP with multiple indices to either arithmetic operations or
+ // multiple GEPs with a single index.
+ addPass(createSeparateConstOffsetFromGEPPass(TM, true));
+ // Call EarlyCSE pass to find and remove subexpressions in the lowered
+ // result.
+ addPass(createEarlyCSEPass());
+ // Do loop invariant code motion in case part of the lowered result is
+ // invariant.
+ addPass(createLICMPass());
+ }
}
// Pass Pipeline Configuration
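
A rough C++ analogue of what these three passes cooperate on (illustrative only): a multi-index GEP is base-plus-scaled-index-plus-constant arithmetic, and separating the constant lets the variable part be CSE'd or hoisted while the constant folds into the addressing mode.

    #include <cstdint>

    // Address of base[i][3] for an int32_t base[][64]: the split keeps the
    // loop-variant scaling apart from the constant +3 element offset.
    int32_t *gepLikeAddress(int32_t *Base, int64_t I) {
      int32_t *VariablePart = Base + I * 64; // EarlyCSE/LICM candidate
      return VariablePart + 3;               // folds into the load/store
    }
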
@@ -185,8 +254,12 @@ bool AArch64PassConfig::addInstSelector() {
}
bool AArch64PassConfig::addILPOpts() {
+ if (EnableCondOpt)
+ addPass(createAArch64ConditionOptimizerPass());
if (EnableCCMP)
addPass(createAArch64ConditionalCompares());
+ if (EnableMCR)
+ addPass(&MachineCombinerID);
if (EnableEarlyIfConversion)
addPass(&EarlyIfConverterID);
if (EnableStPairSuppress)
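
On ordering: the condition optimizer runs before conditional-compare formation so that normalized comparisons feed CCMP matching. The shape of source code CCMP formation targets (illustrative; the CMP+CCMP+CSET mapping is the pass's purpose, not spelled out here):

    // Two chained compares with nothing in between: rather than two
    // conditional branches, AArch64 can evaluate this as CMP + CCMP +
    // CSET, the pattern createAArch64ConditionalCompares() forms.
    bool bothMatch(int A, int B) {
      return A == 0 && B == 5;
    }
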
@@ -196,8 +269,12 @@ bool AArch64PassConfig::addILPOpts() {
bool AArch64PassConfig::addPreRegAlloc() {
// Use AdvSIMD scalar instructions whenever profitable.
- if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar)
+ if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
addPass(createAArch64AdvSIMDScalar());
+ // The AdvSIMD pass may produce copies that can be rewritten to
+ // be register coalescer friendly.
+ addPass(&PeepholeOptimizerID);
+ }
return true;
}
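
For context: the AdvSIMD-scalar pass keeps 64-bit integer arithmetic on the FP/SIMD side when the operands already live there, and the copies it introduces are what the peephole pass cleans up for the coalescer. A hedged example of a candidate (whether registers end up assigned this way is the compiler's call):

    #include <cstdint>
    #include <cstring>

    // The i64 values arrive as double bit patterns, i.e. in FP registers;
    // performing the add as an AdvSIMD scalar op (add Dd, Dn, Dm) avoids a
    // round trip through the general-purpose registers.
    double addBitPatterns(double X, double Y) {
      int64_t A, B;
      std::memcpy(&A, &X, sizeof A);
      std::memcpy(&B, &Y, sizeof B);
      int64_t Sum = A + B;
      double R;
      std::memcpy(&R, &Sum, sizeof R);
      return R;
    }
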
@@ -206,7 +283,9 @@ bool AArch64PassConfig::addPostRegAlloc() {
if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
addPass(createAArch64DeadRegisterDefinitions());
if (TM->getOptLevel() != CodeGenOpt::None &&
- TM->getSubtarget<AArch64Subtarget>().isCortexA57())
+ (TM->getSubtarget<AArch64Subtarget>().isCortexA53() ||
+ TM->getSubtarget<AArch64Subtarget>().isCortexA57()) &&
+ usingDefaultRegAlloc())
// Improve performance for some FP/SIMD code for Cortex-A53 and Cortex-A57.
addPass(createAArch64A57FPLoadBalancing());
return true;
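
Finally, the widened gate: the balancer now covers Cortex-A53 as well as A57, and it only runs under the default register allocator, since it second-guesses how accumulators get assigned. The kind of code it targets (hedged: the register-bank detail paraphrases the pass's stated motivation and may be imprecise):

    // Independent floating-point multiply-accumulate chains; on these
    // cores the balancer steers each chain's accumulators toward a
    // consistent (even- or odd-numbered) FP register bank for throughput.
    float dotProduct(const float *A, const float *B, int N) {
      float Acc0 = 0.0f, Acc1 = 0.0f;
      for (int I = 0; I + 1 < N; I += 2) {
        Acc0 += A[I] * B[I];         // fmadd chain 1
        Acc1 += A[I + 1] * B[I + 1]; // fmadd chain 2
      }
      return Acc0 + Acc1;
    }
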