Diffstat (limited to 'lib/CodeGen/AtomicExpandPass.cpp')
-rw-r--r--  lib/CodeGen/AtomicExpandPass.cpp | 50 ++++++++++++++++++--------------------------------
1 file changed, 18 insertions(+), 32 deletions(-)
diff --git a/lib/CodeGen/AtomicExpandPass.cpp b/lib/CodeGen/AtomicExpandPass.cpp
index 12f6bd7..4b64be0 100644
--- a/lib/CodeGen/AtomicExpandPass.cpp
+++ b/lib/CodeGen/AtomicExpandPass.cpp
@@ -31,10 +31,11 @@ using namespace llvm;
 
 namespace {
   class AtomicExpand: public FunctionPass {
     const TargetMachine *TM;
+    const TargetLowering *TLI;
   public:
     static char ID; // Pass identification, replacement for typeid
     explicit AtomicExpand(const TargetMachine *TM = nullptr)
-      : FunctionPass(ID), TM(TM) {
+      : FunctionPass(ID), TM(TM), TLI(nullptr) {
       initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
     }
@@ -67,9 +68,9 @@ FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
 }
 
 bool AtomicExpand::runOnFunction(Function &F) {
-  if (!TM || !TM->getSubtargetImpl()->enableAtomicExpand())
+  if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
     return false;
-  auto TargetLowering = TM->getSubtargetImpl()->getTargetLowering();
+  TLI = TM->getSubtargetImpl(F)->getTargetLowering();
 
   SmallVector<Instruction *, 1> AtomicInsts;
 
@@ -91,7 +92,7 @@ bool AtomicExpand::runOnFunction(Function &F) {
 
     auto FenceOrdering = Monotonic;
     bool IsStore, IsLoad;
-    if (TargetLowering->getInsertFencesForAtomic()) {
+    if (TLI->getInsertFencesForAtomic()) {
       if (LI && isAtLeastAcquire(LI->getOrdering())) {
         FenceOrdering = LI->getOrdering();
         LI->setOrdering(Monotonic);
@@ -107,9 +108,9 @@
         FenceOrdering = RMWI->getOrdering();
         RMWI->setOrdering(Monotonic);
         IsStore = IsLoad = true;
-      } else if (CASI && !TargetLowering->hasLoadLinkedStoreConditional() &&
-                 (isAtLeastRelease(CASI->getSuccessOrdering()) ||
-                  isAtLeastAcquire(CASI->getSuccessOrdering()))) {
+      } else if (CASI && !TLI->hasLoadLinkedStoreConditional() &&
+                 (isAtLeastRelease(CASI->getSuccessOrdering()) ||
+                  isAtLeastAcquire(CASI->getSuccessOrdering()))) {
         // If a compare and swap is lowered to LL/SC, we can do smarter fence
         // insertion, with a stronger one on the success path than on the
         // failure path. As a result, fence insertion is directly done by
@@ -125,20 +126,19 @@ bool AtomicExpand::runOnFunction(Function &F) {
       }
     }
 
-    if (LI && TargetLowering->shouldExpandAtomicLoadInIR(LI)) {
+    if (LI && TLI->shouldExpandAtomicLoadInIR(LI)) {
       MadeChange |= expandAtomicLoad(LI);
-    } else if (SI && TargetLowering->shouldExpandAtomicStoreInIR(SI)) {
+    } else if (SI && TLI->shouldExpandAtomicStoreInIR(SI)) {
       MadeChange |= expandAtomicStore(SI);
     } else if (RMWI) {
       // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
       // - into a Cmpxchg/LL-SC loop otherwise
       // we try them in that order.
-      MadeChange |= (isIdempotentRMW(RMWI) &&
-                     simplifyIdempotentRMW(RMWI)) ||
-                    (TargetLowering->shouldExpandAtomicRMWInIR(RMWI) &&
-                     expandAtomicRMW(RMWI));
-    } else if (CASI && TargetLowering->hasLoadLinkedStoreConditional()) {
+      MadeChange |=
+          (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) ||
+          (TLI->shouldExpandAtomicRMWInIR(RMWI) && expandAtomicRMW(RMWI));
+    } else if (CASI && TLI->hasLoadLinkedStoreConditional()) {
       MadeChange |= expandAtomicCmpXchg(CASI);
     }
   }
@@ -149,13 +149,9 @@ bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                          bool IsStore, bool IsLoad) {
   IRBuilder<> Builder(I);
 
-  auto LeadingFence =
-      TM->getSubtargetImpl()->getTargetLowering()->emitLeadingFence(
-          Builder, Order, IsStore, IsLoad);
+  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);
 
-  auto TrailingFence =
-      TM->getSubtargetImpl()->getTargetLowering()->emitTrailingFence(
-          Builder, Order, IsStore, IsLoad);
+  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
   // The trailing fence is emitted before the instruction instead of after
   // because there is no easy way of setting Builder insertion point after
   // an instruction. So we must erase it from the BB, and insert it back
@@ -171,16 +167,13 @@
 }
 
 bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
-  if (TM->getSubtargetImpl()
-          ->getTargetLowering()
-          ->hasLoadLinkedStoreConditional())
+  if (TLI->hasLoadLinkedStoreConditional())
    return expandAtomicLoadToLL(LI);
   else
     return expandAtomicLoadToCmpXchg(LI);
 }
 
 bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
-  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
   IRBuilder<> Builder(LI);
 
   // On some architectures, load-linked instructions are atomic for larger
@@ -231,9 +224,7 @@ bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
 }
 
 bool AtomicExpand::expandAtomicRMW(AtomicRMWInst *AI) {
-  if (TM->getSubtargetImpl()
-          ->getTargetLowering()
-          ->hasLoadLinkedStoreConditional())
+  if (TLI->hasLoadLinkedStoreConditional())
     return expandAtomicRMWToLLSC(AI);
   else
     return expandAtomicRMWToCmpXchg(AI);
@@ -277,7 +268,6 @@ static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
 }
 
 bool AtomicExpand::expandAtomicRMWToLLSC(AtomicRMWInst *AI) {
-  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
   AtomicOrdering MemOpOrder = AI->getOrdering();
   Value *Addr = AI->getPointerOperand();
   BasicBlock *BB = AI->getParent();
@@ -397,7 +387,6 @@ bool AtomicExpand::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI) {
 }
 
 bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
-  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
   AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
   AtomicOrdering FailureOrder = CI->getFailureOrdering();
   Value *Addr = CI->getPointerOperand();
@@ -551,13 +540,10 @@ bool AtomicExpand::isIdempotentRMW(AtomicRMWInst* RMWI) {
 }
 
 bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst* RMWI) {
-  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
-
   if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
     if (TLI->shouldExpandAtomicLoadInIR(ResultingLoad))
       expandAtomicLoad(ResultingLoad);
     return true;
   }
-
   return false;
 }
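The change above is a mechanical refactoring: runOnFunction() now resolves the target lowering once per function, via the function-aware getSubtargetImpl(F) overload, and caches it in the new TLI member, so every helper drops its local TM->getSubtargetImpl()->getTargetLowering() chain. Below is a minimal sketch of that caching pattern; Machine, Lowering, Function, Pass and getLowering() are hypothetical stand-ins for illustration, not LLVM's own classes.

// A minimal sketch of the caching pattern in this commit, not LLVM itself:
// Machine, Lowering, Function and Pass are hypothetical stand-ins for
// TargetMachine, TargetLowering, llvm::Function and AtomicExpand.
#include <iostream>

struct Lowering {
  bool hasLoadLinkedStoreConditional() const { return true; }
};

struct Function { const char *Name; };

struct Machine {
  Lowering L;
  // Per-function lookup, mirroring the new getSubtargetImpl(F) overload.
  const Lowering *getLowering(const Function &) const { return &L; }
};

class Pass {
  const Machine *TM;
  const Lowering *TLI = nullptr; // cached per function, as in the patch

public:
  explicit Pass(const Machine *M) : TM(M) {}

  bool runOnFunction(const Function &F) {
    if (!TM)
      return false;
    TLI = TM->getLowering(F); // resolve the lowering object once...
    return expand();
  }

private:
  bool expand() const {
    // ...so every helper reads the cached pointer instead of re-walking
    // the TM->getSubtargetImpl()->getTargetLowering() chain on each call.
    return TLI->hasLoadLinkedStoreConditional();
  }
};

int main() {
  Machine M;
  Function F{"f"};
  Pass P(&M);
  std::cout << (P.runOnFunction(F) ? "LL/SC expansion\n" : "no change\n");
}

Besides trimming the accessor chains, caching per function is what keeps the getSubtargetImpl(F) switch safe: the pointer is refreshed on every runOnFunction() call, so a later function with a different subtarget never observes a stale lowering object.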