author    Chris Lattner <sabre@nondot.org>    2009-02-12 06:56:08 +0000
committer Chris Lattner <sabre@nondot.org>    2009-02-12 06:56:08 +0000
commit    c0ecff424c9ba63fd6c357a1fd71c621fd6f45dd (patch)
tree      fdb91e305ba60400d584c73d4a1513bc5a88a902 /lib/Transforms/Scalar/CodeGenPrepare.cpp
parent    d75427a40958f4c639f74e5d5a188d74dca0b7c5 (diff)
fix PR3537: if resetting bbi back to the start of a block, we need to
forget about already inserted expressions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@64362 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Transforms/Scalar/CodeGenPrepare.cpp')
-rw-r--r--  lib/Transforms/Scalar/CodeGenPrepare.cpp  13
1 file changed, 9 insertions, 4 deletions
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 0a1c641..12c76e8 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -1241,11 +1241,13 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
   // computation.
   Value *&SunkAddr = SunkAddrs[Addr];
   if (SunkAddr) {
-    DEBUG(cerr << "CGP: Reusing nonlocal addrmode: " << AddrMode << "\n");
+    DEBUG(cerr << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
+               << *MemoryInst);
     if (SunkAddr->getType() != Addr->getType())
       SunkAddr = new BitCastInst(SunkAddr, Addr->getType(), "tmp", InsertPt);
   } else {
-    DEBUG(cerr << "CGP: SINKING nonlocal addrmode: " << AddrMode << "\n");
+    DEBUG(cerr << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
+               << *MemoryInst);
     const Type *IntPtrTy = TLI->getTargetData()->getIntPtrType();

     Value *Result = 0;
@@ -1505,9 +1507,12 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
       if (TLI && isa<InlineAsm>(CI->getCalledValue()))
         if (const TargetAsmInfo *TAI =
               TLI->getTargetMachine().getTargetAsmInfo()) {
-          if (TAI->ExpandInlineAsm(CI))
+          if (TAI->ExpandInlineAsm(CI)) {
             BBI = BB.begin();
-          else
+            // Avoid processing instructions out of order, which could cause
+            // reuse before a value is defined.
+            SunkAddrs.clear();
+          } else
             // Sink address computing for memory operands into the block.
             MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
         }
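
Note on the fix: after TAI->ExpandInlineAsm(CI) succeeds, BBI is rewound to BB.begin(), but SunkAddrs still maps addresses to sunk computations that were inserted further down the block. Reusing one of those cached values from an earlier position would place a use before its definition, which is the PR3537 failure. Clearing the map simply forces the address computations to be re-sunk as the rescan reaches them. Below is a minimal standalone C++ sketch of the same stale-cache-on-rewind pattern; it is an illustration only, and the names in it (FakeInst, Cache, UsePos) are hypothetical, not taken from the LLVM sources.

// Minimal sketch of the stale-cache hazard; not the LLVM code itself.
// All names here (FakeInst, Cache, UsePos) are hypothetical.
#include <cstddef>
#include <iostream>
#include <map>
#include <string>

// Stand-in for a sunk address computation: remembers where in the block
// the helper "instruction" was inserted.
struct FakeInst {
  std::size_t InsertedAt;
};

int main() {
  std::map<std::string, FakeInst> Cache;  // plays the role of SunkAddrs

  // First walk over the block: a helper for key "addr" is inserted at
  // position 7 and cached.
  Cache["addr"] = FakeInst{7};

  // Something forces the walk to restart from the top, the analogue of
  // BBI = BB.begin() after an inline asm is expanded.
  bool ClearOnRewind = true;              // the behaviour the patch adds
  if (ClearOnRewind)
    Cache.clear();

  // Rescan: an instruction at position 2 asks for "addr".  Without the
  // clear it would reuse the helper that lives at position 7, i.e. a use
  // that precedes its definition.
  std::size_t UsePos = 2;
  auto It = Cache.find("addr");
  if (It != Cache.end() && It->second.InsertedAt > UsePos)
    std::cout << "BUG: reused helper defined at position "
              << It->second.InsertedAt << " from position " << UsePos << "\n";
  else
    std::cout << "OK: helper recomputed at position " << UsePos << "\n";
  return 0;
}

Clearing the whole map is the conservative choice: entries that would still have been valid are simply recomputed during the rescan, which is cheap compared to tracking per-entry validity after a rewind.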