author     Chris Lattner <sabre@nondot.org>    2004-02-17 08:09:40 +0000
committer  Chris Lattner <sabre@nondot.org>    2004-02-17 08:09:40 +0000
commit     11390e76e73d36e62c981069a67d1d33823262de (patch)
tree       3a8e3085beea860e71cb73ff32f271aabc2b1520 /lib/CodeGen
parent     18bd7bb4d453e014c39729c4bf2b43f1846b8a9a (diff)
Add support to the local allocator for fusing spill code into the instructions
that need the reloaded values. This is very useful on CISCy targets like the X86
because it reduces the total spill pressure and makes better use of its (large)
instruction set. Though the X86 backend doesn't know how to rewrite many
instructions yet, this already makes a substantial difference on 176.gcc, for
example:
Before:
Time:
8.0099 ( 31.2%) 0.0100 ( 12.5%) 8.0199 ( 31.2%) 7.7186 ( 30.0%) Local Register Allocator
Code quality:
734559 asm-printer - Number of machine instrs printed
111395 ra-local - Number of registers reloaded
79902 ra-local - Number of registers spilled
231554 x86-peephole - Number of peephole optimization performed
After:
Time:
7.8700 ( 30.6%) 0.0099 ( 19.9%) 7.8800 ( 30.6%) 7.7892 ( 30.2%) Local Register Allocator
Code quality:
733083 asm-printer - Number of machine instrs printed
2379 ra-local - Number of reloads fused into instructions
109046 ra-local - Number of registers reloaded
79881 ra-local - Number of registers spilled
230658 x86-peephole - Number of peephole optimization performed
So by fusing nearly 2400 reloads, we reduced the static number of instructions
by about 1500 and the number of peephole optimizations (and thus the work) by
about 900. This also clearly reduces the number of reload/spill instructions
that are emitted.
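For reference, the exact deltas behind those figures, computed from the counters above:
 734559 - 733083 = 1476 fewer machine instrs printed
 231554 - 230658 =  896 fewer peephole optimizations performed
 111395 - 109046 = 2349 fewer explicit reloads (with 2379 reloads fused)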
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@11542 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/RegAllocLocal.cpp | 24
1 file changed, 13 insertions, 11 deletions
diff --git a/lib/CodeGen/RegAllocLocal.cpp b/lib/CodeGen/RegAllocLocal.cpp
index f8e5086..59bb1a5 100644
--- a/lib/CodeGen/RegAllocLocal.cpp
+++ b/lib/CodeGen/RegAllocLocal.cpp
@@ -30,6 +30,7 @@ using namespace llvm;
 namespace {
   Statistic<> NumSpilled ("ra-local", "Number of registers spilled");
   Statistic<> NumReloaded("ra-local", "Number of registers reloaded");
+  Statistic<> NumFused   ("ra-local", "Number of reloads fused into instructions");
   cl::opt<bool> DisableKill("disable-kill", cl::Hidden,
                             cl::desc("Disable register kill in local-ra"));
 
@@ -491,14 +492,16 @@ MachineInstr *RA::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
   // If we have registers available to hold the value, use them.
   const TargetRegisterClass *RC = MF->getSSARegMap()->getRegClass(VirtReg);
   unsigned PhysReg = getFreeReg(RC);
+  int FrameIndex = getStackSpaceFor(VirtReg, RC);
 
-  if (PhysReg) {          // PhysReg available!
-    PhysReg = getReg(MBB, MI, VirtReg);
-  } else {                // No registers available...
-    /// If we can fold this spill into this instruction, do so now.
-    if (0) {
-      // TODO
-      return MI;
+  if (PhysReg) {          // Register is available, allocate it!
+    assignVirtToPhysReg(VirtReg, PhysReg);
+  } else {                // No registers available.
+    // If we can fold this spill into this instruction, do so now.
+    MachineBasicBlock::iterator MII = MI;
+    if (RegInfo->foldMemoryOperand(MII, OpNum, FrameIndex)) {
+      ++NumFused;
+      return MII;
     }
 
     // It looks like we can't fold this virtual register load into this
@@ -507,8 +510,6 @@ MachineInstr *RA::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
     PhysReg = getReg(MBB, MI, VirtReg);
   }
 
-  int FrameIndex = getStackSpaceFor(VirtReg, RC);
-
   markVirtRegModified(VirtReg, false);   // Note that this reg was just reloaded
 
   DEBUG(std::cerr << "  Reloading %reg" << VirtReg << " into "
@@ -565,9 +566,10 @@ void RA::AllocateBasicBlock(MachineBasicBlock &MBB) {
         unsigned VirtReg = KI->second;
         unsigned PhysReg = VirtReg;
         if (MRegisterInfo::isVirtualRegister(VirtReg)) {
+          // If the virtual register was never materialized into a register, it
+          // might not be in the map, but it won't hurt to zero it out anyway.
           unsigned &PhysRegSlot = getVirt2PhysRegMapSlot(VirtReg);
           PhysReg = PhysRegSlot;
-          assert(PhysReg != 0);
           PhysRegSlot = 0;
         }
 
@@ -599,7 +601,7 @@ void RA::AllocateBasicBlock(MachineBasicBlock &MBB) {
       for (const unsigned *ImplicitDefs = TID.ImplicitDefs;
            *ImplicitDefs; ++ImplicitDefs) {
         unsigned Reg = *ImplicitDefs;
-        spillPhysReg(MBB, MI, Reg);
+        spillPhysReg(MBB, MI, Reg, true);
         PhysRegsUseOrder.push_back(Reg);
         PhysRegsUsed[Reg] = 0;            // It is free and reserved now
         for (const unsigned *AliasSet = RegInfo->getAliasSet(Reg);
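The heart of the change is the new fold-before-reload path in RA::reloadVirtReg(): when no physical register is free, the allocator first asks the target, via RegInfo->foldMemoryOperand(), to rewrite the using instruction so it reads the value straight out of the stack slot (on x86, for instance, an add can take a memory operand instead of needing a separate load into a scratch register); only if folding fails does it grab a register and emit an explicit reload. Below is a minimal standalone sketch of that decision. It is not LLVM code: the Instr type, the tryFoldMemoryOperand() helper, and the opcode names are hypothetical stand-ins for MachineInstr, MRegisterInfo::foldMemoryOperand(), and the real X86 instruction definitions.

#include <iostream>
#include <optional>
#include <string>
#include <vector>

// One (very simplified) machine instruction: an opcode plus printable operands.
struct Instr {
  std::string opcode;
  std::vector<std::string> operands;  // e.g. "%EBX", "%reg1024", "FI#2"
};

static int NumFused = 0;     // counterpart of the NumFused statistic
static int NumReloaded = 0;  // counterpart of the NumReloaded statistic

// Hypothetical stand-in for MRegisterInfo::foldMemoryOperand(): try to rewrite
// the instruction so operand OpNum reads straight from stack slot FrameIndex.
// A real target only succeeds for opcodes that have a memory-operand form.
std::optional<Instr> tryFoldMemoryOperand(const Instr &MI, unsigned OpNum,
                                          int FrameIndex) {
  if (MI.opcode != "ADDrr")          // pretend only ADD has a reg/mem variant
    return std::nullopt;
  Instr Folded = MI;
  Folded.opcode = "ADDrm";
  Folded.operands[OpNum] = "FI#" + std::to_string(FrameIndex);
  return Folded;
}

// The decision the patch adds: if no physical register is free, first try to
// fold the reload into the user; only fall back to emitting an explicit load.
std::vector<Instr> reloadVirtReg(const Instr &MI, unsigned OpNum,
                                 int FrameIndex, bool PhysRegAvailable) {
  if (PhysRegAvailable)
    return {MI};                     // just assign a register (not modeled here)

  if (auto Folded = tryFoldMemoryOperand(MI, OpNum, FrameIndex)) {
    ++NumFused;                      // no separate reload instruction at all
    return {*Folded};
  }

  ++NumReloaded;                     // fall back: explicit reload, then the user
  return {Instr{"MOVrm", {"%scratch", "FI#" + std::to_string(FrameIndex)}}, MI};
}

int main() {
  Instr Add{"ADDrr", {"%EBX", "%reg1024"}};   // operand 1 lives in a spill slot
  for (const Instr &I : reloadVirtReg(Add, /*OpNum=*/1, /*FrameIndex=*/2,
                                      /*PhysRegAvailable=*/false)) {
    std::cout << I.opcode;
    for (const std::string &Op : I.operands)
      std::cout << ' ' << Op;
    std::cout << '\n';
  }
  std::cout << "fused=" << NumFused << " reloaded=" << NumReloaded << '\n';
}

Compiled as-is (C++17), the sketch prints the folded form "ADDrm %EBX FI#2" and "fused=1 reloaded=0", mirroring how NumFused is bumped precisely when no reload instruction has to be emitted.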