//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "LiveRangeEdit.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineLoopRanges.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"
#include <queue>
STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits, "Number of split local live ranges");
STATISTIC(NumEvicted, "Number of interferences evicted");
static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
createGreedyRegisterAllocator);
namespace {
class RAGreedy : public MachineFunctionPass,
public RegAllocBase,
private LiveRangeEdit::Delegate {
// context
MachineFunction *MF;
BitVector ReservedRegs;
// analyses
SlotIndexes *Indexes;
MachineDominatorTree *DomTree;
MachineLoopInfo *Loops;
MachineLoopRanges *LoopRanges;
EdgeBundles *Bundles;
SpillPlacement *SpillPlacer;
// state
std::auto_ptr<Spiller> SpillerInstance;
std::priority_queue<std::pair<unsigned, unsigned> > Queue;
// Live ranges pass through a number of stages as we try to allocate them.
// Some of the stages may also create new live ranges:
//
// - Region splitting.
// - Per-block splitting.
// - Local splitting.
// - Spilling.
//
// Ranges produced by one of the stages skip the previous stages when they are
// dequeued. This improves performance because we can skip interference checks
// that are unlikely to give any results. It also guarantees that the live
// range splitting algorithm terminates, something that is otherwise hard to
// ensure.
enum LiveRangeStage {
RS_New, ///< Never seen before.
RS_First, ///< First time in the queue.
RS_Second, ///< Second time in the queue.
RS_Region, ///< Produced by region splitting.
RS_Block, ///< Produced by per-block splitting.
RS_Local, ///< Produced by local splitting.
RS_Spill ///< Produced by spilling.
};
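// Example: a register produced by region splitting re-enters the queue at
// RS_Region, so when it is dequeued it skips straight to per-block or local
// splitting instead of retrying region splitting. Each range can only move
// forward through the stages, which is what bounds the splitting depth.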
IndexedMap<unsigned char, VirtReg2IndexFunctor> LRStage;
LiveRangeStage getStage(const LiveInterval &VirtReg) const {
return LiveRangeStage(LRStage[VirtReg.reg]);
}
template<typename Iterator>
void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
LRStage.resize(MRI->getNumVirtRegs());
for (;Begin != End; ++Begin) {
unsigned Reg = (*Begin)->reg;
if (LRStage[Reg] == RS_New)
LRStage[Reg] = NewStage;
}
}
// splitting state.
std::auto_ptr<SplitAnalysis> SA;
std::auto_ptr<SplitEditor> SE;
/// Cached per-block interference maps
InterferenceCache IntfCache;
/// All basic blocks where the current register is live.
SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
/// Global live range splitting candidate info.
struct GlobalSplitCandidate {
unsigned PhysReg;
BitVector LiveBundles;
};
/// Candidate info for each PhysReg in AllocationOrder.
/// This vector never shrinks, but grows to the size of the largest register
/// class.
SmallVector<GlobalSplitCandidate, 32> GlobalCand;
/// For every instruction in SA->UseSlots, store the previous non-copy
/// instruction.
SmallVector<SlotIndex, 8> PrevSlot;
public:
RAGreedy();
/// Return the pass name.
virtual const char* getPassName() const {
return "Greedy Register Allocator";
}
/// RAGreedy analysis usage.
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
virtual void releaseMemory();
virtual Spiller &spiller() { return *SpillerInstance; }
virtual void enqueue(LiveInterval *LI);
virtual LiveInterval *dequeue();
virtual unsigned selectOrSplit(LiveInterval&,
SmallVectorImpl<LiveInterval*>&);
/// Perform register allocation.
virtual bool runOnMachineFunction(MachineFunction &mf);
static char ID;
private:
void LRE_WillEraseInstruction(MachineInstr*);
bool LRE_CanEraseVirtReg(unsigned);
void LRE_WillShrinkVirtReg(unsigned);
void LRE_DidCloneVirtReg(unsigned, unsigned);
bool addSplitConstraints(unsigned, float&);
float calcGlobalSplitCost(unsigned, const BitVector&);
void splitAroundRegion(LiveInterval&, unsigned, const BitVector&,
SmallVectorImpl<LiveInterval*>&);
void calcGapWeights(unsigned, SmallVectorImpl<float>&);
SlotIndex getPrevMappedIndex(const MachineInstr*);
void calcPrevSlots();
unsigned nextSplitPoint(unsigned);
bool canEvictInterference(LiveInterval&, unsigned, float&);
unsigned tryEvict(LiveInterval&, AllocationOrder&,
SmallVectorImpl<LiveInterval*>&);
unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<LiveInterval*>&);
unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<LiveInterval*>&);
unsigned trySplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace
char RAGreedy::ID = 0;
FunctionPass* llvm::createGreedyRegisterAllocator() {
return new RAGreedy();
}
RAGreedy::RAGreedy(): MachineFunctionPass(ID), LRStage(RS_New) {
initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}
void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
AU.addRequired<LiveIntervals>();
AU.addRequired<SlotIndexes>();
AU.addRequired<LiveDebugVariables>();
AU.addPreserved<LiveDebugVariables>();
if (StrongPHIElim)
AU.addRequiredID(StrongPHIEliminationID);
AU.addRequiredTransitive<RegisterCoalescer>();
AU.addRequired<CalculateSpillWeights>();
AU.addRequired<LiveStacks>();
AU.addPreserved<LiveStacks>();
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineLoopInfo>();
AU.addRequired<MachineLoopRanges>();
AU.addPreserved<MachineLoopRanges>();
AU.addRequired<VirtRegMap>();
AU.addPreserved<VirtRegMap>();
AU.addRequired<EdgeBundles>();
AU.addRequired<SpillPlacement>();
MachineFunctionPass::getAnalysisUsage(AU);
}
//===----------------------------------------------------------------------===//
// LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//
void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
// LRE itself will remove from SlotIndexes and parent basic block.
VRM->RemoveMachineInstrFromMaps(MI);
}
bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
unassign(LIS->getInterval(VirtReg), PhysReg);
return true;
}
// Unassigned virtreg is probably in the priority queue.
// RegAllocBase will erase it after dequeueing.
return false;
}
void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
unsigned PhysReg = VRM->getPhys(VirtReg);
if (!PhysReg)
return;
// Register is assigned, put it back on the queue for reassignment.
LiveInterval &LI = LIS->getInterval(VirtReg);
unassign(LI, PhysReg);
enqueue(&LI);
}
void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
// LRE may clone a virtual register because dead code elimination causes it to
// be split into connected components. Ensure that the new register gets the
// same stage as the parent.
LRStage.grow(New);
LRStage[New] = LRStage[Old];
}
void RAGreedy::releaseMemory() {
SpillerInstance.reset(0);
LRStage.clear();
RegAllocBase::releaseMemory();
}
void RAGreedy::enqueue(LiveInterval *LI) {
// Prioritize live ranges by size, assigning larger ranges first.
// The queue holds (priority, reg) pairs, where the priority is derived from
// the range's size and adjusted for stage and hints below.
const unsigned Size = LI->getSize();
const unsigned Reg = LI->reg;
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
"Can only enqueue virtual registers");
unsigned Prio;
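// The priority is a 32-bit key: bit 31 separates normal ranges from the
// deferred RS_Second ranges, bit 30 boosts ranges with a physreg hint, and
// the low bits order ranges by size, so the priority_queue pops the most
// urgent range first.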
LRStage.grow(Reg);
if (LRStage[Reg] == RS_New)
LRStage[Reg] = RS_First;
if (LRStage[Reg] == RS_Second)
// Unsplit ranges that couldn't be allocated immediately are deferred until
// everything else has been allocated. Long ranges are allocated last so
// they are split against realistic interference.
Prio = (1u << 31) - Size;
else {
// Everything else is allocated in long->short order. Long ranges that don't
// fit should be spilled ASAP so they don't create interference.
Prio = (1u << 31) + Size;
// Boost ranges that have a physical register hint.
if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
Prio |= (1u << 30);
}
Queue.push(std::make_pair(Prio, Reg));
}
LiveInterval *RAGreedy::dequeue() {
if (Queue.empty())
return 0;
LiveInterval *LI = &LIS->getInterval(Queue.top().second);
Queue.pop();
return LI;
}
//===----------------------------------------------------------------------===//
// Interference eviction
//===----------------------------------------------------------------------===//
/// canEvictInterference - Return true if all interferences between VirtReg
/// and PhysReg can be evicted. Set MaxWeight to the maximal spill weight of
/// an interference.
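/// Only interferences with a strictly smaller spill weight than VirtReg may
/// be evicted; this guarantees forward progress, since an evicted range can
/// never in turn evict VirtReg.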
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
float &MaxWeight) {
float Weight = 0;
for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
// If there are 10 or more interferences, chances are one is heavier.
if (Q.collectInterferingVRegs(10) >= 10)
return false;
// Check if any interfering live range is heavier than VirtReg.
for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
LiveInterval *Intf = Q.interferingVRegs()[i];
if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
return false;
if (Intf->weight >= VirtReg.weight)
return false;
Weight = std::max(Weight, Intf->weight);
}
}
MaxWeight = Weight;
return true;
}
/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order Physregs to try.
/// @return Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
AllocationOrder &Order,
SmallVectorImpl<LiveInterval*> &NewVRegs){
NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
// Keep track of the lightest single interference seen so far.
float BestWeight = 0;
unsigned BestPhys = 0;
Order.rewind();
while (unsigned PhysReg = Order.next()) {
float Weight = 0;
if (!canEvictInterference(VirtReg, PhysReg, Weight))
continue;
// This is an eviction candidate.
DEBUG(dbgs() << "max " << PrintReg(PhysReg, TRI) << " interference = "
<< Weight << '\n');
if (BestPhys && Weight >= BestWeight)
continue;
// Best so far.
BestPhys = PhysReg;
BestWeight = Weight;
// Stop if the hint can be used.
if (Order.isHint(PhysReg))
break;
}
if (!BestPhys)
return 0;
DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n");
for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) {
LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
assert(Q.seenAllInterferences() && "Didn't check all interferences.");
for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
LiveInterval *Intf = Q.interferingVRegs()[i];
unassign(*Intf, VRM->getPhys(Intf->reg));
++NumEvicted;
NewVRegs.push_back(Intf);
}
}
return BestPhys;
}
//===----------------------------------------------------------------------===//
// Region Splitting
//===----------------------------------------------------------------------===//
/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// If it is evident that no bundles will be live, abort early and return false.
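/// (A positive node in the SpillPlacement problem is an edge bundle that
/// prefers to stay in a register; if no bundle does, splitting around this
/// interference pattern cannot produce a useful region.)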
bool RAGreedy::addSplitConstraints(unsigned PhysReg, float &Cost) {
InterferenceCache::Cursor Intf(IntfCache, PhysReg);
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
// Reset interference dependent info.
SplitConstraints.resize(UseBlocks.size());
float StaticCost = 0;
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
BC.Number = BI.MBB->getNumber();
Intf.moveToBlock(BC.Number);
BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
if (!Intf.hasInterference())
continue;
// Number of spill code instructions to insert.
unsigned Ins = 0;
// Interference for the live-in value.
if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
BC.Entry = SpillPlacement::MustSpill, ++Ins;
else if (Intf.first() < BI.FirstUse)
BC.Entry = SpillPlacement::PrefSpill, ++Ins;
else if (Intf.first() < (BI.LiveThrough ? BI.LastUse : BI.Kill))
++Ins;
// Interference for the live-out value.
if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
BC.Exit = SpillPlacement::MustSpill, ++Ins;
else if (Intf.last() > BI.LastUse)
BC.Exit = SpillPlacement::PrefSpill, ++Ins;
else if (Intf.last() > (BI.LiveThrough ? BI.FirstUse : BI.Def))
++Ins;
// Accumulate the total frequency of inserted spill code.
if (Ins)
StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
}
// Add constraints for use-blocks. Note that these are the only constraints
// that may add a positive bias; it is downhill from here.
SpillPlacer->addConstraints(SplitConstraints);
if (SpillPlacer->getPositiveNodes() == 0)
return false;
Cost = StaticCost;
// Now handle the live-through blocks without uses. These can only add
// negative bias, so we can abort whenever there are no more positive nodes.
// Compute constraints for a group of 8 blocks at a time.
const unsigned GroupSize = 8;
SpillPlacement::BlockConstraint BCS[GroupSize];
unsigned B = 0;
ArrayRef<unsigned> ThroughBlocks = SA->getThroughBlocks();
for (unsigned i = 0; i != ThroughBlocks.size(); ++i) {
unsigned Number = ThroughBlocks[i];
assert(B < GroupSize && "Array overflow");
BCS[B].Number = Number;
Intf.moveToBlock(Number);
if (Intf.hasInterference()) {
// Interference for the live-in value.
if (Intf.first() <= Indexes->getMBBStartIdx(Number))
BCS[B].Entry = SpillPlacement::MustSpill;
else
BCS[B].Entry = SpillPlacement::PrefSpill;
// Interference for the live-out value.
if (Intf.last() >= SA->getLastSplitPoint(Number))
BCS[B].Exit = SpillPlacement::MustSpill;
else
BCS[B].Exit = SpillPlacement::PrefSpill;
} else {
// No interference, transparent block.
BCS[B].Entry = BCS[B].Exit = SpillPlacement::DontCare;
}
if (++B == GroupSize) {
ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
SpillPlacer->addConstraints(Array);
B = 0;
// Abort early when all hope is lost.
if (SpillPlacer->getPositiveNodes() == 0)
return false;
}
}
ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
SpillPlacer->addConstraints(Array);
return SpillPlacer->getPositiveNodes() != 0;
}
/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
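/// For example, a use-block whose entry is live in a register bundle even
/// though SplitConstraints preferred the stack (or vice versa) costs one
/// transition at that block's frequency, and a live-through block that stays
/// in the register despite interference costs two.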
float RAGreedy::calcGlobalSplitCost(unsigned PhysReg,
const BitVector &LiveBundles) {
float GlobalCost = 0;
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, 0)];
bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
unsigned Ins = 0;
if (BI.LiveIn)
Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
if (BI.LiveOut)
Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
if (Ins)
GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
}
InterferenceCache::Cursor Intf(IntfCache, PhysReg);
ArrayRef<unsigned> ThroughBlocks = SA->getThroughBlocks();
SplitConstraints.resize(UseBlocks.size() + ThroughBlocks.size());
for (unsigned i = 0; i != ThroughBlocks.size(); ++i) {
unsigned Number = ThroughBlocks[i];
bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
if (!RegIn && !RegOut)
continue;
if (RegIn && RegOut) {
// We need double spill code if this block has interference.
Intf.moveToBlock(Number);
if (Intf.hasInterference())
GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
continue;
}
// live-in / stack-out or stack-in live-out.
GlobalCost += SpillPlacer->getBlockFrequency(Number);
}
return GlobalCost;
}
/// splitAroundRegion - Split VirtReg around the region determined by
/// LiveBundles. Make an effort to avoid interference from PhysReg.
///
/// The 'register' interval is going to contain as many uses as possible while
/// avoiding interference. The 'stack' interval is the complement constructed by
/// SplitEditor. It will contain the rest.
///
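/// Bundles->getBundle(N, 0) is the edge bundle entering block N and
/// getBundle(N, 1) the bundle leaving it; a set bit in LiveBundles means the
/// register interval should be live across that boundary.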
void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, unsigned PhysReg,
const BitVector &LiveBundles,
SmallVectorImpl<LiveInterval*> &NewVRegs) {
DEBUG({
dbgs() << "Splitting around region for " << PrintReg(PhysReg, TRI)
<< " with bundles";
for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
dbgs() << " EB#" << i;
dbgs() << ".\n";
});
InterferenceCache::Cursor Intf(IntfCache, PhysReg);
LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
SE->reset(LREdit);
// Create the main cross-block interval.
SE->openIntv();
// First add all defs that are live out of a block.
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
// Should the register be live out?
if (!BI.LiveOut || !RegOut)
continue;
SlotIndex Start, Stop;
tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
Intf.moveToBlock(BI.MBB->getNumber());
DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
Jakob Stoklund Olesen
committed
<< Bundles->getBundle(BI.MBB->getNumber(), 1)
Jakob Stoklund Olesen
committed
<< " [" << Start << ';'
<< SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
<< ") intf [" << Intf.first() << ';' << Intf.last() << ')');
Jakob Stoklund Olesen
committed
// The interference interval should either be invalid or overlap MBB.
assert((!Intf.hasInterference() || Intf.first() < Stop)
&& "Bad interference");
assert((!Intf.hasInterference() || Intf.last() > Start)
&& "Bad interference");
// Check interference leaving the block.
if (!Intf.hasInterference()) {
// Block is interference-free.
DEBUG(dbgs() << ", no interference");
if (!BI.LiveThrough) {
DEBUG(dbgs() << ", not live-through.\n");
SE->useIntv(SE->enterIntvBefore(BI.Def), Stop);
continue;
}
if (!RegIn) {
// Block is live-through, but entry bundle is on the stack.
// Reload just before the first use.
DEBUG(dbgs() << ", not live-in, enter before first use.\n");
SE->useIntv(SE->enterIntvBefore(BI.FirstUse), Stop);
continue;
}
DEBUG(dbgs() << ", live-through.\n");
continue;
}
// Block has interference.
DEBUG(dbgs() << ", interference to " << Intf.last());
if (!BI.LiveThrough && Intf.last() <= BI.Def) {
// The interference doesn't reach the outgoing segment.
DEBUG(dbgs() << " doesn't affect def from " << BI.Def << '\n');
SE->useIntv(BI.Def, Stop);
continue;
}
SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
if (Intf.last().getBoundaryIndex() < BI.LastUse) {
// There are interference-free uses at the end of the block.
// Find the first use that can get the live-out register.
SmallVectorImpl<SlotIndex>::const_iterator UI =
std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
Intf.last().getBoundaryIndex());
assert(UI != SA->UseSlots.end() && "Couldn't find last use");
SlotIndex Use = *UI;
assert(Use <= BI.LastUse && "Couldn't find last use");
// Only attempt a split before the last split point.
if (Use.getBaseIndex() <= LastSplitPoint) {
DEBUG(dbgs() << ", free use at " << Use << ".\n");
SlotIndex SegStart = SE->enterIntvBefore(Use);
assert(SegStart >= Intf.last() && "Couldn't avoid interference");
assert(SegStart < LastSplitPoint && "Impossible split point");
SE->useIntv(SegStart, Stop);
continue;
}
}
// Interference is after the last use.
DEBUG(dbgs() << " after last use.\n");
SlotIndex SegStart = SE->enterIntvAtEnd(*BI.MBB);
assert(SegStart >= Intf.last() && "Couldn't avoid interference");
}
// Now all defs leading to live bundles are handled, do everything else.
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
// Is the register live-in?
if (!BI.LiveIn || !RegIn)
continue;
// We have an incoming register. Check for interference.
SlotIndex Start, Stop;
tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
Intf.moveToBlock(BI.MBB->getNumber());
DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
<< " -> BB#" << BI.MBB->getNumber() << " [" << Start << ';'
<< SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
<< ')');
// Check interference entering the block.
if (!Intf.hasInterference()) {
// Block is interference-free.
DEBUG(dbgs() << ", no interference");
if (!BI.LiveThrough) {
DEBUG(dbgs() << ", killed in block.\n");
SE->useIntv(Start, SE->leaveIntvAfter(BI.Kill));
continue;
}
if (!RegOut) {
SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
// Block is live-through, but exit bundle is on the stack.
// Spill immediately after the last use.
if (BI.LastUse < LastSplitPoint) {
DEBUG(dbgs() << ", uses, stack-out.\n");
SE->useIntv(Start, SE->leaveIntvAfter(BI.LastUse));
continue;
}
// The last use is after the last split point, it is probably an
// indirect jump.
DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
<< LastSplitPoint << ", stack-out.\n");
SlotIndex SegEnd = SE->leaveIntvBefore(LastSplitPoint);
SE->useIntv(Start, SegEnd);
// Run a double interval from the split to the last use.
// This makes it possible to spill the complement without affecting the
// indirect branch.
SE->overlapIntv(SegEnd, BI.LastUse);
continue;
}
// Register is live-through.
DEBUG(dbgs() << ", uses, live-through.\n");
SE->useIntv(Start, Stop);
continue;
}
// Block has interference.
DEBUG(dbgs() << ", interference from " << Intf.first());
if (!BI.LiveThrough && Intf.first() >= BI.Kill) {
// The interference doesn't reach the outgoing segment.
DEBUG(dbgs() << " doesn't affect kill at " << BI.Kill << '\n');
SE->useIntv(Start, BI.Kill);
continue;
}
if (Intf.first().getBaseIndex() > BI.FirstUse) {
// There are interference-free uses at the beginning of the block.
// Find the last use that can get the register.
SmallVectorImpl<SlotIndex>::const_iterator UI =
std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
Intf.first().getBaseIndex());
assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
SlotIndex Use = (--UI)->getBoundaryIndex();
DEBUG(dbgs() << ", free use at " << *UI << ".\n");
SlotIndex SegEnd = SE->leaveIntvAfter(Use);
assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
SE->useIntv(Start, SegEnd);
continue;
}
// Interference is before the first use.
DEBUG(dbgs() << " before first use.\n");
SlotIndex SegEnd = SE->leaveIntvAtTop(*BI.MBB);
assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
}
// Handle live-through blocks.
ArrayRef<unsigned> ThroughBlocks = SA->getThroughBlocks();
for (unsigned i = 0; i != ThroughBlocks.size(); ++i) {
unsigned Number = ThroughBlocks[i];
bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
DEBUG(dbgs() << "Live through BB#" << Number << '\n');
if (RegIn && RegOut) {
Intf.moveToBlock(Number);
if (!Intf.hasInterference()) {
SE->useIntv(Indexes->getMBBStartIdx(Number),
Indexes->getMBBEndIdx(Number));
continue;
}
}
MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
if (RegIn)
SE->leaveIntvAtTop(*MBB);
if (RegOut)
SE->enterIntvAtEnd(*MBB);
}
SE->closeIntv();
// FIXME: Should we be more aggressive about splitting the stack region into
// per-block segments? The current approach allows the stack region to
// separate into connected components. Some components may be allocatable.
SE->finish();
if (VerifyEnabled)
MF->verify(this, "After splitting live range around region");
}
unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<LiveInterval*> &NewVRegs) {
BitVector LiveBundles, BestBundles;
float BestCost = 0;
unsigned BestReg = 0;
Order.rewind();
for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) {
if (GlobalCand.size() <= Cand)
GlobalCand.resize(Cand+1);
GlobalCand[Cand].PhysReg = PhysReg;
SpillPlacer->prepare(LiveBundles);
float Cost;
if (!addSplitConstraints(PhysReg, Cost)) {
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bias\n");
continue;
}
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tbiased = "
<< SpillPlacer->getPositiveNodes() << ", static = " << Cost);
if (BestReg && Cost >= BestCost) {
DEBUG(dbgs() << " worse than " << PrintReg(BestReg, TRI) << '\n');
continue;
}
SpillPlacer->finish();
// No live bundles, defer to splitSingleBlocks().
if (!LiveBundles.any()) {
DEBUG(dbgs() << " no bundles.\n");
continue;
}
Cost += calcGlobalSplitCost(PhysReg, LiveBundles);
DEBUG({
dbgs() << ", total = " << Cost << " with bundles";
for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
dbgs() << " EB#" << i;
dbgs() << ".\n";
});
if (!BestReg || Cost < BestCost) {
BestReg = PhysReg;
BestCost = 0.98f * Cost; // Prevent rounding effects.
BestBundles.swap(LiveBundles);
}
}
if (!BestReg)
return 0;
splitAroundRegion(VirtReg, BestReg, BestBundles, NewVRegs);
setStage(NewVRegs.begin(), NewVRegs.end(), RS_Region);
return 0;
}
//===----------------------------------------------------------------------===//
// Local Splitting
//===----------------------------------------------------------------------===//
/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
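/// For example, with uses at slots u0 < u1 < u2 there are two gaps, and an
/// interfering segment overlapping the instruction at u1 contributes its
/// weight to both GapWeight[0] and GapWeight[1].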
void RAGreedy::calcGapWeights(unsigned PhysReg,
SmallVectorImpl<float> &GapWeight) {
assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
const unsigned NumGaps = Uses.size()-1;
// Start and end points for the interference check.
SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;
GapWeight.assign(NumGaps, 0.0f);
// Add interference from each overlapping register.
for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
.checkInterference())
continue;
// We know that VirtReg is a continuous interval from FirstUse to LastUse,
// so we don't need InterferenceQuery.
//
// Interference that overlaps an instruction is counted in both gaps
// surrounding the instruction. The exception is interference before
// StartIdx and after StopIdx.
//
LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
// Skip the gaps before IntI.
while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
if (++Gap == NumGaps)
break;
if (Gap == NumGaps)
break;
// Update the gaps covered by IntI.
const float weight = IntI.value()->weight;
for (; Gap != NumGaps; ++Gap) {
GapWeight[Gap] = std::max(GapWeight[Gap], weight);
if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
break;
}
if (Gap == NumGaps)
break;
}
}
}
/// getPrevMappedIndex - Return the slot index of the last non-copy instruction
/// before MI that has a slot index. If MI is the first mapped instruction in
/// its block, return the block start index instead.
///
SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) {
assert(MI && "Missing MachineInstr");
const MachineBasicBlock *MBB = MI->getParent();
MachineBasicBlock::const_iterator B = MBB->begin(), I = MI;
while (I != B)
if (!(--I)->isDebugValue() && !I->isCopy())
return Indexes->getInstructionIndex(I);
return Indexes->getMBBStartIdx(MBB);
}
/// calcPrevSlots - Fill in the PrevSlot array with the index of the previous
/// real non-copy instruction for each instruction in SA->UseSlots.
///
void RAGreedy::calcPrevSlots() {
const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
PrevSlot.clear();
PrevSlot.reserve(Uses.size());
for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]);
PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex());
}
}
/// nextSplitPoint - Find the next index into SA->UseSlots > i such that it may
/// be beneficial to split before UseSlots[i].
///
/// 0 is always a valid split point
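/// For example, when no mapped non-copy instruction separates Uses[i-1] from
/// Uses[i], splitting between them cannot free the register for any other
/// work, so index i is skipped.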
unsigned RAGreedy::nextSplitPoint(unsigned i) {
const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
const unsigned Size = Uses.size();
assert(i != Size && "No split points after the end");
// Allow split before i when Uses[i] is not adjacent to the previous use.
while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex())
;
return i;
}
/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<LiveInterval*> &NewVRegs) {
assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
// Note that it is possible to have an interval that is live-in or live-out
// while only covering a single block - A phi-def can use undef values from
// predecessors, and the block could be a single-block loop.
// We don't bother doing anything clever about such a case, we simply assume
// that the interval is continuous from FirstUse to LastUse. We should make
// sure that we don't do anything illegal to such an interval, though.
const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
if (Uses.size() <= 2)
return 0;
const unsigned NumGaps = Uses.size()-1;
DEBUG({
dbgs() << "tryLocalSplit: ";
for (unsigned i = 0, e = Uses.size(); i != e; ++i)
dbgs() << ' ' << SA->UseSlots[i];
dbgs() << '\n';
});
// For every use, find the previous mapped non-copy instruction.
// We use this to detect valid split points, and to estimate new interval
// sizes.
calcPrevSlots();
unsigned BestBefore = NumGaps;
unsigned BestAfter = 0;
float BestDiff = 0;
const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
SmallVector<float, 8> GapWeight;
Order.rewind();
while (unsigned PhysReg = Order.next()) {
// Keep track of the largest spill weight that would need to be evicted in
// order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
calcGapWeights(PhysReg, GapWeight);
// Try to find the best sequence of gaps to close.
// The new spill weight must be larger than any gap interference.
// We will split before Uses[SplitBefore] and after Uses[SplitAfter].
unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;
// MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
// It is the spill weight that needs to be evicted.
float MaxGap = GapWeight[0];