//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "LiveRangeEdit.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "RegisterCoalescer.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineLoopRanges.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"
#include <queue>
STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits, "Number of split local live ranges");
STATISTIC(NumEvicted, "Number of interferences evicted");
static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
createGreedyRegisterAllocator);
namespace {
class RAGreedy : public MachineFunctionPass,
public RegAllocBase,
private LiveRangeEdit::Delegate {
// context
MachineFunction *MF;
// analyses
SlotIndexes *Indexes;
MachineDominatorTree *DomTree;
MachineLoopInfo *Loops;
MachineLoopRanges *LoopRanges;
EdgeBundles *Bundles;
SpillPlacement *SpillPlacer;
LiveDebugVariables *DebugVars;
// state
std::auto_ptr<Spiller> SpillerInstance;
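// The queue holds (priority, virtreg) pairs; std::priority_queue pops the
// pair with the largest priority first.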
std::priority_queue<std::pair<unsigned, unsigned> > Queue;
unsigned NextCascade;
// Live ranges pass through a number of stages as we try to allocate them.
// Some of the stages may also create new live ranges:
//
// - Region splitting.
// - Per-block splitting.
// - Local splitting.
// - Spilling.
//
// Ranges produced by one of the stages skip the previous stages when they are
// dequeued. This improves performance because we can skip interference checks
// that are unlikely to give any results. It also guarantees that the live
// range splitting algorithm terminates, something that is otherwise hard to
// ensure.
enum LiveRangeStage {
RS_New, ///< Never seen before.
RS_First, ///< First time in the queue.
RS_Second, ///< Second time in the queue.
RS_Global, ///< Produced by global splitting.
RS_Local, ///< Produced by local splitting.
RS_Spill ///< Produced by spilling.
};
static const char *const StageName[];
// RegInfo - Keep additional information about each live range.
struct RegInfo {
LiveRangeStage Stage;
// Cascade - Eviction loop prevention. See canEvictInterference().
unsigned Cascade;
RegInfo() : Stage(RS_New), Cascade(0) {}
};
IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;
LiveRangeStage getStage(const LiveInterval &VirtReg) const {
return ExtraRegInfo[VirtReg.reg].Stage;
}
void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
ExtraRegInfo.resize(MRI->getNumVirtRegs());
ExtraRegInfo[VirtReg.reg].Stage = Stage;
}
template<typename Iterator>
void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
ExtraRegInfo.resize(MRI->getNumVirtRegs());
for (;Begin != End; ++Begin) {
unsigned Reg = (*Begin)->reg;
if (ExtraRegInfo[Reg].Stage == RS_New)
ExtraRegInfo[Reg].Stage = NewStage;
}
}
/// Cost of evicting interference.
struct EvictionCost {
unsigned BrokenHints; ///< Total number of broken hints.
float MaxWeight; ///< Maximum spill weight evicted.
EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}
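// Lexicographic order: breaking fewer hints always wins; the largest
// evicted spill weight only breaks ties.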
bool operator<(const EvictionCost &O) const {
if (BrokenHints != O.BrokenHints)
return BrokenHints < O.BrokenHints;
return MaxWeight < O.MaxWeight;
}
};
// splitting state.
std::auto_ptr<SplitAnalysis> SA;
std::auto_ptr<SplitEditor> SE;
/// Cached per-block interference maps
InterferenceCache IntfCache;
/// All basic blocks where the current register has uses.
SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
/// Global live range splitting candidate info.
struct GlobalSplitCandidate {
unsigned PhysReg;
BitVector LiveBundles;
SmallVector<unsigned, 8> ActiveBlocks;
void reset(unsigned Reg) {
PhysReg = Reg;
LiveBundles.clear();
ActiveBlocks.clear();
}
};
/// Candidate info for each PhysReg in AllocationOrder.
/// This vector never shrinks, but grows to the size of the largest register
/// class.
SmallVector<GlobalSplitCandidate, 32> GlobalCand;
public:
RAGreedy();
/// Return the pass name.
virtual const char* getPassName() const {
return "Greedy Register Allocator";
}
/// RAGreedy analysis usage.
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
virtual void releaseMemory();
virtual Spiller &spiller() { return *SpillerInstance; }
virtual void enqueue(LiveInterval *LI);
virtual LiveInterval *dequeue();
virtual unsigned selectOrSplit(LiveInterval&,
SmallVectorImpl<LiveInterval*>&);
/// Perform register allocation.
virtual bool runOnMachineFunction(MachineFunction &mf);
static char ID;
private:
void LRE_WillEraseInstruction(MachineInstr*);
bool LRE_CanEraseVirtReg(unsigned);
void LRE_WillShrinkVirtReg(unsigned);
void LRE_DidCloneVirtReg(unsigned, unsigned);
float calcSpillCost();
bool addSplitConstraints(InterferenceCache::Cursor, float&);
void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
void growRegion(GlobalSplitCandidate &Cand, InterferenceCache::Cursor);
float calcGlobalSplitCost(GlobalSplitCandidate&, InterferenceCache::Cursor);
void splitAroundRegion(LiveInterval&, GlobalSplitCandidate&,
SmallVectorImpl<LiveInterval*>&);
void calcGapWeights(unsigned, SmallVectorImpl<float>&);
bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
void evictInterference(LiveInterval&, unsigned,
SmallVectorImpl<LiveInterval*>&);
unsigned tryAssign(LiveInterval&, AllocationOrder&,
SmallVectorImpl<LiveInterval*>&);
unsigned tryEvict(LiveInterval&, AllocationOrder&,
SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<LiveInterval*>&);
unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<LiveInterval*>&);
unsigned trySplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace
char RAGreedy::ID = 0;
#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
"RS_New",
"RS_First",
"RS_Second",
"RS_Global",
"RS_Local",
"RS_Spill"
};
#endif
// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
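// For example, a new candidate cost must be more than about 2% better than
// the incumbent before it is considered an improvement.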
const float Hysteresis = 0.98f;
FunctionPass* llvm::createGreedyRegisterAllocator() {
return new RAGreedy();
}
RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}
void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
AU.addRequired<LiveIntervals>();
AU.addRequired<SlotIndexes>();
AU.addRequired<LiveDebugVariables>();
AU.addPreserved<LiveDebugVariables>();
if (StrongPHIElim)
AU.addRequiredID(StrongPHIEliminationID);
AU.addRequiredTransitive<RegisterCoalescer>();
AU.addRequired<CalculateSpillWeights>();
AU.addRequired<LiveStacks>();
AU.addPreserved<LiveStacks>();
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineLoopInfo>();
AU.addRequired<MachineLoopRanges>();
AU.addPreserved<MachineLoopRanges>();
AU.addRequired<VirtRegMap>();
AU.addPreserved<VirtRegMap>();
AU.addRequired<EdgeBundles>();
AU.addRequired<SpillPlacement>();
MachineFunctionPass::getAnalysisUsage(AU);
}
//===----------------------------------------------------------------------===//
// LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//
void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
// LRE itself will remove from SlotIndexes and parent basic block.
VRM->RemoveMachineInstrFromMaps(MI);
}
bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
unassign(LIS->getInterval(VirtReg), PhysReg);
return true;
}
// Unassigned virtreg is probably in the priority queue.
// RegAllocBase will erase it after dequeueing.
return false;
}
void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
unsigned PhysReg = VRM->getPhys(VirtReg);
if (!PhysReg)
return;
// Register is assigned, put it back on the queue for reassignment.
LiveInterval &LI = LIS->getInterval(VirtReg);
unassign(LI, PhysReg);
enqueue(&LI);
}
void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
// LRE may clone a virtual register because dead code elimination causes it to
// be split into connected components. Ensure that the new register gets the
// same stage as the parent.
ExtraRegInfo.grow(New);
ExtraRegInfo[New] = ExtraRegInfo[Old];
}
void RAGreedy::releaseMemory() {
SpillerInstance.reset(0);
ExtraRegInfo.clear();
RegAllocBase::releaseMemory();
}
void RAGreedy::enqueue(LiveInterval *LI) {
// Prioritize live ranges by size, assigning larger ranges first.
// The queue holds (priority, reg) pairs.
const unsigned Size = LI->getSize();
const unsigned Reg = LI->reg;
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
"Can only enqueue virtual registers");
unsigned Prio;
ExtraRegInfo.grow(Reg);
if (ExtraRegInfo[Reg].Stage == RS_New)
ExtraRegInfo[Reg].Stage = RS_First;
if (ExtraRegInfo[Reg].Stage == RS_Second)
// Unsplit ranges that couldn't be allocated immediately are deferred until
// everything else has been allocated. Long ranges are allocated last so
// they are split against realistic interference.
Prio = (1u << 31) - Size;
else {
// Everything else is allocated in long->short order. Long ranges that don't
// fit should be spilled ASAP so they don't create interference.
Prio = (1u << 31) + Size;
// Boost ranges that have a physical register hint.
if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
Prio |= (1u << 30);
}
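// The priority word thus sorts the queue: bit 31 separates first-round
// ranges from deferred RS_Second ranges, bit 30 boosts hinted ranges, and
// the size bits order ranges within each class.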
Queue.push(std::make_pair(Prio, Reg));
}
LiveInterval *RAGreedy::dequeue() {
if (Queue.empty())
return 0;
LiveInterval *LI = &LIS->getInterval(Queue.top().second);
Queue.pop();
return LI;
}
//===----------------------------------------------------------------------===//
// Direct Assignment
//===----------------------------------------------------------------------===//
/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
AllocationOrder &Order,
SmallVectorImpl<LiveInterval*> &NewVRegs) {
Order.rewind();
unsigned PhysReg;
while ((PhysReg = Order.next()))
if (!checkPhysRegInterference(VirtReg, PhysReg))
break;
if (!PhysReg || Order.isHint(PhysReg))
return PhysReg;
// PhysReg is available, but there may be a better choice.
// If we missed a simple hint, try to cheaply evict interference from the
// preferred register.
if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
if (Order.isHint(Hint)) {
DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
EvictionCost MaxCost(1);
if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
evictInterference(VirtReg, Hint, NewVRegs);
return Hint;
}
}
// Try to evict interference from a cheaper alternative.
unsigned Cost = TRI->getCostPerUse(PhysReg);
// Most registers have 0 additional cost.
if (!Cost)
return PhysReg;
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
<< '\n');
unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
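// If eviction could not free a cheaper register, settle for the costly one
// found above.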
return CheapReg ? CheapReg : PhysReg;
}
//===----------------------------------------------------------------------===//
// Interference eviction
//===----------------------------------------------------------------------===//
/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A The live range to be assigned.
/// @param IsHint True when A is about to be assigned to its preferred
/// register.
/// @param B The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
LiveInterval &B, bool BreaksHint) {
bool CanSplit = getStage(B) <= RS_Second;
// Be fairly aggressive about following hints as long as the evictee can be
// split.
if (CanSplit && IsHint && !BreaksHint)
return true;
return A.weight > B.weight;
}
/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
/// when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
bool IsHint, EvictionCost &MaxCost) {
// Find VirtReg's cascade number. This will be unassigned if VirtReg was never
// involved in an eviction before. If a cascade number was assigned, deny
// evicting anything with the same or a newer cascade number. This prevents
// infinite eviction loops.
//
// This works out so a register without a cascade number is allowed to evict
// anything, and it can be evicted by anything.
unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
if (!Cascade)
Cascade = NextCascade;
EvictionCost Cost;
for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
// If there are 10 or more interferences, chances are one is heavier.
if (Q.collectInterferingVRegs(10) >= 10)
return false;
// Check if any interfering live range is heavier than MaxWeight.
for (unsigned i = Q.interferingVRegs().size(); i; --i) {
LiveInterval *Intf = Q.interferingVRegs()[i - 1];
if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
return false;
// Never evict spill products. They cannot split or spill.
if (getStage(*Intf) == RS_Spill)
return false;
// Once a live range becomes small enough, it is urgent that we find a
// register for it. This is indicated by an infinite spill weight. These
// urgent live ranges get to evict almost anything.
bool Urgent = !VirtReg.isSpillable() && Intf->isSpillable();
// Only evict older cascades or live ranges without a cascade.
unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
if (Cascade <= IntfCascade) {
if (!Urgent)
return false;
// We permit breaking cascades for urgent evictions. It should be the
// last resort, though, so make it really expensive.
Cost.BrokenHints += 10;
}
// Would this break a satisfied hint?
bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
// Update eviction cost.
Cost.BrokenHints += BreaksHint;
Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
// Abort if this would be too expensive.
if (!(Cost < MaxCost))
return false;
// Finally, apply the eviction policy for non-urgent evictions.
if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
return false;
}
}
MaxCost = Cost;
return true;
}
/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
SmallVectorImpl<LiveInterval*> &NewVRegs) {
// Make sure that VirtReg has a cascade number, and assign that cascade
// number to every evicted register. These live ranges can then only be
// evicted by a newer cascade, preventing infinite loops.
unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
if (!Cascade)
Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;
DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
<< " interference: Cascade " << Cascade << '\n');
for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
assert(Q.seenAllInterferences() && "Didn't check all interferences.");
for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
LiveInterval *Intf = Q.interferingVRegs()[i];
unassign(*Intf, VRM->getPhys(Intf->reg));
assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
VirtReg.isSpillable() < Intf->isSpillable()) &&
"Cannot decrease cascade number, illegal eviction");
ExtraRegInfo[Intf->reg].Cascade = Cascade;
++NumEvicted;
NewVRegs.push_back(Intf);
}
}
}
/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order Physregs to try.
/// @return Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
AllocationOrder &Order,
SmallVectorImpl<LiveInterval*> &NewVRegs,
unsigned CostPerUseLimit) {
NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
// Keep track of the cheapest interference seen so far.
EvictionCost BestCost(~0u);
unsigned BestPhys = 0;
// When we are just looking for a reduced cost per use, don't break any
// hints, and only evict smaller spill weights.
if (CostPerUseLimit < ~0u) {
BestCost.BrokenHints = 0;
BestCost.MaxWeight = VirtReg.weight;
}
Order.rewind();
while (unsigned PhysReg = Order.next()) {
if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
continue;
// The first use of a callee-saved register in a function has cost 1.
// Don't start using a CSR when the CostPerUseLimit is low.
if (CostPerUseLimit == 1)
if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
if (!MRI->isPhysRegUsed(CSR)) {
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
<< PrintReg(CSR, TRI) << '\n');
continue;
}
if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
continue;
// Best so far.
BestPhys = PhysReg;
// Stop if the hint can be used.
if (Order.isHint(PhysReg))
break;
}
if (!BestPhys)
return 0;
evictInterference(VirtReg, BestPhys, NewVRegs);
return BestPhys;
}
//===----------------------------------------------------------------------===//
// Region Splitting
//===----------------------------------------------------------------------===//
/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
float &Cost) {
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
// Reset interference dependent info.
SplitConstraints.resize(UseBlocks.size());
float StaticCost = 0;
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
BC.Number = BI.MBB->getNumber();
Intf.moveToBlock(BC.Number);
BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
if (!Intf.hasInterference())
continue;
// Number of spill code instructions to insert.
unsigned Ins = 0;
// Interference for the live-in value.
if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
BC.Entry = SpillPlacement::MustSpill, ++Ins;
else if (Intf.first() < BI.FirstUse)
BC.Entry = SpillPlacement::PrefSpill, ++Ins;
else if (Intf.first() < BI.LastUse)
++Ins;
// Interference for the live-out value.
if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
BC.Exit = SpillPlacement::MustSpill, ++Ins;
else if (Intf.last() > BI.LastUse)
BC.Exit = SpillPlacement::PrefSpill, ++Ins;
else if (Intf.last() > BI.FirstUse)
++Ins;
// Accumulate the total frequency of inserted spill code.
if (Ins)
StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
}
Cost = StaticCost;
// Add constraints for use-blocks. Note that these are the only constraints
// that may add a positive bias; it is downhill from here.
SpillPlacer->addConstraints(SplitConstraints);
return SpillPlacer->scanActiveBundles();
}
/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
ArrayRef<unsigned> Blocks) {
const unsigned GroupSize = 8;
SpillPlacement::BlockConstraint BCS[GroupSize];
unsigned TBS[GroupSize];
unsigned B = 0, T = 0;
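// Blocks are batched: BCS collects constraints for blocks with interference,
// TBS collects transparent (live-through, interference-free) blocks, and
// each array is flushed to SpillPlacer whenever it fills up.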
for (unsigned i = 0; i != Blocks.size(); ++i) {
unsigned Number = Blocks[i];
Intf.moveToBlock(Number);
if (!Intf.hasInterference()) {
assert(T < GroupSize && "Array overflow");
TBS[T] = Number;
if (++T == GroupSize) {
SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
T = 0;
}
continue;
}
assert(B < GroupSize && "Array overflow");
BCS[B].Number = Number;
// Interference for the live-in value.
if (Intf.first() <= Indexes->getMBBStartIdx(Number))
BCS[B].Entry = SpillPlacement::MustSpill;
else
BCS[B].Entry = SpillPlacement::PrefSpill;
// Interference for the live-out value.
if (Intf.last() >= SA->getLastSplitPoint(Number))
BCS[B].Exit = SpillPlacement::MustSpill;
else
BCS[B].Exit = SpillPlacement::PrefSpill;
if (++B == GroupSize) {
ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
SpillPlacer->addConstraints(Array);
B = 0;
}
}
ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
SpillPlacer->addConstraints(Array);
SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
}
void RAGreedy::growRegion(GlobalSplitCandidate &Cand,
InterferenceCache::Cursor Intf) {
// Keep track of through blocks that have not been added to SpillPlacer.
BitVector Todo = SA->getThroughBlocks();
SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
unsigned AddedTo = 0;
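// AddedTo is the number of ActiveBlocks already passed to
// addThroughConstraints; blocks beyond it are discovered but not yet added.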
#ifndef NDEBUG
unsigned Visited = 0;
#endif
for (;;) {
ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
// Find new through blocks in the periphery of PrefRegBundles.
for (int i = 0, e = NewBundles.size(); i != e; ++i) {
unsigned Bundle = NewBundles[i];
// Look at all blocks connected to Bundle in the full graph.
ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
I != E; ++I) {
unsigned Block = *I;
if (!Todo.test(Block))
continue;
Todo.reset(Block);
// This is a new through block. Add it to SpillPlacer later.
ActiveBlocks.push_back(Block);
#ifndef NDEBUG
++Visited;
#endif
}
}
// Any new blocks to add?
if (ActiveBlocks.size() == AddedTo)
break;
addThroughConstraints(Intf,
ArrayRef<unsigned>(ActiveBlocks).slice(AddedTo));
AddedTo = ActiveBlocks.size();
// Perhaps iterating can enable more bundles?
SpillPlacer->iterate();
}
DEBUG(dbgs() << ", v=" << Visited);
}
/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
float Cost = 0;
const LiveInterval &LI = SA->getParent();
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
unsigned Number = BI.MBB->getNumber();
// We normally only need one spill instruction - a load or a store.
Cost += SpillPlacer->getBlockFrequency(Number);
// Unless the value is redefined in the block.
if (BI.LiveIn && BI.LiveOut) {
SlotIndex Start, Stop;
tie(Start, Stop) = Indexes->getMBBRange(Number);
LiveInterval::const_iterator I = LI.find(Start);
assert(I != LI.end() && "Expected live-in value");
// Is there a different live-out value? If so, we need an extra spill
// instruction.
if (I->end < Stop)
Cost += SpillPlacer->getBlockFrequency(Number);
}
}
return Cost;
}
/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
InterferenceCache::Cursor Intf) {
float GlobalCost = 0;
const BitVector &LiveBundles = Cand.LiveBundles;
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, 0)];
bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
unsigned Ins = 0;
if (BI.LiveIn)
Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
if (BI.LiveOut)
Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
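// Each mismatch between the bundle solution and the block's preferred
// entry/exit placement costs one inserted instruction, weighted by the
// block frequency below.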
if (Ins)
GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
}
for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
unsigned Number = Cand.ActiveBlocks[i];
bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
if (!RegIn && !RegOut)
continue;
if (RegIn && RegOut) {
// We need double spill code if this block has interference.
Intf.moveToBlock(Number);
if (Intf.hasInterference())
GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
continue;
}
// live-in / stack-out or stack-in live-out.
GlobalCost += SpillPlacer->getBlockFrequency(Number);
}
return GlobalCost;
}
/// splitAroundRegion - Split VirtReg around the region determined by
/// LiveBundles. Make an effort to avoid interference from PhysReg.
///
/// The 'register' interval is going to contain as many uses as possible while
/// avoiding interference. The 'stack' interval is the complement constructed by
/// SplitEditor. It will contain the rest.
///
void RAGreedy::splitAroundRegion(LiveInterval &VirtReg,
GlobalSplitCandidate &Cand,
SmallVectorImpl<LiveInterval*> &NewVRegs) {
const BitVector &LiveBundles = Cand.LiveBundles;
DEBUG({
dbgs() << "Splitting around region for " << PrintReg(Cand.PhysReg, TRI)
<< " with bundles";
for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
dbgs() << " EB#" << i;
dbgs() << ".\n";
});
InterferenceCache::Cursor Intf(IntfCache, Cand.PhysReg);
LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
SE->reset(LREdit);
// Create the main cross-block interval.
const unsigned MainIntv = SE->openIntv();
// First handle all the blocks with uses.
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
bool RegIn = BI.LiveIn &&
LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
bool RegOut = BI.LiveOut &&
LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
// Create separate intervals for isolated blocks with multiple uses.
//
// |---o---o---| Enter and leave on the stack.
// ____-----____ Create local interval for uses.
//
// | o---o---| Defined in block, leave on stack.
// -----____ Create local interval for uses.
//
// |---o---x | Enter on stack, killed in block.
// ____----- Create local interval for uses.
//
if (!RegIn && !RegOut) {
DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
if (!BI.isOneInstr()) {
SE->splitSingleBlock(BI);
SE->selectIntv(MainIntv);
}
continue;
}
SlotIndex Start, Stop;
tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
Intf.moveToBlock(BI.MBB->getNumber());
DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
<< "BB#" << BI.MBB->getNumber()
<< " EB#" << Bundles->getBundle(BI.MBB->getNumber(), 1)
<< " [" << Start << ';'
<< SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
<< ") uses [" << BI.FirstUse << ';' << BI.LastUse
<< ") intf [" << Intf.first() << ';' << Intf.last() << ')');
// The interference interval should either be invalid or overlap MBB.
assert((!Intf.hasInterference() || Intf.first() < Stop)
&& "Bad interference");
assert((!Intf.hasInterference() || Intf.last() > Start)
&& "Bad interference");
// We are now ready to decide where to split in the current block. There
// are many variables guiding the decision:
//
// - RegIn / RegOut: The global splitting algorithm's decisions for our
// ingoing and outgoing bundles.
//
// - BI.LiveIn / BI.LiveOut: Is the live range live-in and/or live-out
// of this block.
//
// - Intf.hasInterference(): Is there interference in this block.
//
// - Intf.first() / Intf.last(): The range of interference.
//
// The live range should be split such that MainIntv is live-in when RegIn
// is set, and live-out when RegOut is set. MainIntv should never overlap
// the interference, and the stack interval should never have more than one
// use per block.
// No splits can be inserted after LastSplitPoint, overlap instead.
SlotIndex LastSplitPoint = Stop;
if (BI.LiveOut)
LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
// At this point, we know that either RegIn or RegOut is set. We dealt with
// the all-stack case above.
// Blocks without interference are relatively easy.
if (!Intf.hasInterference()) {
DEBUG(dbgs() << ", no interference.\n");
SE->selectIntv(MainIntv);
// The easiest case has MainIntv live through.
//
// |---o---o---| Live-in, live-out.
// ============= Use MainIntv everywhere.
//
SlotIndex From = Start, To = Stop;
// Block entry. Reload before the first use if MainIntv is not live-in.
//
// |---o-- Enter on stack.
// ____=== Reload before first use.
//
// | o-- Defined in block.
// === Use MainIntv from def.
//
if (!RegIn)
From = SE->enterIntvBefore(BI.FirstUse);
// Block exit. Handle cases where MainIntv is not live-out.
if (!BI.LiveOut)
//
// --x | Killed in block.
// === Use MainIntv up to kill.
//
To = SE->leaveIntvAfter(BI.LastUse);
else if (!RegOut) {
//
// --o---| Live-out on stack.
// ===____ Use MainIntv up to last use, switch to stack.
//
// -----o| Live-out on stack, last use after last split point.
// ====== Extend MainIntv to last use, overlapping.
// \____ Copy to stack interval before last split point.
//
if (BI.LastUse < LastSplitPoint)
To = SE->leaveIntvAfter(BI.LastUse);
else {
// The last use is after the last split point, it is probably an
// indirect branch.
To = SE->leaveIntvBefore(LastSplitPoint);
// Run a double interval from the split to the last use. This makes
// it possible to spill the complement without affecting the indirect
// branch.
SE->overlapIntv(To, BI.LastUse);
}
}
// Paint in MainIntv liveness for this block.
SE->useIntv(From, To);
continue;
}
// We are now looking at a block with interference, and we know that either
// RegIn or RegOut is set.
assert(Intf.hasInterference() && (RegIn || RegOut) && "Bad invariant");
// If the live range is not live through the block, it is possible that the
// interference doesn't even overlap. Deal with those cases first. Since
// no copy instructions are required, we can tolerate interference starting
// or ending at the same instruction that kills or defines our live range.