//===-- InterferenceCache.cpp - Caching per-block interference ---------*--===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InterferenceCache remembers per-block interference in LiveIntervalUnions.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
#include "InterferenceCache.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
using namespace llvm;
// Static member used for null interference cursors.
InterferenceCache::BlockInterference InterferenceCache::Cursor::NoInterference;
void InterferenceCache::init(MachineFunction *mf,
LiveIntervalUnion *liuarray,
SlotIndexes *indexes,
LiveIntervals *lis,
const TargetRegisterInfo *tri) {
MF = mf;
LIUArray = liuarray;
TRI = tri;
PhysRegEntries.assign(TRI->getNumRegs(), 0);
for (unsigned i = 0; i != CacheEntries; ++i)
Entries[i].clear(mf, indexes, lis);
}
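// get - Look up or allocate a cache entry for PhysReg. A hit in PhysRegEntries
// is revalidated in place if the underlying LiveIntervalUnions have changed;
// on a miss, the next round-robin entry that has no outstanding cursor
// references is recycled for PhysReg.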
InterferenceCache::Entry *InterferenceCache::get(unsigned PhysReg) {
unsigned E = PhysRegEntries[PhysReg];
if (E < CacheEntries && Entries[E].getPhysReg() == PhysReg) {
if (!Entries[E].valid(LIUArray, TRI))
Entries[E].revalidate();
return &Entries[E];
}
// No valid entry exists, pick the next round-robin entry.
E = RoundRobin;
if (++RoundRobin == CacheEntries)
RoundRobin = 0;
for (unsigned i = 0; i != CacheEntries; ++i) {
// Skip entries that are in use.
if (Entries[E].hasRefs()) {
if (++E == CacheEntries)
E = 0;
continue;
}
Entries[E].reset(PhysReg, LIUArray, TRI, MF);
PhysRegEntries[PhysReg] = E;
return &Entries[E];
}
llvm_unreachable("Ran out of interference cache entries.");
}
/// revalidate - LIU contents have changed, update tags.
void InterferenceCache::Entry::revalidate() {
// Invalidate all block entries.
++Tag;
// Invalidate all iterators.
PrevPos = SlotIndex();
for (unsigned i = 0, e = Aliases.size(); i != e; ++i)
Aliases[i].second = Aliases[i].first->getTag();
}
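// reset - Repurpose this entry for a new physical register: bump the tag so
// every cached BlockInterference record becomes stale, rebuild the list of
// overlapping LiveIntervalUnions with their current tags, and rewind the
// per-alias iterators.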
void InterferenceCache::Entry::reset(unsigned physReg,
LiveIntervalUnion *LIUArray,
const TargetRegisterInfo *TRI,
const MachineFunction *MF) {
assert(!hasRefs() && "Cannot reset cache entry with references");
// LIU's changed, invalidate cache.
++Tag;
PhysReg = physReg;
Blocks.resize(MF->getNumBlockIDs());
Aliases.clear();
for (const unsigned *AS = TRI->getOverlaps(PhysReg); *AS; ++AS) {
LiveIntervalUnion *LIU = LIUArray + *AS;
Aliases.push_back(std::make_pair(LIU, LIU->getTag()));
}
// Reset iterators.
PrevPos = SlotIndex();
unsigned e = Aliases.size();
Iters.resize(e);
for (unsigned i = 0; i != e; ++i)
Iters[i].setMap(Aliases[i].first->getMap());
}
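// valid - Return true if this entry still describes PhysReg: the recorded
// alias list must match TRI->getOverlaps(PhysReg), and none of the referenced
// LiveIntervalUnions may have changed since their tags were recorded.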
bool InterferenceCache::Entry::valid(LiveIntervalUnion *LIUArray,
const TargetRegisterInfo *TRI) {
unsigned i = 0, e = Aliases.size();
for (const unsigned *AS = TRI->getOverlaps(PhysReg); *AS; ++AS, ++i) {
LiveIntervalUnion *LIU = LIUArray + *AS;
if (i == e || Aliases[i].first != LIU)
return false;
if (LIU->changedSince(Aliases[i].second))
return false;
}
return i == e;
}
// Test if a register mask clobbers PhysReg.
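// Register mask bits mark preserved registers, so a clear bit means the mask
// clobbers PhysReg.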
static inline bool maskClobber(const uint32_t *Mask, unsigned PhysReg) {
return !(Mask[PhysReg/32] & (1u << PhysReg%32));
}
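// update - Recompute the cached interference summary for block MBBNum. Blocks
// without interference are cheap to summarize, so the loop below keeps
// precomputing summaries for the following blocks until it reaches a block
// with interference or one whose summary is already up to date.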
void InterferenceCache::Entry::update(unsigned MBBNum) {
SlotIndex Start, Stop;
tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
// Use advanceTo only when possible.
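  // advanceTo() can only move iterators forward, so fall back to a full find()
  // when the new start position is before the previous one (or the iterators
  // have never been positioned).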
if (PrevPos != Start) {
if (!PrevPos.isValid() || Start < PrevPos)
for (unsigned i = 0, e = Iters.size(); i != e; ++i)
Iters[i].find(Start);
else
for (unsigned i = 0, e = Iters.size(); i != e; ++i)
Iters[i].advanceTo(Start);
PrevPos = Start;
}
MachineFunction::const_iterator MFI = MF->getBlockNumbered(MBBNum);
BlockInterference *BI = &Blocks[MBBNum];
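  // BI->First and BI->Last will bracket the interference in this block: the
  // earliest and latest interfering slot indexes, or invalid if none is found.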
ArrayRef<SlotIndex> RegMaskSlots;
ArrayRef<const uint32_t*> RegMaskBits;
for (;;) {
BI->Tag = Tag;
BI->First = BI->Last = SlotIndex();
// Check for first interference.
for (unsigned i = 0, e = Iters.size(); i != e; ++i) {
Iter &I = Iters[i];
if (!I.valid())
continue;
SlotIndex StartI = I.start();
if (StartI >= Stop)
continue;
if (!BI->First.isValid() || StartI < BI->First)
BI->First = StartI;
}
// Also check for register mask interference.
RegMaskSlots = LIS->getRegMaskSlotsInBlock(MBBNum);
RegMaskBits = LIS->getRegMaskBitsInBlock(MBBNum);
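    // RegMaskSlots and RegMaskBits are parallel arrays: the slot index of each
    // register mask in the block and its preserved-register bit vector.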
SlotIndex Limit = BI->First.isValid() ? BI->First : Stop;
for (unsigned i = 0, e = RegMaskSlots.size();
i != e && RegMaskSlots[i] < Limit; ++i)
if (maskClobber(RegMaskBits[i], PhysReg)) {
// Register mask i clobbers PhysReg before the LIU interference.
BI->First = RegMaskSlots[i];
break;
}
PrevPos = Stop;
if (BI->First.isValid())
break;
// No interference in this block? Go ahead and precompute the next block.
if (++MFI == MF->end())
return;
MBBNum = MFI->getNumber();
BI = &Blocks[MBBNum];
if (BI->Tag == Tag)
return;
tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
}
// Check for last interference in block.
for (unsigned i = 0, e = Iters.size(); i != e; ++i) {
Iter &I = Iters[i];
if (!I.valid() || I.start() >= Stop)
continue;
I.advanceTo(Stop);
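    // advanceTo(Stop) may leave the iterator on a segment that begins at or
    // after Stop, or run past the last segment. In that case the last
    // interference in this block is the previous segment, so back up to read
    // its stop index and restore the iterator afterwards.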
bool Backup = !I.valid() || I.start() >= Stop;
if (Backup)
--I;
SlotIndex StopI = I.stop();
if (!BI->Last.isValid() || StopI > BI->Last)
BI->Last = StopI;
if (Backup)
++I;
}
// Also check for register mask interference.
SlotIndex Limit = BI->Last.isValid() ? BI->Last : Start;
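  // Walk the register masks backwards from the end of the block; the latest
  // mask after Limit that clobbers PhysReg becomes the last interference,
  // modeled as a dead def.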
for (unsigned i = RegMaskSlots.size(); i && RegMaskSlots[i-1] > Limit; --i)
if (maskClobber(RegMaskBits[i-1], PhysReg)) {
// Register mask i-1 clobbers PhysReg after the LIU interference.
// Model the regmask clobber as a dead def.
BI->Last = RegMaskSlots[i-1].getDeadSlot();
break;
    }
}