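// Per-value-number bookkeeping for rematerialization: the original and cloned
// defining instructions, the remat ids handed down to
// rewriteInstructionsForSpills, and a bitvector of val#s whose defs may be
// deleted once every use has been rematerialized.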
ReMatOrigDefs.resize(NumValNums, NULL);
SmallVector<int, 4> ReMatIds;
ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
BitVector ReMatDelete(NumValNums);
unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
// Spilling a split live interval. It cannot be split any further, and it is
// guaranteed to be a single val# / range interval.
if (vrm.getPreSplitReg(li.reg)) {
vrm.setIsSplitFromReg(li.reg, 0);
// Unset the split kill marker on the last use.
unsigned KillIdx = vrm.getKillPoint(li.reg);
if (KillIdx) {
MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
assert(KillMI && "Last use disappeared?");
int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
assert(KillOp != -1 && "Last use disappeared?");
KillMI->getOperand(KillOp).setIsKill(false);
vrm.removeKillPoint(li.reg);
}
bool DefIsReMat = vrm.isReMaterialized(li.reg);
Slot = vrm.getStackSlot(li.reg);
assert(Slot != VirtRegMap::MAX_STACK_SLOT);
MachineInstr *ReMatDefMI = DefIsReMat ?
vrm.getReMaterializedMI(li.reg) : NULL;
int LdSlot = 0;
bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
bool isLoad = isLoadSS ||
(DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
bool IsFirstRange = true;
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// If this is a split live interval with multiple ranges, it means there
// are two-address instructions that re-defined the value. Only the
// first def can be rematerialized!
if (IsFirstRange) {
// Note ReMatOrigDefMI has already been deleted.
rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
false, vrm, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
MBBVRegsMap, NewLIs);
} else {
rewriteInstructionsForSpills(li, false, I, NULL, 0,
Slot, 0, false, false, false,
false, vrm, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
MBBVRegsMap, NewLIs);
}
IsFirstRange = false;
}
handleSpilledImpDefs(li, vrm, rc, NewLIs);
return NewLIs;
}
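// Non-pre-split path: decide whether to try splitting the interval at basic
// block boundaries instead of spilling it outright. Splitting is only
// attempted when the interval spans more than one MBB and the SplitLimit
// threshold (if any) has not been reached.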
bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
TrySplit = false;
if (TrySplit)
++numSplits;
bool NeedStackSlot = false;
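// Scan all value numbers: remember which defs can be rematerialized (keeping
// a clone of the defining instruction) and whether a stack slot is still
// needed for uses that cannot be covered by rematerialization.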
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
i != e; ++i) {
const VNInfo *VNI = *i;
unsigned VN = VNI->id;
if (VNI->isUnused())
continue; // Dead val#.
// Is the def for the val# rematerializable?
MachineInstr *ReMatDefMI = VNI->isDefAccurate()
? getInstructionFromIndex(VNI->def) : 0;
bool dummy;
if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
// Remember how to remat the def of this val#.
ReMatOrigDefs[VN] = ReMatDefMI;
// Original def may be modified so we have to make a copy here.
MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
ClonedMIs.push_back(Clone);
ReMatDefs[VN] = Clone;
bool CanDelete = true;
if (VNI->hasPHIKill()) {
// A kill is a phi node; not all of its uses can be rematerialized, so the
// def must not be deleted.
CanDelete = false;
// Need a stack slot if there is any live range where uses cannot be
// rematerialized.
NeedStackSlot = true;
}
if (CanDelete)
ReMatDelete.set(VN);
} else {
// Need a stack slot if there is any live range where uses cannot be
// rematerialized.
NeedStackSlot = true;
}
}
// One stack slot per live interval.
if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
Slot = vrm.assignVirt2StackSlot(li.reg);
// This case only occurs when the prealloc splitter has already assigned
// a stack slot to this vreg.
else
Slot = vrm.getStackSlot(li.reg);
}
// Create new intervals and rewrite defs and uses.
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
bool DefIsReMat = ReMatDefMI != NULL;
bool CanDelete = ReMatDelete[I->valno->id];
int LdSlot = 0;
bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
bool isLoad = isLoadSS ||
(DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
CanDelete, vrm, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
MBBVRegsMap, NewLIs);
}
// Insert spills / restores if we are splitting.
if (!TrySplit) {
handleSpilledImpDefs(li, vrm, rc, NewLIs);
return NewLIs;
}
SmallPtrSet<LiveInterval*, 4> AddedKill;
SmallVector<unsigned, 2> Ops;
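// First pass over the recorded spill points: where possible, fold the store
// into the defining instruction; otherwise ask the spiller (via VirtRegMap)
// to insert an explicit spill after the def.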
if (NeedStackSlot) {
int Id = SpillMBBs.find_first();
while (Id != -1) {
std::vector<SRInfo> &spills = SpillIdxes[Id];
for (unsigned i = 0, e = spills.size(); i != e; ++i) {
int index = spills[i].index;
unsigned VReg = spills[i].vreg;
LiveInterval &nI = getOrCreateInterval(VReg);
bool isReMat = vrm.isReMaterialized(VReg);
MachineInstr *MI = getInstructionFromIndex(index);
bool CanFold = false;
bool FoundUse = false;
Ops.clear();
if (spills[i].canFold) {
CanFold = true;
for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
MachineOperand &MO = MI->getOperand(j);
if (!MO.isReg() || MO.getReg() != VReg)
continue;
Ops.push_back(j);
if (MO.isDef())
continue;
if (isReMat ||
(!FoundUse && !alsoFoldARestore(Id, index, VReg,
RestoreMBBs, RestoreIdxes))) {
// MI has two-address uses of the same register. If the use
// isn't the first and only use in the BB, then we can't fold
// it. FIXME: Move this to rewriteInstructionsForSpills.
CanFold = false;
break;
}
FoundUse = true;
}
}
// Fold the store into the def if possible.
bool Folded = false;
if (CanFold && !Ops.empty()) {
if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
Folded = true;
if (FoundUse) {
// Also folded uses, do not issue a load.
eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
}
nI.removeRange(getDefIndex(index), getStoreIndex(index));
}
}
// Otherwise tell the spiller to issue a spill.
if (!Folded) {
LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
bool isKill = LR->end == getStoreIndex(index);
if (!MI->registerDefIsDead(nI.reg))
// No need to spill a dead def.
vrm.addSpillPoint(VReg, isKill, MI);
if (isKill)
AddedKill.insert(&nI);
}
}
Id = SpillMBBs.find_next(Id);
}
}
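// Second pass over the recorded restore points: try to fold the reload (or
// the rematerialized def, if it is itself foldable as a load) into the using
// instruction; otherwise record a restore point for the spiller.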
int Id = RestoreMBBs.find_first();
while (Id != -1) {
std::vector<SRInfo> &restores = RestoreIdxes[Id];
for (unsigned i = 0, e = restores.size(); i != e; ++i) {
int index = restores[i].index;
if (index == -1)
continue;
unsigned VReg = restores[i].vreg;
LiveInterval &nI = getOrCreateInterval(VReg);
bool isReMat = vrm.isReMaterialized(VReg);
MachineInstr *MI = getInstructionFromIndex(index);
bool CanFold = false;
Ops.clear();
if (restores[i].canFold) {
CanFold = true;
for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
MachineOperand &MO = MI->getOperand(j);
if (!MO.isReg() || MO.getReg() != VReg)
continue;
if (MO.isDef()) {
// If this restore were to be folded, it would have been folded
// already.
CanFold = false;
break;
}
Ops.push_back(j);
}
}
// Fold the load into the use if possible.
bool Folded = false;
if (CanFold && !Ops.empty()) {
if (!isReMat)
Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
else {
MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
int LdSlot = 0;
bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
// If the rematerializable def is a load, also try to fold it.
if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
Ops, isLoadSS, LdSlot, VReg);
if (!Folded) {
unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
if (ImpUse) {
// Re-matting an instruction with virtual register use. Add the
// register as an implicit use on the use MI and update the register
// interval's spill weight to HUGE_VALF to prevent it from being
// spilled.
LiveInterval &ImpLi = getInterval(ImpUse);
ImpLi.weight = HUGE_VALF;
MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
}
}
}
}
// If folding is not possible / failed, then tell the spiller to issue a
// load / rematerialization for us.
if (Folded)
nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
else
vrm.addRestorePoint(VReg, MI);
}
Id = RestoreMBBs.find_next(Id);
}
// Finalize intervals: add kills, finalize spill weights, and filter out
// dead intervals.
std::vector<LiveInterval*> RetNewLIs;
for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
LiveInterval *LI = NewLIs[i];
if (!LI->empty()) {
LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
if (!AddedKill.count(LI)) {
LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
unsigned LastUseIdx = getBaseIndex(LR->end);
MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
assert(UseIdx != -1);
if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
LastUse->getOperand(UseIdx).setIsKill();
vrm.addKillPoint(LI->reg, LastUseIdx);
}
}
RetNewLIs.push_back(LI);
}
}
handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
if (allocatableRegs_[*AS] && hasInterval(*AS))
return true;
return false;
}
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
// Find the largest super-register that is allocatable.
unsigned BestReg = Reg;
for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
unsigned SuperReg = *AS;
if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
BestReg = SuperReg;
break;
}
}
return BestReg;
}
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflicts with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
unsigned PhysReg) const {
unsigned NumConflicts = 0;
const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
E = mri_->reg_end(); I != E; ++I) {
MachineOperand &O = I.getOperand();
MachineInstr *MI = O.getParent();
unsigned Index = getInstructionIndex(MI);
if (pli.liveAt(Index))
++NumConflicts;
}
return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
unsigned PhysReg, VirtRegMap &vrm) {
unsigned SpillReg = getRepresentativeReg(PhysReg);
for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
// If a register aliases PhysReg but is not a sub-register of the chosen
// representative super-register, assert, since we can't handle that case
// yet.
assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
tri_->isSuperRegister(*AS, SpillReg));
bool Cut = false;
LiveInterval &pli = getInterval(SpillReg);
SmallPtrSet<MachineInstr*, 8> SeenMIs;
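// Visit each instruction that reads or writes li.reg exactly once. If the
// representative physreg's interval is live at that point, record an
// emergency spill there and trim the physreg's (and its sub-registers')
// ranges around that instruction.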
for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
E = mri_->reg_end(); I != E; ++I) {
MachineOperand &O = I.getOperand();
MachineInstr *MI = O.getParent();
if (SeenMIs.count(MI))
continue;
SeenMIs.insert(MI);
unsigned Index = getInstructionIndex(MI);
if (pli.liveAt(Index)) {
vrm.addEmergencySpill(SpillReg, MI);
unsigned StartIdx = getLoadIndex(Index);
unsigned EndIdx = getStoreIndex(Index)+1;
if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
pli.removeRange(StartIdx, EndIdx);
Cut = true;
} else {
cerr << "Ran out of registers during register allocation!\n";
if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
cerr << "Please check your inline asm statement for invalid "
<< "constraints:\n";
MI->print(cerr.stream(), tm_);
}
exit(1);
}
for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
if (!hasInterval(*AS))
continue;
LiveInterval &spli = getInterval(*AS);
if (spli.liveAt(Index))
spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
}
}
}
return Cut;
}
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
MachineInstr* startInst) {
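// Create a new value defined at startInst and extend its live range to the
// end of the containing basic block, marking it as killed by a PHI there.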
LiveInterval& Interval = getOrCreateInterval(reg);
VNInfo* VN = Interval.getNextValue(
getInstructionIndex(startInst) + InstrSlots::DEF,
startInst, true, getVNInfoAllocator());
VN->setHasPHIKill(true);
VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
getMBBEndIdx(startInst->getParent()) + 1, VN);
Interval.addRange(LR);
return LR;
}