MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
CopyMI = MI;
handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
getOrCreateInterval(MO.getReg()), CopyMI);
for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
// If MI also modifies the sub-register explicitly, avoid processing it
// more than once. Do not pass in TRI here so it checks for exact match.
if (!MI->modifiesRegister(*AS))
handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
getOrCreateInterval(*AS), 0);
}
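/// handleLiveInRegister - Create the live range for a register that is live
/// in to MBB: the range starts at the top of the block and extends to the
/// first kill or redefinition, or through the block if neither is found.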
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
MachineInstrIndex MIIdx,
LiveInterval &interval, bool isAlias) {
DEBUG({
errs() << "\t\tlivein register: ";
printRegName(interval.reg, tri_);
});
// Look for kills, if it reaches a def before it's killed, then it shouldn't
// be considered a livein.
MachineBasicBlock::iterator mi = MBB->begin();
MachineInstrIndex baseIndex = MIIdx;
MachineInstrIndex start = baseIndex;
while (baseIndex.getVecIndex() < i2miMap_.size() &&
getInstructionFromIndex(baseIndex) == 0)
baseIndex = getNextIndex(baseIndex);
MachineInstrIndex end = baseIndex;
bool SeenDefUse = false;
while (mi != MBB->end()) {
if (mi->killsRegister(interval.reg, tri_)) {
end = getNextSlot(getUseIndex(baseIndex));
SeenDefUse = true;
break;
} else if (mi->modifiesRegister(interval.reg, tri_)) {
// Another instruction redefines the register before it is ever read.
// Then the register is essentially dead at the instruction that defines
// it. Hence its interval is:
// [defSlot(def), defSlot(def)+1)
end = getNextSlot(getDefIndex(start));
SeenDefUse = true;
break;
}
baseIndex = getNextIndex(baseIndex);
++mi;
if (mi != MBB->end()) {
while (baseIndex.getVecIndex() < i2miMap_.size() &&
getInstructionFromIndex(baseIndex) == 0)
baseIndex = getNextIndex(baseIndex);
}
}
// Live-in register might not be used at all.
if (!SeenDefUse) {
if (isAlias)
end = getNextSlot(getDefIndex(MIIdx));
else
end = baseIndex;
}
VNInfo *vni =
interval.getNextValue(MachineInstrIndex(MBB->getNumber()),
0, false, VNInfoAllocator);
vni->setIsPHIDef(true);
LiveRange LR(start, end, vni);
interval.addRange(LR);
LR.valno->addKill(end);
}
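/// isProfitableToCoalesce - Decide whether it is worth coalescing the copies
/// feeding a phi join early: collect the identity copies and the conflicting
/// ones, and report profit only when the identity copies outnumber the rest.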
bool
LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt, LiveInterval &SrcInt,
SmallVector<MachineInstr*,16> &IdentCopies,
SmallVector<MachineInstr*,16> &OtherCopies) {
bool HaveConflict = false;
unsigned NumIdent = 0;
for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(SrcInt.reg),
re = mri_->reg_end(); ri != re; ++ri) {
MachineOperand &O = ri.getOperand();
if (!O.isDef())
continue;
MachineInstr *MI = &*ri;
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
return false;
if (SrcReg != DstInt.reg) {
OtherCopies.push_back(MI);
HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
} else {
IdentCopies.push_back(MI);
++NumIdent;
}
}
if (!HaveConflict)
return false; // Let coalescer handle it
return IdentCopies.size() > OtherCopies.size();
}
void LiveIntervals::performEarlyCoalescing() {
if (!EarlyCoalescing)
return;
/// Perform early coalescing: eliminate copies which feed into phi joins
/// and whose sources are defined by the phi joins.
for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
MachineInstr *Join = phiJoinCopies[i];
if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
break;
unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
bool isMove = tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
#ifndef NDEBUG
assert(isMove && "PHI join instruction must be a move!");
#else
isMove = isMove;
#endif
LiveInterval &DstInt = getInterval(PHIDst);
LiveInterval &SrcInt = getInterval(PHISrc);
SmallVector<MachineInstr*, 16> IdentCopies;
SmallVector<MachineInstr*, 16> OtherCopies;
if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies))
continue;
DEBUG(errs() << "PHI Join: " << *Join);
assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
VNInfo *VNI = DstInt.getValNumInfo(0);
// Change the non-identity copies to directly target the phi destination.
for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
MachineInstr *PHICopy = OtherCopies[i];
DEBUG(errs() << "Moving: " << *PHICopy);
MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
MachineInstrIndex DefIndex = getDefIndex(MIIndex);
LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
MachineInstrIndex StartIndex = SLR->start;
MachineInstrIndex EndIndex = SLR->end;
// Delete val# defined by the now identity copy and add the range from
// beginning of the mbb to the end of the range.
SrcInt.removeValNo(SLR->valno);
DEBUG(errs() << " added range [" << StartIndex << ','
<< EndIndex << "] to reg" << DstInt.reg << '\n');
if (DstInt.liveAt(StartIndex))
DstInt.removeRange(StartIndex, EndIndex);
VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
VNInfoAllocator);
NewVNI->setHasPHIKill(true);
DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
MachineOperand &MO = PHICopy->getOperand(j);
if (!MO.isReg() || MO.getReg() != PHISrc)
continue;
MO.setReg(PHIDst);
}
}
// Now let's eliminate all the would-be identity copies.
for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
MachineInstr *PHICopy = IdentCopies[i];
DEBUG(errs() << "Coalescing: " << *PHICopy);
MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
MachineInstrIndex DefIndex = getDefIndex(MIIndex);
LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
MachineInstrIndex StartIndex = SLR->start;
MachineInstrIndex EndIndex = SLR->end;
// Delete val# defined by the now identity copy and add the range from
// beginning of the mbb to the end of the range.
SrcInt.removeValNo(SLR->valno);
RemoveMachineInstrFromMaps(PHICopy);
PHICopy->eraseFromParent();
DEBUG(errs() << " added range [" << StartIndex << ','
<< EndIndex << "] to reg" << DstInt.reg << '\n');
DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
}
// Remove the phi join and update the phi block liveness.
MachineInstrIndex MIIndex = getInstructionIndex(Join);
MachineInstrIndex UseIndex = getUseIndex(MIIndex);
MachineInstrIndex DefIndex = getDefIndex(MIIndex);
LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
DLR->valno->setCopy(0);
DLR->valno->setIsDefAccurate(false);
DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
SrcInt.removeRange(SLR->start, SLR->end);
assert(SrcInt.empty());
removeInterval(PHISrc);
RemoveMachineInstrFromMaps(Join);
Join->eraseFromParent();
++numCoalescing;
}
}
/// computeIntervals - computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
<< "********** Function: "
<< ((Value*)mf_->getFunction())->getName() << '\n');
SmallVector<unsigned, 8> UndefUses;
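// UndefUses records registers read by undef operands; empty intervals are
// created for them once every block has been scanned.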
for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
MBBI != E; ++MBBI) {
MachineBasicBlock *MBB = MBBI;
// Track the index of the current machine instr.
MachineInstrIndex MIIndex = getMBBStartIdx(MBB);
DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
// Create intervals for live-ins to this BB first.
for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
LE = MBB->livein_end(); LI != LE; ++LI) {
handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
// Multiple live-ins can alias the same register.
for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
if (!hasInterval(*AS))
handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
true);
}
// Skip over empty initial indices.
while (MIIndex.getVecIndex() < i2miMap_.size() &&
getInstructionFromIndex(MIIndex) == 0)
MIIndex = getNextIndex(MIIndex);
for (; MI != miEnd; ++MI) {
DEBUG(errs() << MIIndex << "\t" << *MI);
for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.getReg())
continue;
// handle register defs - build intervals
if (MO.isDef())
handleRegisterDef(MBB, MI, MIIndex, MO, i);
else if (MO.isUndef())
UndefUses.push_back(MO.getReg());
}
// Skip over the empty slots after each instruction.
unsigned Slots = MI->getDesc().getNumDefs();
if (Slots == 0)
Slots = 1;
while (Slots--)
MIIndex = getNextIndex(MIIndex);
// Skip over empty indices.
while (MIIndex.getVecIndex() < i2miMap_.size() &&
getInstructionFromIndex(MIIndex) == 0)
MIIndex = getNextIndex(MIIndex);
}
}
// Create empty intervals for registers defined by implicit_def's (except
// for those implicit_defs that define values which are live out of their
// blocks).
for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
unsigned UndefReg = UndefUses[i];
(void)getOrCreateInterval(UndefReg);
}
}
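/// findLiveInMBBs - Collect the basic blocks whose start index lies in the
/// range [Start, End). Returns true if any block was found.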
bool LiveIntervals::findLiveInMBBs(
MachineInstrIndex Start, MachineInstrIndex End,
SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
std::vector<IdxMBBPair>::const_iterator I =
std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
bool ResVal = false;
while (I != Idx2MBBMap.end()) {
if (I->first >= End)
break;
MBBs.push_back(I->second);
ResVal = true;
++I;
}
return ResVal;
}
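/// findReachableMBBs - Collect the successors of every basic block that is
/// fully contained in [Start, End). Returns true if any were found.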
bool LiveIntervals::findReachableMBBs(
MachineInstrIndex Start, MachineInstrIndex End,
SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
std::vector<IdxMBBPair>::const_iterator I =
std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
bool ResVal = false;
while (I != Idx2MBBMap.end()) {
if (I->first > End)
break;
MachineBasicBlock *MBB = I->second;
if (getMBBEndIdx(MBB) > End)
break;
for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
SE = MBB->succ_end(); SI != SE; ++SI)
MBBs.push_back(*SI);
ResVal = true;
++I;
}
return ResVal;
}
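/// createInterval - Allocate a new live interval for the given register.
/// Physical registers get an essentially infinite spill weight so they are
/// never picked for spilling.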
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
return new LiveInterval(reg, Weight);
}
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
LiveInterval *NewLI = createInterval(li->reg);
NewLI->Copy(*li, mri_, getVNInfoAllocator());
return NewLI;
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
if (!VNI->getCopy())
return 0;
if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
// If it's extracting out of a physical register, return the sub-register.
unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
return Reg;
} else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
return VNI->getCopy()->getOperand(2).getReg();
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
return SrcReg;
llvm_unreachable("Unrecognized copy instruction!");
return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
MachineInstr *MI) const {
unsigned RegOp = 0;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
if (Reg == 0 || Reg == li.reg)
continue;
if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
!allocatableRegs_[Reg])
continue;
// FIXME: For now, only remat MI with at most one register operand.
assert(!RegOp &&
"Can't rematerialize instruction with multiple register operand!");
RegOp = MO.getReg();
#ifndef NDEBUG
break;
#endif
}
return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
MachineInstrIndex UseIdx) const {
MachineInstrIndex Index = getInstructionIndex(MI);
VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
const VNInfo *ValNo, MachineInstr *MI,
SmallVectorImpl<LiveInterval*> &SpillIs,
bool &isLoad) {
if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
return true;
int FrameIdx = 0;
if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
// FIXME: Let target-specific isReallyTriviallyReMaterializable determine
// this but remember this is not safe to fold into a two-address
// instruction.
// This is a load from fixed stack slot. It can be rematerialized.
return true;
// If the target-specific rules don't identify an instruction as
// being trivially rematerializable, use some target-independent
// rules.
if (!MI->getDesc().isRematerializable() ||
!tii_->isTriviallyReMaterializable(MI)) {
if (!EnableAggressiveRemat)
return false;
// If the instruction accesses memory but the memoperands have been lost,
// we can't analyze it.
const TargetInstrDesc &TID = MI->getDesc();
if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
return false;
// Avoid instructions obviously unsafe for remat.
if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
return false;
// If the instruction accesses memory and the memory could be non-constant,
// assume the instruction is not rematerializable.
for (std::list<MachineMemOperand>::const_iterator
I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
const MachineMemOperand &MMO = *I;
if (MMO.isVolatile() || MMO.isStore())
return false;
const Value *V = MMO.getValue();
if (!V)
return false;
if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
if (!PSV->isConstant(mf_->getFrameInfo()))
return false;
} else if (!aa_->pointsToConstantMemory(V))
return false;
}
// If any of the registers accessed are non-constant, conservatively assume
// the instruction is not rematerializable.
unsigned ImpUse = 0;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg()) {
unsigned Reg = MO.getReg();
if (Reg == 0)
continue;
if (TargetRegisterInfo::isPhysicalRegister(Reg))
return false;
// Only allow one def, and that in the first operand.
if (MO.isDef() != (i == 0))
return false;
// Only allow constant-valued registers.
bool IsLiveIn = mri_->isLiveIn(Reg);
MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
E = mri_->def_end();
// For the def, it should be the only def of that register.
if (MO.isDef() && (next(I) != E || IsLiveIn))
return false;
if (MO.isUse()) {
// Only allow one use other register use, as that's all the
// remat mechanisms support currently.
if (Reg != li.reg) {
if (ImpUse == 0)
ImpUse = Reg;
else if (Reg != ImpUse)
return false;
}
// For the use, there should be only one associated def.
if (I != E && (next(I) != E || IsLiveIn))
return false;
}
}
}
}
unsigned ImpUse = getReMatImplicitUse(li, MI);
if (ImpUse) {
const LiveInterval &ImpLi = getInterval(ImpUse);
for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
re = mri_->use_end(); ri != re; ++ri) {
MachineInstr *UseMI = &*ri;
MachineInstrIndex UseIdx = getInstructionIndex(UseMI);
if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
continue;
if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
return false;
}
// If a register operand of the re-materialized instruction is going to
// be spilled next, then it's not legal to re-materialize this instruction.
for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
if (ImpUse == SpillIs[i]->reg)
return false;
}
return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
const VNInfo *ValNo, MachineInstr *MI) {
SmallVector<LiveInterval*, 4> Dummy1;
bool Dummy2;
return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
SmallVectorImpl<LiveInterval*> &SpillIs,
bool &isLoad) {
isLoad = false;
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
i != e; ++i) {
const VNInfo *VNI = *i;
if (VNI->isUnused())
continue; // Dead val#.
// Is the def for the val# rematerializable?
if (!VNI->isDefAccurate())
return false;
MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
bool DefIsLoad = false;
if (!ReMatDefMI ||
!isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
return false;
isLoad |= DefIsLoad;
}
return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
SmallVector<unsigned, 2> &Ops,
unsigned &MRInfo,
SmallVector<unsigned, 2> &FoldOps) {
MRInfo = 0;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
unsigned OpIdx = Ops[i];
MachineOperand &MO = MI->getOperand(OpIdx);
// FIXME: fold subreg use.
if (MO.getSubReg())
return true;
if (MO.isDef())
MRInfo |= (unsigned)VirtRegMap::isMod;
else {
// Filter out two-address use operand(s).
if (MI->isRegTiedToDefOperand(OpIdx)) {
MRInfo = VirtRegMap::isModRef;
continue;
}
MRInfo |= (unsigned)VirtRegMap::isRef;
}
FoldOps.push_back(OpIdx);
}
return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
VirtRegMap &vrm, MachineInstr *DefMI,
MachineInstrIndex InstrIdx,
SmallVector<unsigned, 2> &Ops,
bool isSS, int Slot, unsigned Reg) {
// If it is an implicit def instruction, just delete it.
if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
RemoveMachineInstrFromMaps(MI);
vrm.RemoveMachineInstrFromMaps(MI);
MI->eraseFromParent();
++numFolds;
return true;
}
// Filter the list of operand indexes that are to be folded. Abort if
// any operand will prevent folding.
unsigned MRInfo = 0;
SmallVector<unsigned, 2> FoldOps;
if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
return false;
// The only time it's safe to fold into a two address instruction is when
// it's folding reload and spill from / into a spill stack slot.
if (DefMI && (MRInfo & VirtRegMap::isMod))
return false;
MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
: tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
if (!fmi)
return false;
// Remember this instruction uses the spill slot.
if (isSS) vrm.addSpillSlotUse(Slot, fmi);
// Attempt to fold the memory reference into the instruction. If
// we can do this, we don't need to insert spill code.
MachineBasicBlock &MBB = *MI->getParent();
if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
vrm.transferRestorePts(MI, fmi);
vrm.transferEmergencySpills(MI, fmi);
i2miMap_[InstrIdx.getVecIndex()] = fmi;
mi2iMap_.erase(MI);
mi2iMap_[fmi] = InstrIdx;
MI = MBB.insert(MBB.erase(MI), fmi);
++numFolds;
return true;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
SmallVector<unsigned, 2> &Ops,
bool ReMat) const {
// Filter the list of operand indexes that are to be folded. Abort if
// any operand will prevent folding.
unsigned MRInfo = 0;
SmallVector<unsigned, 2> FoldOps;
if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
return false;
// It's only legal to remat for a use, not a def.
if (ReMat && (MRInfo & VirtRegMap::isMod))
return false;
return tii_->canFoldMemoryOperand(MI, FoldOps);
}
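/// intervalIsInOneMBB - Return true if every range of the given live interval
/// is contained within a single basic block.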
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
SmallPtrSet<MachineBasicBlock*, 4> MBBs;
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
std::vector<IdxMBBPair>::const_iterator II =
std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
if (II == Idx2MBBMap.end())
continue;
if (I->end > II->first) // crossing a MBB.
return false;
MBBs.insert(II->second);
if (MBBs.size() > 1)
return false;
}
return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
MachineInstr *MI, unsigned NewVReg,
VirtRegMap &vrm) {
// There is an implicit use. That means one of the other operands is
// being remat'ed and the remat'ed instruction has li.reg as a
// use operand. Make sure we rewrite that as well.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
if (!vrm.isReMaterialized(Reg))
continue;
MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
if (UseMO)
UseMO->setReg(NewVReg);
}
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
bool TrySplit, MachineInstrIndex index, MachineInstrIndex end,
MachineInstr *MI,
MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
unsigned Slot, int LdSlot,
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
VirtRegMap &vrm,
const TargetRegisterClass* rc,
SmallVector<int, 4> &ReMatIds,
unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
DenseMap<unsigned,unsigned> &MBBVRegsMap,
std::vector<LiveInterval*> &NewLIs) {
bool CanFold = false;
RestartInstruction:
for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
MachineOperand& mop = MI->getOperand(i);
if (!mop.isReg())
continue;
unsigned Reg = mop.getReg();
unsigned RegI = Reg;
if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
if (Reg != li.reg)
continue;
bool TryFold = !DefIsReMat;
bool FoldSS = true; // Default behavior unless it's a remat.
int FoldSlot = Slot;
if (DefIsReMat) {
// If this is the rematerializable definition MI itself and
// all of its uses are rematerialized, simply delete it.
DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
<< MI << '\n');
MI->eraseFromParent();
break;
}
// If def for this use can't be rematerialized, then try folding.
// If def is rematerializable and it's a load, also try folding.
TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
if (isLoad) {
// Try fold loads (from stack slot, constant pool, etc.) into uses.
FoldSS = isLoadSS;
FoldSlot = LdSlot;
}
}
// Scan all of the operands of this instruction rewriting operands
// to use NewVReg instead of li.reg as appropriate. We do this for
// two reasons:
//
// 1. If the instr reads the same spilled vreg multiple times, we
// want to reuse the NewVReg.
// 2. If the instr is a two-addr instruction, we are required to
// keep the src/dst regs pinned.
//
// Keep track of whether we replace a use and/or def so that we can
// create the spill interval with the appropriate range.
SmallVector<unsigned, 2> Ops;
Ops.push_back(i);
for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
const MachineOperand &MOj = MI->getOperand(j);
if (!MOj.isReg())
continue;
unsigned RegJ = MOj.getReg();
if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
continue;
if (RegJ == RegI) {
Ops.push_back(j);
if (!MOj.isUndef()) {
HasUse |= MOj.isUse();
HasDef |= MOj.isDef();
}
}
}
// Create a new virtual register for the spill interval.
// Create the new register now so we can map the fold instruction
// to the new register so when it is unfolded we get the correct
// answer.
bool CreatedNewVReg = false;
if (NewVReg == 0) {
NewVReg = mri_->createVirtualRegister(rc);
vrm.grow();
CreatedNewVReg = true;
}
if (!TryFold)
CanFold = false;
else {
// Do not fold load / store here if we are splitting. We'll find an
// optimal point to insert a load / store later.
if (!TrySplit) {
if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
Ops, FoldSS, FoldSlot, NewVReg)) {
// Folding the load/store can completely change the instruction in
// unpredictable ways, rescan it from the beginning.
if (FoldSS) {
// We need to give the new vreg the same stack slot as the
// spilled interval.
vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
}
HasUse = false;
HasDef = false;
CanFold = false;
if (isNotInMIMap(MI))
break;
goto RestartInstruction;
}
} else {
// We'll try to fold it later if it's profitable.
CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
}
}
mop.setReg(NewVReg);
if (mop.isImplicit())
rewriteImplicitOps(li, MI, NewVReg, vrm);
// Reuse NewVReg for other reads.
for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
MachineOperand &mopj = MI->getOperand(Ops[j]);
mopj.setReg(NewVReg);
if (mopj.isImplicit())
rewriteImplicitOps(li, MI, NewVReg, vrm);
}
if (CreatedNewVReg) {
if (DefIsReMat) {
vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
} else {
vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
}
if (!CanDelete || (HasUse && HasDef)) {
// If this is a two-addr instruction then its use operands are
// rematerializable but its def is not. It should be assigned a
// stack slot.
vrm.assignVirt2StackSlot(NewVReg, Slot);
}
} else {
vrm.assignVirt2StackSlot(NewVReg, Slot);
}
} else if (HasUse && HasDef &&
vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
// If this interval hasn't been assigned a stack slot (because earlier
// def is a deleted remat def), do it now.
assert(Slot != VirtRegMap::NO_STACK_SLOT);
vrm.assignVirt2StackSlot(NewVReg, Slot);
}
// Re-matting an instruction with virtual register use. Add the
// register as an implicit use on the use MI.
if (DefIsReMat && ImpUse)
MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
// Create a new register interval for this spill / remat.
LiveInterval &nI = getOrCreateInterval(NewVReg);
if (CreatedNewVReg) {
NewLIs.push_back(&nI);
MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
if (TrySplit)
vrm.setIsSplitFromReg(NewVReg, li.reg);
}
if (HasUse) {
if (CreatedNewVReg) {
LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
nI.getNextValue(MachineInstrIndex(), 0, false,
VNInfoAllocator));
nI.addRange(LR);
} else {
// Extend the split live interval to this def / use.
MachineInstrIndex End = getNextSlot(getUseIndex(index));
LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
nI.getValNumInfo(nI.getNumValNums()-1));
nI.addRange(LR);
}
}
if (HasDef) {
LiveRange LR(getDefIndex(index), getStoreIndex(index),
nI.getNextValue(MachineInstrIndex(), 0, false,
VNInfoAllocator));
nI.addRange(LR);
}
DEBUG({
errs() << "\t\t\t\tAdded new interval: ";
nI.print(errs(), tri_);
errs() << '\n';
});
}
return CanFold;
}
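/// anyKillInMBBAfterIdx - Return true if the given val# has a kill that falls
/// after Idx but before the end of MBB.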
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
const VNInfo *VNI,
MachineBasicBlock *MBB,
MachineInstrIndex Idx) const {
MachineInstrIndex End = getMBBEndIdx(MBB);
for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
if (VNI->kills[j].isPHIIndex())
continue;
MachineInstrIndex KillIdx = VNI->kills[j];
if (KillIdx > Idx && KillIdx < End)
return true;
}
return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
struct RewriteInfo {
MachineInstrIndex Index;
MachineInstr *MI;
bool HasUse;
bool HasDef;
RewriteInfo(MachineInstrIndex i, MachineInstr *mi, bool u, bool d)
: Index(i), MI(mi), HasUse(u), HasDef(d) {}
};
struct RewriteInfoCompare {
bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
return LHS.Index < RHS.Index;
}
};
}
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
unsigned Slot, int LdSlot,
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
VirtRegMap &vrm,
const TargetRegisterClass* rc,
SmallVector<int, 4> &ReMatIds,
DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
BitVector &RestoreMBBs,
DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
DenseMap<unsigned,unsigned> &MBBVRegsMap,
std::vector<LiveInterval*> &NewLIs) {
bool AllCanFold = true;
MachineInstrIndex start = getBaseIndex(I->start);
MachineInstrIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
// First collect all the def / use in this live range that will be rewritten.
// Make sure they are sorted according to instruction index.
std::vector<RewriteInfo> RewriteMIs;
for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
re = mri_->reg_end(); ri != re; ) {
MachineInstr *MI = &*ri;
MachineOperand &O = ri.getOperand();
++ri;
assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
MachineInstrIndex index = getInstructionIndex(MI);
if (index < start || index >= end)
continue;
if (O.isUndef())
// Must be defined by an implicit def. It should not be spilled. Note,
// this is for correctness reasons. e.g.
// 8   %reg1024<def> = IMPLICIT_DEF
// 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
// The live range [12, 14) is not part of the r1024 live interval since
// it's defined by an implicit def. It will not conflict with the live
// interval of r1025. Now suppose both registers are spilled; you could
// easily end up with both registers reloaded before
// the INSERT_SUBREG and both target registers would then overlap.
continue;
RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
}
std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
// Now rewrite the defs and uses.
for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
RewriteInfo &rwi = RewriteMIs[i];