unsigned SubIdx = MO.getSubReg();
if (VRM.isAssignedReg(VirtReg)) {
// This virtual register was assigned a physreg!
unsigned Phys = VRM.getPhys(VirtReg);
if (MO.isDef())
ReusedOperands.markClobbered(Phys);
// If it's a split live interval, insert a reload for the first use
// unless it's previously defined in the MBB.
unsigned SplitReg = VRM.getPreSplitReg(VirtReg);
if (SplitReg) {
if (ReloadedSplits.insert(VirtReg)) {
bool HasUse = MO.isUse();
// If it's a def, we don't need to reload the value unless it's
// a two-address instruction.
if (!HasUse) {
for (unsigned j = i+1; j != e; ++j) {
MachineOperand &MOJ = MI.getOperand(j);
if (MOJ.isRegister() && MOJ.getReg() == VirtReg) {
HasUse = true;
break;
}
}
}
if (HasUse) {
if (VRM.isReMaterialized(VirtReg)) {
MRI->reMaterialize(MBB, &MI, Phys,
VRM.getReMaterializedMI(VirtReg));
++NumReMats;
} else {
const TargetRegisterClass* RC = RegMap->getRegClass(VirtReg);
MRI->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg), RC);
++NumLoads;
}
// This invalidates Phys.
Spills.ClobberPhysReg(Phys);
UpdateKills(*prior(MII), RegKills, KillOps);
DOUT << '\t' << *prior(MII);
}
}
}
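// Rewrite the operand to refer to the assigned physreg, or to its
// addressed sub-register if the operand carries a sub-register index.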
unsigned RReg = SubIdx ? MRI->getSubReg(Phys, SubIdx) : Phys;
MI.getOperand(i).setReg(RReg);
continue;
}
// This virtual register is now known to be a spilled value.
if (!MO.isUse())
continue; // Handle defs in the loop below (handle use&def here though)
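// Figure out where the value currently lives: a re-materialization id if
// the def can simply be recomputed, otherwise the assigned stack slot.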
bool DoReMat = VRM.isReMaterialized(VirtReg);
int SSorRMId = DoReMat
? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
int ReuseSlot = SSorRMId;
// Check to see if this stack slot is available.
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
if (!PhysReg && DoReMat) {
// This use is rematerializable. But perhaps the value is available in
// a register if the definition is not deleted. If so, check if we can
// reuse the value.
ReuseSlot = VRM.getStackSlot(VirtReg);
if (ReuseSlot != VirtRegMap::NO_STACK_SLOT)
PhysReg = Spills.getSpillSlotOrReMatPhysReg(ReuseSlot);
}
// If this is a sub-register use, make sure the reuse register is in the
// right register class. For example, for x86 not all of the 32-bit
// registers have accessible sub-registers.
// Similarly so for EXTRACT_SUBREG. Consider this:
// EDI = op
// MOV32_mr fi#1, EDI
// ...
// = EXTRACT_SUBREG fi#1
// fi#1 is available in EDI, but it cannot be reused because it's not in
// the right register class.
if (PhysReg &&
(SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
const TargetRegisterClass* RC = RegMap->getRegClass(VirtReg);
if (!RC->contains(PhysReg))
PhysReg = 0;
}
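// The value is already available in PhysReg; try to reuse it rather than
// emitting a reload.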
if (PhysReg) {
// This spilled operand might be part of a two-address operand. If this
// is the case, then changing it will necessarily require changing the
// def part of the instruction as well. However, in some cases, we
// aren't allowed to modify the reused register. If none of these cases
// apply, reuse it.
bool CanReuse = true;
if (ti != -1 &&
MI.getOperand(ti).isRegister() &&
MI.getOperand(ti).getReg() == VirtReg) {
// Okay, we have a two address operand. We can reuse this physreg as
// long as we are allowed to clobber the value and there isn't an
// earlier def that has already clobbered the physreg.
CanReuse = Spills.canClobberPhysReg(ReuseSlot) &&
!ReusedOperands.isClobbered(PhysReg);
}
if (CanReuse) {
// If this stack slot value is already available, reuse it!
if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
else
DOUT << "Reusing SS#" << ReuseSlot;
DOUT << " from physreg "
<< MRI->getName(PhysReg) << " for vreg"
<< VirtReg << " instead of reloading into physreg "
<< MRI->getName(VRM.getPhys(VirtReg)) << "\n";
unsigned RReg = SubIdx ? MRI->getSubReg(PhysReg, SubIdx) : PhysReg;
MI.getOperand(i).setReg(RReg);
// The only technical detail we have is that we don't know that
// PhysReg won't be clobbered by a reloaded stack slot that occurs
// later in the instruction. In particular, consider 'op V1, V2'.
// If V1 is available in physreg R0, we would choose to reuse it
// here, instead of reloading it into the register the allocator
// indicated (say R1). However, V2 might have to be reloaded
// later, and it might indicate that it needs to live in R0. When
// this occurs, we need to have information available that
// indicates it is safe to use R1 for the reload instead of R0.
//
// To further complicate matters, we might conflict with an alias,
// or R0 and R1 might not be compatible with each other. In this
// case, we actually insert a reload for V1 in R1, ensuring that
// we can get at R0 or its alias.
ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
VRM.getPhys(VirtReg), VirtReg);
if (ti != -1)
// Only mark it clobbered if this is a use&def operand.
ReusedOperands.markClobbered(PhysReg);
++NumReused;
if (MI.getOperand(i).isKill() &&
ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
// This was the last use and the spilled value is still available
// for reuse. That means the spill was unnecessary!
MachineInstr* DeadStore = MaybeDeadStores[ReuseSlot];
if (DeadStore) {
DOUT << "Removed dead store:\t" << *DeadStore;
InvalidateKills(*DeadStore, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(DeadStore);
MaybeDeadStores[ReuseSlot] = NULL;
++NumDSE;
}
}
continue;
} // CanReuse
// Otherwise we have a situation where we have a two-address instruction
// whose mod/ref operand needs to be reloaded. This reload is already
// available in some register "PhysReg", but if we used PhysReg as the
// operand to our 2-addr instruction, the instruction would modify
// PhysReg. This isn't cool if something later uses PhysReg and expects
// to get its initial value.
//
// To avoid this problem, and to avoid doing a load right after a store,
// we emit a copy from PhysReg into the designated register for this
// operand.
unsigned DesignatedReg = VRM.getPhys(VirtReg);
assert(DesignatedReg && "Must map virtreg to physreg!");
// Note that, if we reused a register for a previous operand, the
// register we want to reload into might not actually be
// available. If this occurs, use the register indicated by the
// reuser.
if (ReusedOperands.hasReuses())
DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
Spills, MaybeDeadStores, RegKills, KillOps, VRM);
// If the mapped designated register is actually the physreg we have
// incoming, we don't need to insert a dead copy.
if (DesignatedReg == PhysReg) {
// If this stack slot value is already available, reuse it!
if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
else
DOUT << "Reusing SS#" << ReuseSlot;
DOUT << " from physreg " << MRI->getName(PhysReg) << " for vreg"
<< VirtReg
<< " instead of reloading into same physreg.\n";
unsigned RReg = SubIdx ? MRI->getSubReg(PhysReg, SubIdx) : PhysReg;
MI.getOperand(i).setReg(RReg);
ReusedOperands.markClobbered(RReg);
++NumReused;
continue;
}
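// Copy the value from the reused register into the designated one, and
// record that the stack slot value now also lives there.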
const TargetRegisterClass* RC = RegMap->getRegClass(VirtReg);
MF.setPhysRegUsed(DesignatedReg);
ReusedOperands.markClobbered(DesignatedReg);
MRI->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);
MachineInstr *CopyMI = prior(MII);
UpdateKills(*CopyMI, RegKills, KillOps);
// This invalidates DesignatedReg.
Spills.ClobberPhysReg(DesignatedReg);
Spills.addAvailable(ReuseSlot, &MI, DesignatedReg);
unsigned RReg =
SubIdx ? MRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
MI.getOperand(i).setReg(RReg);
DOUT << '\t' << *prior(MII);
++NumReused;
continue;
}
// Otherwise, reload it and remember that we have it.
PhysReg = VRM.getPhys(VirtReg);
assert(PhysReg && "Must map virtreg to physreg!");
// Note that, if we reused a register for a previous operand, the
// register we want to reload into might not actually be
// available. If this occurs, use the register indicated by the
// reuser.
if (ReusedOperands.hasReuses())
PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
Spills, MaybeDeadStores, RegKills, KillOps, VRM);
ReusedOperands.markClobbered(PhysReg);
if (DoReMat) {
MRI->reMaterialize(MBB, &MI, PhysReg, VRM.getReMaterializedMI(VirtReg));
++NumReMats;
} else {
const TargetRegisterClass* RC = RegMap->getRegClass(VirtReg);
MRI->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
++NumLoads;
}
// This invalidates PhysReg.
Spills.ClobberPhysReg(PhysReg);
// Any stores to this stack slot are not dead anymore.
if (!DoReMat)
MaybeDeadStores[SSorRMId] = NULL;
Spills.addAvailable(SSorRMId, &MI, PhysReg);
// Assumes this is the last use. IsKill will be unset if reg is reused
// unless it's a two-address operand.
if (TID->getOperandConstraint(i, TOI::TIED_TO) == -1)
MI.getOperand(i).setIsKill();
unsigned RReg = SubIdx ? MRI->getSubReg(PhysReg, SubIdx) : PhysReg;
MI.getOperand(i).setReg(RReg);
UpdateKills(*prior(MII), RegKills, KillOps);
DOUT << '\t' << *prior(MII);
}
DOUT << '\t' << MI;
// If we have folded references to memory operands, make sure we clear all
// physical registers that may contain the value of the spilled virtual
// register
for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
unsigned VirtReg = I->second.first;
VirtRegMap::ModRef MR = I->second.second;
DOUT << "Folded vreg: " << VirtReg << " MR: " << MR;
// If this is a split live interval, remember we have seen this so
// we do not need to reload it for later uses.
unsigned SplitReg = VRM.getPreSplitReg(VirtReg);
if (SplitReg)
ReloadedSplits.insert(VirtReg);
int SS = VRM.getStackSlot(VirtReg);
if (SS == VirtRegMap::NO_STACK_SLOT)
continue;
FoldedSS.insert(SS);
DOUT << " - StackSlot: " << SS << "\n";
// If this folded instruction is just a use, check to see if it's a
// straight load from the virt reg slot.
if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
int FrameIdx;
unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
if (DestReg && FrameIdx == SS) {
// If this spill slot is available, turn it into a copy (or nothing)
// instead of leaving it as a load!
if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
DOUT << "Promoted Load To Copy: " << MI;
if (DestReg != InReg) {
const TargetRegisterClass *RC = RegMap->getRegClass(VirtReg);
MRI->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
// Revisit the copy so we make sure to notice the effects of the
// operation on the destreg (either needing to RA it if it's
// virtual or needing to clobber any values if it's physical).
NextMII = &MI;
--NextMII; // backtrack to the copy.
BackTracked = true;
} else
DOUT << "Removing now-noop copy: " << MI;
MBB.erase(&MI);
Erased = true;
goto ProcessNextInst;
}
} else {
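// The folded reference is not a straight load. If the slot value is
// available in a register, try to unfold the memory operand instead.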
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
SmallVector<MachineInstr*, 4> NewMIs;
if (PhysReg &&
MRI->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
MBB.insert(MII, NewMIs[0]);
MBB.erase(&MI);
Erased = true;
--NextMII; // backtrack to the unfolded instruction.
BackTracked = true;
goto ProcessNextInst;
}
}
}
// If this reference is not a use, any previous store is now dead.
// Otherwise, the store to this stack slot is not dead anymore.
MachineInstr* DeadStore = MaybeDeadStores[SS];
if (DeadStore) {
bool isDead = !(MR & VirtRegMap::isRef);
MachineInstr *NewStore = NULL;
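// The slot is both read and written here. If the value is still
// available in PhysReg, try to unfold the memory operand so the
// earlier store can be deleted as dead.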
if (MR & VirtRegMap::isModRef) {
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
SmallVector<MachineInstr*, 4> NewMIs;
if (PhysReg &&
DeadStore->findRegisterUseOperandIdx(PhysReg, true) != -1 &&
MRI->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
MBB.insert(MII, NewMIs[0]);
NewStore = NewMIs[1];
MBB.insert(MII, NewStore);
MBB.erase(&MI);
Erased = true;
--NextMII;
--NextMII; // backtrack to the unfolded instruction.
BackTracked = true;
isDead = true;
}
}
if (isDead) { // Previous store is dead.
// If we get here, the store is dead, nuke it now.
DOUT << "Removed dead store:\t" << *DeadStore;
InvalidateKills(*DeadStore, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(DeadStore);
MBB.erase(DeadStore);
if (!NewStore)
++NumDSE;
}
MaybeDeadStores[SS] = NULL;
if (NewStore) {
// Treat this store as a spill merged into a copy. That makes the
// stack slot value available.
VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
goto ProcessNextInst;
}
}
// If the spill slot value is available, and this is a new definition of
// the value, the value is not available anymore.
if (MR & VirtRegMap::isMod) {
// Notice that the value in this stack slot has been modified.
Spills.ModifyStackSlotOrReMat(SS);
// If this is *just* a mod of the value, check to see if this is just a
// store to the spill slot (i.e. the spill got merged into the copy). If
// so, realize that the vreg is available now, and add the store to the
// MaybeDeadStore info.
int StackSlot;
if (!(MR & VirtRegMap::isRef)) {
if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
assert(MRegisterInfo::isPhysicalRegister(SrcReg) &&
"Src hasn't been allocated yet?");
// Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
// this as a potentially dead store in case there is a subsequent
// store into the stack slot without a read from it.
MaybeDeadStores[StackSlot] = &MI;
// If the stack slot value was previously available in some other
// register, change it now. Otherwise, make the register available
// in SrcReg.
Spills.addAvailable(StackSlot, &MI, SrcReg, false/*don't clobber*/);
}
}
}
}
// Process all of the spilled defs.
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!(MO.isRegister() && MO.getReg() && MO.isDef()))
continue;
unsigned VirtReg = MO.getReg();
if (!MRegisterInfo::isVirtualRegister(VirtReg)) {
// Check to see if this is a noop copy. If so, eliminate the
// instruction before considering the dest reg to be changed.
unsigned Src, Dst;
if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
++NumDCE;
DOUT << "Removing now-noop copy: " << MI;
MBB.erase(&MI);
Erased = true;
Spills.disallowClobberPhysReg(VirtReg);
goto ProcessNextInst;
}
// If it's not a no-op copy, it clobbers the value in the destreg.
Spills.ClobberPhysReg(VirtReg);
ReusedOperands.markClobbered(VirtReg);
// Check to see if this instruction is a load from a stack slot into
// a register. If so, this provides the stack slot value in the reg.
int FrameIdx;
if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
assert(DestReg == VirtReg && "Unknown load situation!");
// If it is a folded reference, then it's not safe to clobber.
bool Folded = FoldedSS.count(FrameIdx);
// Otherwise, if it wasn't available, remember that it is now!
Spills.addAvailable(FrameIdx, &MI, DestReg, !Folded);
goto ProcessNextInst;
}
continue;
}
unsigned SubIdx = MO.getSubReg();
bool DoReMat = VRM.isReMaterialized(VirtReg);
if (DoReMat)
ReMatDefs.insert(&MI);
// The only vregs left are stack slot definitions.
int StackSlot = VRM.getStackSlot(VirtReg);
const TargetRegisterClass *RC = RegMap->getRegClass(VirtReg);
// If this def is part of a two-address operand, make sure to execute
// the store from the correct physical register.
unsigned PhysReg;
int TiedOp = MI.getInstrDescriptor()->findTiedToSrcOperand(i);
if (TiedOp != -1) {
PhysReg = MI.getOperand(TiedOp).getReg();
if (SubIdx) {
unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, MRI);
assert(SuperReg && MRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
"Can't find corresponding super-register!");
PhysReg = SuperReg;
}
} else {
PhysReg = VRM.getPhys(VirtReg);
if (ReusedOperands.isClobbered(PhysReg)) {
// Another def has taken the assigned physreg. It must have been a
// use&def which got it due to reuse. Undo the reuse!
PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
Spills, MaybeDeadStores, RegKills, KillOps, VRM);
}
}
MF.setPhysRegUsed(PhysReg);
unsigned RReg = SubIdx ? MRI->getSubReg(PhysReg, SubIdx) : PhysReg;
ReusedOperands.markClobbered(RReg);
MI.getOperand(i).setReg(RReg);
if (!MO.isDead()) {
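// The def is not dead, so the value must actually be stored back to its
// stack slot.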
MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, LastStore,
Spills, ReMatDefs, RegKills, KillOps, VRM);
// Check to see if this is a noop copy. If so, eliminate the
// instruction before considering the dest reg to be changed.
{
unsigned Src, Dst;
if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
++NumDCE;
DOUT << "Removing now-noop copy: " << MI;
MBB.erase(&MI);
Erased = true;
UpdateKills(*LastStore, RegKills, KillOps);
goto ProcessNextInst;
}
}
}
}
ProcessNextInst:
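// Unless MI was erased or we backtracked, bring the kill-flag
// bookkeeping up to date for the instructions just processed.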
if (!Erased && !BackTracked)
for (MachineBasicBlock::iterator II = MI; II != NextMII; ++II)
UpdateKills(*II, RegKills, KillOps);
MII = NextMII;
}
}
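// Create the spiller implementation selected by the SpillerOpt command
// line option.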
llvm::Spiller* llvm::createSpiller() {
switch (SpillerOpt) {
default: assert(0 && "Unreachable!");
case local:
return new LocalSpiller();
case simple:
return new SimpleSpiller();
}
}