"llvm/lib/CodeGen/TargetInstrInfoImpl.cpp" did not exist on "dcff2eb0e87ec6255ec08ce821bdc92adea8a755"
Newer
Older
/// PrepForUnfoldOpti - Turn a store folding instruction into a load folding
/// instruction. e.g.
///     xorl  %edi, %eax
///     movl  %eax, -32(%ebp)
///     movl  -36(%ebp), %eax
///     orl   %eax, -32(%ebp)
/// ==>
///     xorl  %edi, %eax
///     orl   -36(%ebp), %eax
///     mov   %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MII,
std::vector<MachineInstr*> &MaybeDeadStores,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps,
VirtRegMap &VRM) {
MachineFunction &MF = *MBB.getParent();
MachineInstr &MI = *MII;
unsigned UnfoldedOpc = 0;
unsigned UnfoldPR = 0;
unsigned UnfoldVR = 0;
int FoldedSS = VirtRegMap::NO_STACK_SLOT;
VirtRegMap::MI2VirtMapTy::const_iterator I, End;
for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
// Only transform a MI that folds a single register.
if (UnfoldedOpc)
return false;
UnfoldVR = I->second.first;
VirtRegMap::ModRef MR = I->second.second;
// MI2VirtMap can be updated, which invalidates the iterator.
// Increment the iterator first.
++I;
if (VRM.isAssignedReg(UnfoldVR))
continue;
// If this reference is not a use, any previous store is now dead.
// Otherwise, the store to this stack slot is not dead anymore.
FoldedSS = VRM.getStackSlot(UnfoldVR);
MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
if (DeadStore && (MR & VirtRegMap::isModRef)) {
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
if (!PhysReg || !DeadStore->readsRegister(PhysReg))
continue;
UnfoldPR = PhysReg;
UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
false, true);
}
}
if (!UnfoldedOpc)
return false;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
continue;
unsigned VirtReg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
continue;
if (VRM.isAssignedReg(VirtReg)) {
unsigned PhysReg = VRM.getPhys(VirtReg);
if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
return false;
} else if (VRM.isReMaterialized(VirtReg))
continue;
int SS = VRM.getStackSlot(VirtReg);
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
if (PhysReg) {
if (TRI->regsOverlap(PhysReg, UnfoldPR))
return false;
continue;
}
if (VRM.hasPhys(VirtReg)) {
PhysReg = VRM.getPhys(VirtReg);
if (!TRI->regsOverlap(PhysReg, UnfoldPR))
continue;
}
// Ok, we'll need to reload the value into a register which makes
// it impossible to perform the store unfolding optimization later.
// Let's see if it is possible to fold the load if the store is
// unfolded. This allows us to perform the store unfolding
// optimization.
SmallVector<MachineInstr*, 4> NewMIs;
if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
assert(NewMIs.size() == 1);
MachineInstr *NewMI = NewMIs.back();
NewMIs.clear();
int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
SmallVector<unsigned, 1> Ops;
Ops.push_back(Idx);
MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
if (FoldedMI) {
VRM.addSpillSlotUse(SS, FoldedMI);
if (!VRM.hasPhys(UnfoldVR))
VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
MII = MBB.insert(MII, FoldedMI);
InvalidateKills(MI, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(&MI);
MBB.erase(&MI);
MF.DeleteMachineInstr(NewMI);
return true;
}
MF.DeleteMachineInstr(NewMI);
}
}
return false;
}
/// CommuteToFoldReload -
/// Look for
/// r1 = load fi#1
/// r1 = op r1, r2<kill>
/// store r1, fi#1
///
/// If op is commutable and r2 is killed, then we can xform these to
/// r2 = op r2, fi#1
/// store r2, fi#1
bool LocalSpiller::CommuteToFoldReload(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MII,
unsigned VirtReg, unsigned SrcReg, int SS,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps,
const TargetRegisterInfo *TRI,
VirtRegMap &VRM) {
if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
return false;
MachineFunction &MF = *MBB.getParent();
MachineInstr &MI = *MII;
MachineBasicBlock::iterator DefMII = prior(MII);
MachineInstr *DefMI = DefMII;
const TargetInstrDesc &TID = DefMI->getDesc();
unsigned NewDstIdx;
if (DefMII != MBB.begin() &&
TID.isCommutable() &&
TII->CommuteChangesDestination(DefMI, NewDstIdx)) {
MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
unsigned NewReg = NewDstMO.getReg();
if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
return false;
MachineInstr *ReloadMI = prior(DefMII);
int FrameIdx;
unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
if (DestReg != SrcReg || FrameIdx != SS)
return false;
int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
if (UseIdx == -1)
return false;
int DefIdx = TID.getOperandConstraint(UseIdx, TOI::TIED_TO);
if (DefIdx == -1)
return false;
assert(DefMI->getOperand(DefIdx).isReg() &&
DefMI->getOperand(DefIdx).getReg() == SrcReg);
// Now commute def instruction.
MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
if (!CommutedMI)
return false;
SmallVector<unsigned, 1> Ops;
Ops.push_back(NewDstIdx);
MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
// Not needed since foldMemoryOperand returns new MI.
MF.DeleteMachineInstr(CommutedMI);
if (!FoldedMI)
return false;
VRM.addSpillSlotUse(SS, FoldedMI);
VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
// Insert new def MI and spill MI.
const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
MII = prior(MII);
MachineInstr *StoreMI = MII;
VRM.addSpillSlotUse(SS, StoreMI);
VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
MII = MBB.insert(MII, FoldedMI); // Update MII to backtrack.
// Delete all 3 old instructions.
InvalidateKills(*ReloadMI, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(ReloadMI);
MBB.erase(ReloadMI);
InvalidateKills(*DefMI, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(DefMI);
MBB.erase(DefMI);
InvalidateKills(MI, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(&MI);
MBB.erase(&MI);
// If NewReg was previously holding value of some SS, it's now clobbered.
// This has to be done now because it's a physical register. When this
// instruction is re-visited, it's ignored.
Spills.ClobberPhysReg(NewReg);
++NumCommutes;
return true;
}
return false;
}
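// For illustration, on x86 the transformation above might look like this
// (register and opcode names are only a sketch; the actual instructions are
// whatever the target's commute and fold tables produce):
//   %EAX = MOV32rm <fi#1>             ; r1 = load fi#1
//   %EAX = ADD32rr %EAX, %EBX<kill>   ; r1 = op r1, r2<kill>
//   MOV32mr <fi#1>, %EAX              ; store r1, fi#1
// ==>
//   %EBX = ADD32rm %EBX, <fi#1>       ; r2 = op r2, fi#1
//   MOV32mr <fi#1>, %EBX              ; store r2, fi#1
// The reload and the original def/store are erased, NewReg (%EBX here) is
// clobbered in Spills, and MII is left pointing at the folded instruction.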
/// findSuperReg - Find the SubReg's super-register of given register class
/// where its SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
unsigned SubIdx, const TargetRegisterInfo *TRI) {
for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
I != E; ++I) {
unsigned Reg = *I;
if (TRI->getSubReg(Reg, SubIdx) == SubReg)
return Reg;
}
return 0;
}
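// Usage sketch (x86-style names purely for illustration): with RC being the
// 32-bit GPR class, SubReg = AX and SubIdx naming the 16-bit sub-register
// slot, findSuperReg returns EAX because getSubReg(EAX, SubIdx) == AX. If no
// register in RC has SubReg at that index, it returns 0.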
/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
/// the last store to the same slot is now dead. If so, remove the last store.
void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MII,
int Idx, unsigned PhysReg, int StackSlot,
const TargetRegisterClass *RC,
bool isAvailable, MachineInstr *&LastStore,
AvailableSpills &Spills,
SmallSet<MachineInstr*, 4> &ReMatDefs,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps,
VirtRegMap &VRM) {
TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
MachineInstr *StoreMI = next(MII);
VRM.addSpillSlotUse(StackSlot, StoreMI);
DOUT << "Store:\t" << *StoreMI;
// If there is a dead store to this stack slot, nuke it now.
if (LastStore) {
DOUT << "Removed dead store:\t" << *LastStore;
++NumDSE;
SmallVector<unsigned, 2> KillRegs;
InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
MachineBasicBlock::iterator PrevMII = LastStore;
bool CheckDef = PrevMII != MBB.begin();
if (CheckDef)
--PrevMII;
VRM.RemoveMachineInstrFromMaps(LastStore);
MBB.erase(LastStore);
if (CheckDef) {
// Look at defs of killed registers on the store. Mark the defs
// as dead since the store has been deleted and they aren't
// being reused.
for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
bool HasOtherDef = false;
if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
MachineInstr *DeadDef = PrevMII;
if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
// FIXME: This assumes a remat def does not have side
// effects.
VRM.RemoveMachineInstrFromMaps(DeadDef);
MBB.erase(DeadDef);
++NumDRM;
}
}
}
}
}
// If the stack slot value was previously available in some other
// register, change it now. Otherwise, make the value available
// in PhysReg.
Spills.ModifyStackSlotOrReMat(StackSlot);
Spills.ClobberPhysReg(PhysReg);
Spills.addAvailable(StackSlot, PhysReg, isAvailable);
// Remember this store as the most recent one to this stack slot.
LastStore = StoreMI;
++NumStores;
}
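// Dead-store example for the code above (sketch): if fi#3 was last written
// by "MOV32mr <fi#3>, %EAX" and nothing read fi#3 since, spilling %ECX to
// fi#3 here deletes that earlier store (NumDSE), and any register the dead
// store killed gets its defining instruction marked dead; such a def may be
// deleted outright if it was a side-effect-free remat def (NumDRM).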
/// TransferDeadness - An identity copy definition is dead and is being
/// removed. Find the last def or use and mark it as dead / kill.
void LocalSpiller::TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
unsigned Reg, BitVector &RegKills,
std::vector<MachineOperand*> &KillOps) {
int LastUDDist = -1;
MachineInstr *LastUDMI = NULL;
for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
RE = RegInfo->reg_end(); RI != RE; ++RI) {
MachineInstr *UDMI = &*RI;
if (UDMI->getParent() != MBB)
continue;
DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
if (DI == DistanceMap.end() || DI->second > CurDist)
continue;
if ((int)DI->second < LastUDDist)
continue;
LastUDDist = DI->second;
LastUDMI = UDMI;
}
if (LastUDMI) {
const TargetInstrDesc &TID = LastUDMI->getDesc();
MachineOperand *LastUD = NULL;
for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = LastUDMI->getOperand(i);
if (!MO.isReg() || MO.getReg() != Reg)
continue;
if (!LastUD || (LastUD->isUse() && MO.isDef()))
LastUD = &MO;
if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1)
return;
}
if (LastUD->isDef())
LastUD->setIsDead();
else {
LastUD->setIsKill();
RegKills.set(Reg);
KillOps[Reg] = LastUD;
}
}
}
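// Example (sketch): when an identity copy "%EAX = MOV32rr %EAX" is deleted,
// TransferDeadness walks the uses/defs of the source register earlier in the
// block; if the closest one is "%EAX = ADD32ri %EAX, 4" its def becomes
// <dead>, whereas a plain use would be marked <kill> and recorded in
// RegKills / KillOps so later kill bookkeeping stays consistent.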
/// hasLaterNon2AddrUse - Return true if MI has another use of the specified
/// virtual register after operand 'i' that is not a two-address operand. In
/// that case it is not safe to mark the use at 'i' isKill.
static bool hasLaterNon2AddrUse(MachineInstr &MI, unsigned i, unsigned VirtReg){
const TargetInstrDesc &TID = MI.getDesc();
++i;
for (unsigned e = TID.getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || MO.getReg() != VirtReg)
continue;
if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
return true;
}
return false;
}
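// For instance (sketch): if operand 'i' of "op r1, r1" is being rewritten and
// the second r1 operand is tied to the def, that tied use does not count, so
// the use at 'i' may still be marked isKill; an untied second use of r1 later
// in the operand list would suppress the kill marker.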
/// rewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them. If possible, avoid reloading vregs.
void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
AvailableSpills &Spills, BitVector &RegKills,
std::vector<MachineOperand*> &KillOps) {
DOUT << "\n**** Local spiller rewriting MBB '"
<< MBB.getBasicBlock()->getName() << "':\n";
MachineFunction &MF = *MBB.getParent();
// MaybeDeadStores - When we need to write a value back into a stack slot,
// keep track of the inserted store. If the stack slot value is never read
// (because the value was used from some available register, for example), and
// subsequently stored to, the original store is dead. This map keeps track
// of inserted stores that are not used. If we see a subsequent store to the
// same stack slot, the original store is deleted.
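// For example (sketch): "MOV32mr <fi#2>, %EAX" followed, with no intervening
// load of fi#2, by "MOV32mr <fi#2>, %ECX" lets us delete the first store as
// soon as the second one is recorded here.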
std::vector<MachineInstr*> MaybeDeadStores;
MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);
// ReMatDefs - These are rematerializable def MIs which are not deleted.
SmallSet<MachineInstr*, 4> ReMatDefs;
// Clear kill info.
RegKills.reset();
KillOps.clear();
KillOps.resize(TRI->getNumRegs(), NULL);
unsigned Dist = 0;
DistanceMap.clear();
for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
MII != E; ) {
MachineBasicBlock::iterator NextMII = MII; ++NextMII;
VirtRegMap::MI2VirtMapTy::const_iterator I, End;
bool Erased = false;
bool BackTracked = false;
if (PrepForUnfoldOpti(MBB, MII,
MaybeDeadStores, Spills, RegKills, KillOps, VRM))
NextMII = next(MII);
MachineInstr &MI = *MII;
const TargetInstrDesc &TID = MI.getDesc();
if (VRM.hasEmergencySpills(&MI)) {
// Spill physical register(s) in the rare case the allocator has run out
// of registers to allocate.
SmallSet<int, 4> UsedSS;
std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
unsigned PhysReg = EmSpills[i];
const TargetRegisterClass *RC =
TRI->getPhysicalRegisterRegClass(PhysReg);
assert(RC && "Unable to determine register class!");
int SS = VRM.getEmergencySpillSlot(RC);
if (UsedSS.count(SS))
assert(0 && "Need to spill more than one physical registers!");
UsedSS.insert(SS);
TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
MachineInstr *StoreMI = prior(MII);
VRM.addSpillSlotUse(SS, StoreMI);
TII->loadRegFromStackSlot(MBB, next(MII), PhysReg, SS, RC);
MachineInstr *LoadMI = next(MII);
VRM.addSpillSlotUse(SS, LoadMI);
++NumPSpills;
}
NextMII = next(MII);
}
// Insert restores here if asked to.
if (VRM.isRestorePt(&MI)) {
std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
unsigned VirtReg = RestoreRegs[e-i-1]; // Reverse order.
if (!VRM.getPreSplitReg(VirtReg))
continue; // Split interval spilled again.
unsigned Phys = VRM.getPhys(VirtReg);
RegInfo->setPhysRegUsed(Phys);
// Check if the value being restored is available. If so, it must be
// from a predecessor BB that fallthrough into this BB. We do not
// expect:
// BB1:
// r1 = load fi#1
// ...
// = r1<kill>
// ... # r1 not clobbered
// ...
// = load fi#1
bool DoReMat = VRM.isReMaterialized(VirtReg);
int SSorRMId = DoReMat
? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
if (InReg == Phys) {
// If the value is already available in the expected register, save
// a reload / remat.
if (SSorRMId)
DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1;
else
DOUT << "Reusing SS#" << SSorRMId;
DOUT << " from physreg "
<< TRI->getName(InReg) << " for vreg"
<< VirtReg <<" instead of reloading into physreg "
<< TRI->getName(Phys) << "\n";
++NumOmitted;
continue;
} else if (InReg && InReg != Phys) {
if (SSorRMId)
DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1;
else
DOUT << "Reusing SS#" << SSorRMId;
DOUT << " from physreg "
<< TRI->getName(InReg) << " for vreg"
<< VirtReg <<" by copying it into physreg "
<< TRI->getName(Phys) << "\n";
// If the reloaded / remat value is available in another register,
// copy it to the desired register.
TII->copyRegToReg(MBB, &MI, Phys, InReg, RC, RC);
// This invalidates Phys.
Spills.ClobberPhysReg(Phys);
// Remember it's available.
Spills.addAvailable(SSorRMId, Phys);
// Mark it killed.
MachineInstr *CopyMI = prior(MII);
MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg);
KillOpnd->setIsKill();
UpdateKills(*CopyMI, RegKills, KillOps, TRI);
DOUT << '\t' << *CopyMI;
++NumCopified;
continue;
}
if (VRM.isReMaterialized(VirtReg)) {
ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM);
} else {
const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
TII->loadRegFromStackSlot(MBB, &MI, Phys, SSorRMId, RC);
MachineInstr *LoadMI = prior(MII);
VRM.addSpillSlotUse(SSorRMId, LoadMI);
++NumLoads;
}
// This invalidates Phys.
Spills.ClobberPhysReg(Phys);
// Remember it's available.
Spills.addAvailable(SSorRMId, Phys);
UpdateKills(*prior(MII), RegKills, KillOps, TRI);
DOUT << '\t' << *prior(MII);
}
}
// Insert spills here if asked to.
if (VRM.isSpillPt(&MI)) {
std::vector<std::pair<unsigned,bool> > &SpillRegs =
VRM.getSpillPtSpills(&MI);
for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
unsigned VirtReg = SpillRegs[i].first;
bool isKill = SpillRegs[i].second;
if (!VRM.getPreSplitReg(VirtReg))
continue; // Split interval spilled again.
const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
unsigned Phys = VRM.getPhys(VirtReg);
int StackSlot = VRM.getStackSlot(VirtReg);
TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
MachineInstr *StoreMI = next(MII);
VRM.addSpillSlotUse(StackSlot, StoreMI);
VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
}
NextMII = next(MII);
}
/// ReusedOperands - Keep track of operand reuse in case we need to undo
/// reuse.
ReuseInfo ReusedOperands(MI, TRI);
SmallVector<unsigned, 4> VirtUseOps;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || MO.getReg() == 0)
continue; // Ignore non-register operands.
unsigned VirtReg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
// Ignore physregs for spilling, but remember that it is used by this
// function.
RegInfo->setPhysRegUsed(VirtReg);
continue;
}
// We want to process implicit virtual register uses first.
if (MO.isImplicit())
// If the virtual register is implicitly defined, emit an implicit_def
// before so the scavenger knows it's "defined".
VirtUseOps.insert(VirtUseOps.begin(), i);
else
VirtUseOps.push_back(i);
}
// Process all of the spilled uses and all non-spilled reg references.
SmallVector<int, 2> PotentialDeadStoreSlots;
for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
unsigned i = VirtUseOps[j];
MachineOperand &MO = MI.getOperand(i);
unsigned VirtReg = MO.getReg();
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Not a virtual register?");
unsigned SubIdx = MO.getSubReg();
if (VRM.isAssignedReg(VirtReg)) {
// This virtual register was assigned a physreg!
unsigned Phys = VRM.getPhys(VirtReg);
RegInfo->setPhysRegUsed(Phys);
if (MO.isDef())
ReusedOperands.markClobbered(Phys);
unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
MI.getOperand(i).setReg(RReg);
if (VRM.isImplicitlyDefined(VirtReg))
BuildMI(MBB, &MI, MI.getDebugLoc(),
TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
continue;
}
// This virtual register is now known to be a spilled value.
if (!MO.isUse())
continue; // Handle defs in the loop below (handle use&def here though)
bool DoReMat = VRM.isReMaterialized(VirtReg);
int SSorRMId = DoReMat
? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
int ReuseSlot = SSorRMId;
// Check to see if this stack slot is available.
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
// If this is a sub-register use, make sure the reuse register is in the
// right register class. For example, for x86 not all of the 32-bit
// registers have accessible sub-registers.
// Similarly so for EXTRACT_SUBREG. Consider this:
// EDI = op
// MOV32_mr fi#1, EDI
// ...
// = EXTRACT_SUBREG fi#1
// fi#1 is available in EDI, but it cannot be reused because it's not in
// the right register file.
if (PhysReg &&
(SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
if (!RC->contains(PhysReg))
PhysReg = 0;
}
if (PhysReg) {
// This spilled operand might be part of a two-address operand. If this
// is the case, then changing it will necessarily require changing the
// def part of the instruction as well. However, in some cases, we
// aren't allowed to modify the reused register. If none of these cases
// apply, reuse it.
bool CanReuse = true;
int ti = TID.getOperandConstraint(i, TOI::TIED_TO);
if (ti != -1) {
// Okay, we have a two address operand. We can reuse this physreg as
// long as we are allowed to clobber the value and there isn't an
// earlier def that has already clobbered the physreg.
CanReuse = Spills.canClobberPhysReg(ReuseSlot) &&
!ReusedOperands.isClobbered(PhysReg);
}
if (CanReuse) {
// If this stack slot value is already available, reuse it!
if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
else
DOUT << "Reusing SS#" << ReuseSlot;
DOUT << " from physreg "
<< TRI->getName(PhysReg) << " for vreg"
<< VirtReg <<" instead of reloading into physreg "
<< TRI->getName(VRM.getPhys(VirtReg)) << "\n";
unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
MI.getOperand(i).setReg(RReg);
// The only technical detail we have is that we don't know that
// PhysReg won't be clobbered by a reloaded stack slot that occurs
// later in the instruction. In particular, consider 'op V1, V2'.
// If V1 is available in physreg R0, we would choose to reuse it
// here, instead of reloading it into the register the allocator
// indicated (say R1). However, V2 might have to be reloaded
// later, and it might indicate that it needs to live in R0. When
// this occurs, we need to have information available that
// indicates it is safe to use R1 for the reload instead of R0.
//
// To further complicate matters, we might conflict with an alias,
// or R0 and R1 might not be compatible with each other. In this
// case, we actually insert a reload for V1 in R1, ensuring that
// we can get at R0 or its alias.
ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
VRM.getPhys(VirtReg), VirtReg);
if (ti != -1)
// Only mark it clobbered if this is a use&def operand.
ReusedOperands.markClobbered(PhysReg);
++NumReused;
if (MI.getOperand(i).isKill() &&
ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
// The store of this spilled value is potentially dead, but we
// won't know for certain until we've confirmed that the re-use
// above is valid, which means waiting until the other operands
// are processed. For now we just track the spill slot, we'll
// remove it after the other operands are processed if valid.
PotentialDeadStoreSlots.push_back(ReuseSlot);
}
// Mark it isKill if there are no other uses of the same virtual
// register and it's not a two-address operand. IsKill will be
// unset if the reg is reused.
if (ti == -1 && !hasLaterNon2AddrUse(MI, i, VirtReg))
MI.getOperand(i).setIsKill();
continue;
} // CanReuse
// Otherwise we have a situation where we have a two-address instruction
// whose mod/ref operand needs to be reloaded. This reload is already
// available in some register "PhysReg", but if we used PhysReg as the
// operand to our 2-addr instruction, the instruction would modify
// PhysReg. This isn't cool if something later uses PhysReg and expects
// to get its initial value.
//
// To avoid this problem, and to avoid doing a load right after a store,
// we emit a copy from PhysReg into the designated register for this
// operand.
unsigned DesignatedReg = VRM.getPhys(VirtReg);
assert(DesignatedReg && "Must map virtreg to physreg!");
// Note that, if we reused a register for a previous operand, the
// register we want to reload into might not actually be
// available. If this occurs, use the register indicated by the
// reuser.
if (ReusedOperands.hasReuses())
DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
Spills, MaybeDeadStores, RegKills, KillOps, VRM);
// If the mapped designated register is actually the physreg we have
// incoming, we don't need to insert a dead copy.
if (DesignatedReg == PhysReg) {
// If this stack slot value is already available, reuse it!
if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
else
DOUT << "Reusing SS#" << ReuseSlot;
DOUT << " from physreg " << TRI->getName(PhysReg)
<< " instead of reloading into same physreg.\n";
unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
MI.getOperand(i).setReg(RReg);
ReusedOperands.markClobbered(RReg);
++NumReused;
continue;
}
const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
RegInfo->setPhysRegUsed(DesignatedReg);
ReusedOperands.markClobbered(DesignatedReg);
TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);
MachineInstr *CopyMI = prior(MII);
UpdateKills(*CopyMI, RegKills, KillOps, TRI);
// This invalidates DesignatedReg.
Spills.ClobberPhysReg(DesignatedReg);
Spills.addAvailable(ReuseSlot, DesignatedReg);
unsigned RReg =
SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
MI.getOperand(i).setReg(RReg);
DOUT << '\t' << *prior(MII);
++NumReused;
continue;
} // if (PhysReg)
// Otherwise, reload it and remember that we have it.
PhysReg = VRM.getPhys(VirtReg);
assert(PhysReg && "Must map virtreg to physreg!");
// Note that, if we reused a register for a previous operand, the
// register we want to reload into might not actually be
// available. If this occurs, use the register indicated by the
// reuser.
if (ReusedOperands.hasReuses())
PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
Spills, MaybeDeadStores, RegKills, KillOps, VRM);
RegInfo->setPhysRegUsed(PhysReg);
ReusedOperands.markClobbered(PhysReg);
if (DoReMat) {
ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM);
} else {
const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
MachineInstr *LoadMI = prior(MII);
VRM.addSpillSlotUse(SSorRMId, LoadMI);
++NumLoads;
}
// This invalidates PhysReg.
Spills.ClobberPhysReg(PhysReg);
// Any stores to this stack slot are not dead anymore.
if (!DoReMat)
MaybeDeadStores[SSorRMId] = NULL;
Spills.addAvailable(SSorRMId, PhysReg);
// Assumes this is the last use. IsKill will be unset if reg is reused
// unless it's a two-address operand.
if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
MI.getOperand(i).setIsKill();
unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
MI.getOperand(i).setReg(RReg);
UpdateKills(*prior(MII), RegKills, KillOps, TRI);
DOUT << '\t' << *prior(MII);
}
// Ok - now we can remove stores that have been confirmed dead.
for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
// This was the last use and the spilled value is still available
// for reuse. That means the spill was unnecessary!
int PDSSlot = PotentialDeadStoreSlots[j];
MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
if (DeadStore) {
DOUT << "Removed dead store:\t" << *DeadStore;
InvalidateKills(*DeadStore, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(DeadStore);
MBB.erase(DeadStore);
MaybeDeadStores[PDSSlot] = NULL;
++NumDSE;
}
}
DOUT << '\t' << MI;
// If we have folded references to memory operands, make sure we clear all
// physical registers that may contain the value of the spilled virtual
// register
SmallSet<int, 2> FoldedSS;
for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
unsigned VirtReg = I->second.first;
VirtRegMap::ModRef MR = I->second.second;
DOUT << "Folded vreg: " << VirtReg << " MR: " << MR;
// MI2VirtMap can be updated, which invalidates the iterator.
// Increment the iterator first.
++I;
int SS = VRM.getStackSlot(VirtReg);
if (SS == VirtRegMap::NO_STACK_SLOT)
continue;
FoldedSS.insert(SS);
DOUT << " - StackSlot: " << SS << "\n";
// If this folded instruction is just a use, check to see if it's a
// straight load from the virt reg slot.
if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
int FrameIdx;
unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
if (DestReg && FrameIdx == SS) {
// If this spill slot is available, turn it into a copy (or nothing)
// instead of leaving it as a load!
if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
DOUT << "Promoted Load To Copy: " << MI;
if (DestReg != InReg) {
const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
unsigned SubIdx = DefMO->getSubReg();
// Revisit the copy so we make sure to notice the effects of the
// operation on the destreg (either needing to RA it if it's
// virtual or needing to clobber any values if it's physical).
NextMII = &MI;
--NextMII; // backtrack to the copy.
// Propagate the sub-register index over.
if (SubIdx) {
DefMO = NextMII->findRegisterDefOperand(DestReg);
DefMO->setSubReg(SubIdx);
}
// Mark it killed.
MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg);
KillOpnd->setIsKill();
BackTracked = true;
} else {
DOUT << "Removing now-noop copy: " << MI;
// Unset last kill since it's being reused.
InvalidateKill(InReg, RegKills, KillOps);
}
InvalidateKills(MI, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(&MI);
MBB.erase(&MI);
Erased = true;
goto ProcessNextInst;
}
} else {
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
SmallVector<MachineInstr*, 4> NewMIs;
if (PhysReg &&
TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
MBB.insert(MII, NewMIs[0]);
InvalidateKills(MI, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(&MI);
MBB.erase(&MI);
Erased = true;
--NextMII; // backtrack to the unfolded instruction.
BackTracked = true;
goto ProcessNextInst;
}
}
}
// If this reference is not a use, any previous store is now dead.
// Otherwise, the store to this stack slot is not dead anymore.
MachineInstr* DeadStore = MaybeDeadStores[SS];
if (DeadStore) {
bool isDead = !(MR & VirtRegMap::isRef);
MachineInstr *NewStore = NULL;
if (MR & VirtRegMap::isModRef) {
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
SmallVector<MachineInstr*, 4> NewMIs;
// We can reuse this physreg as long as we are allowed to clobber
// the value and there isn't an earlier def that has already clobbered
// the physreg.
if (PhysReg &&
!TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
MachineOperand *KillOpnd =
DeadStore->findRegisterUseOperand(PhysReg, true);
// Note, if the store is storing a sub-register, it's possible the
// super-register is needed below.
if (KillOpnd && !KillOpnd->getSubReg() &&
TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
MBB.insert(MII, NewMIs[0]);
NewStore = NewMIs[1];
MBB.insert(MII, NewStore);
VRM.addSpillSlotUse(SS, NewStore);
VRM.RemoveMachineInstrFromMaps(&MI);
MBB.erase(&MI);
Erased = true;
--NextMII;
--NextMII; // backtrack to the unfolded instruction.
BackTracked = true;
isDead = true;
}
}
}
if (isDead) { // Previous store is dead.
// If we get here, the store is dead, nuke it now.
DOUT << "Removed dead store:\t" << *DeadStore;
InvalidateKills(*DeadStore, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(DeadStore);
MBB.erase(DeadStore);
if (!NewStore)
++NumDSE;
}
MaybeDeadStores[SS] = NULL;
if (NewStore) {
// Treat this store as a spill merged into a copy. That makes the
// stack slot value available.
VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
goto ProcessNextInst;
}
}
// If the spill slot value is available, and this is a new definition of
// the value, the value is not available anymore.
if (MR & VirtRegMap::isMod) {
// Notice that the value in this stack slot has been modified.
Spills.ModifyStackSlotOrReMat(SS);
// If this is *just* a mod of the value, check to see if this is just a
// store to the spill slot (i.e. the spill got merged into the copy). If
// so, realize that the vreg is available now, and add the store to the
// MaybeDeadStore info.
int StackSlot;
if (!(MR & VirtRegMap::isRef)) {
if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
"Src hasn't been allocated yet?");
if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
Spills, RegKills, KillOps, TRI, VRM)) {
NextMII = next(MII);
BackTracked = true;
goto ProcessNextInst;
}
// Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
// this as a potentially dead store in case there is a subsequent
// store into the stack slot without a read from it.
MaybeDeadStores[StackSlot] = &MI;
// If the stack slot value was previously available in some other
// register, change it now. Otherwise, make the register
// available in PhysReg.
Spills.addAvailable(StackSlot, SrcReg, false/*!clobber*/);
}
}
}
}
// Process all of the spilled defs.
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!(MO.isReg() && MO.getReg() && MO.isDef()))
continue;
unsigned VirtReg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
// Check to see if this is a noop copy. If so, eliminate the
// instruction before considering the dest reg to be changed.
unsigned Src, Dst, SrcSR, DstSR;
if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
++NumDCE;
DOUT << "Removing now-noop copy: " << MI;
SmallVector<unsigned, 2> KillRegs;
InvalidateKills(MI, RegKills, KillOps, &KillRegs);
if (MO.isDead() && !KillRegs.empty()) {
// Source register or an implicit super/sub-register use is killed.
assert(KillRegs[0] == Dst ||
TRI->isSubRegister(KillRegs[0], Dst) ||
TRI->isSuperRegister(KillRegs[0], Dst));
// Last def is now dead.
TransferDeadness(&MBB, Dist, Src, RegKills, KillOps);
}
VRM.RemoveMachineInstrFromMaps(&MI);
MBB.erase(&MI);
Erased = true;
Spills.disallowClobberPhysReg(VirtReg);
goto ProcessNextInst;
}
// If it's not a no-op copy, it clobbers the value in the destreg.
Spills.ClobberPhysReg(VirtReg);
ReusedOperands.markClobbered(VirtReg);
// Check to see if this instruction is a load from a stack slot into
// a register. If so, this provides the stack slot value in the reg.
int FrameIdx;
if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
assert(DestReg == VirtReg && "Unknown load situation!");
// If it is a folded reference, then it's not safe to clobber.
bool Folded = FoldedSS.count(FrameIdx);
// Otherwise, if it wasn't available, remember that it is now!
Spills.addAvailable(FrameIdx, DestReg, !Folded);