BitVector Defs(TRI->getNumRegs());
BitVector Uses(TRI->getNumRegs());
SmallVector<unsigned, 4> LocalUses;
SmallVector<unsigned, 4> Kills;
// Take a look at 2 instructions at most.
unsigned Count = 0;
while (Count < 2) {
if (MII == MBB.begin())
break;
MachineInstr *PrevMI = prior(MII);
MII = PrevMI;
if (PrevMI->isDebugValue())
continue; // Skip over dbg_value instructions.
++Count;
for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = PrevMI->getOperand(i);
if (!MO.isReg() || MO.getReg() == 0)
continue;
unsigned Reg = MO.getReg();
if (MO.isDef()) {
Defs.set(Reg);
for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
Defs.set(*AS);
} else {
LocalUses.push_back(Reg);
if (MO.isKill() && AllocatableRegs[Reg])
Kills.push_back(Reg);
}
}
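// A register that one of the scanned instructions killed, and that has not
// been defined or used since, is presumably free to reuse as long as it is
// in the requested register class.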
for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
unsigned Kill = Kills[i];
if (!Defs[Kill] && !Uses[Kill] &&
RC->contains(Kill))
return Kill;
}
for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
unsigned Reg = LocalUses[i];
Uses.set(Reg);
for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
Uses.set(*AS);
}
}
return 0;
}
static
void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg,
const TargetRegisterInfo &TRI) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.getReg() == VirtReg)
substitutePhysReg(MO, PhysReg, TRI);
}
}
namespace {
struct RefSorter {
bool operator()(const std::pair<MachineInstr*, int> &A,
const std::pair<MachineInstr*, int> &B) {
return A.second < B.second;
}
};
// ***************************** //
// Local Spiller Implementation //
// ***************************** //
class LocalRewriter : public VirtRegRewriter {
MachineRegisterInfo *MRI;
const TargetRegisterInfo *TRI;
const TargetInstrInfo *TII;
VirtRegMap *VRM;
LiveIntervals *LIs;
BitVector AllocatableRegs;
DenseMap<MachineInstr*, unsigned> DistanceMap;
DenseMap<int, SmallVector<MachineInstr*,4> > Slot2DbgValues;
MachineBasicBlock *MBB; // Basic block currently being processed.
public:
bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
LiveIntervals* LIs);
private:
void EraseInstr(MachineInstr *MI) {
VRM->RemoveMachineInstrFromMaps(MI);
LIs->RemoveMachineInstrFromMaps(MI);
MI->eraseFromParent();
}
bool OptimizeByUnfold2(unsigned VirtReg, int SS,
MachineBasicBlock::iterator &MII,
std::vector<MachineInstr*> &MaybeDeadStores,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps);
bool OptimizeByUnfold(MachineBasicBlock::iterator &MII,
std::vector<MachineInstr*> &MaybeDeadStores,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps);
bool CommuteToFoldReload(MachineBasicBlock::iterator &MII,
unsigned VirtReg, unsigned SrcReg, int SS,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps,
const TargetRegisterInfo *TRI);
void SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
int Idx, unsigned PhysReg, int StackSlot,
const TargetRegisterClass *RC,
bool isAvailable, MachineInstr *&LastStore,
AvailableSpills &Spills,
SmallSet<MachineInstr*, 4> &ReMatDefs,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps);
void TransferDeadness(unsigned Reg, BitVector &RegKills,
std::vector<MachineOperand*> &KillOps);
bool InsertEmergencySpills(MachineInstr *MI);
bool InsertRestores(MachineInstr *MI,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps);
bool InsertSpills(MachineInstr *MI);
void ProcessUses(MachineInstr &MI, AvailableSpills &Spills,
std::vector<MachineInstr*> &MaybeDeadStores,
BitVector &RegKills,
ReuseInfo &ReusedOperands,
std::vector<MachineOperand*> &KillOps);
void RewriteMBB(LiveIntervals *LIs,
AvailableSpills &Spills, BitVector &RegKills,
std::vector<MachineOperand*> &KillOps);
};
}
bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
LiveIntervals* lis) {
MRI = &MF.getRegInfo();
TRI = MF.getTarget().getRegisterInfo();
TII = MF.getTarget().getInstrInfo();
VRM = &vrm;
LIs = lis;
AllocatableRegs = TRI->getAllocatableSet(MF);
DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
<< MF.getFunction()->getName() << "':\n");
DEBUG(dbgs() << "**** Machine Instrs (NOTE! Does not include spills and"
" reloads!) ****\n");
DEBUG(MF.print(dbgs(), LIs->getSlotIndexes()));
// Spills - Keep track of which spilled values are available in physregs
// so that we can choose to reuse the physregs instead of emitting
// reloads. This is usually refreshed per basic block.
AvailableSpills Spills(TRI, TII);
// Keep track of kill information.
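// RegKills[R] records that physreg R currently carries a kill marker, and
// KillOps[R] remembers the operand holding that marker so the flag can be
// cleared if R turns out to be live again.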
BitVector RegKills(TRI->getNumRegs());
std::vector<MachineOperand*> KillOps;
KillOps.resize(TRI->getNumRegs(), NULL);
// SinglePredSuccs - Successor blocks which have a single predecessor.
SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;
// Traverse the basic blocks depth first.
MachineBasicBlock *Entry = MF.begin();
SmallPtrSet<MachineBasicBlock*,16> Visited;
for (df_ext_iterator<MachineBasicBlock*,
SmallPtrSet<MachineBasicBlock*,16> >
DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
DFI != E; ++DFI) {
MBB = *DFI;
if (!EarlyVisited.count(MBB))
RewriteMBB(LIs, Spills, RegKills, KillOps);
// If this MBB is the only predecessor of a successor, keep the
// availability information and visit it next.
do {
// Keep visiting single-predecessor successors as long as possible.
SinglePredSuccs.clear();
findSinglePredSuccessor(MBB, SinglePredSuccs);
if (SinglePredSuccs.empty())
MBB = 0;
else {
// FIXME: There may be more than one successor, each of which has MBB as
// its only predecessor.
MBB = SinglePredSuccs[0];
if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
RewriteMBB(LIs, Spills, RegKills, KillOps);
}
}
} while (MBB);
// Clear the availability info.
Spills.clear();
}
DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
DEBUG(MF.print(dbgs(), LIs->getSlotIndexes()));
// Mark unused spill slots.
MachineFrameInfo *MFI = MF.getFrameInfo();
int SS = VRM->getLowSpillSlot();
if (SS != VirtRegMap::NO_STACK_SLOT) {
for (int e = VRM->getHighSpillSlot(); SS <= e; ++SS) {
SmallVector<MachineInstr*, 4> &DbgValues = Slot2DbgValues[SS];
if (!VRM->isSpillSlotUsed(SS)) {
MFI->RemoveStackObject(SS);
for (unsigned j = 0, ee = DbgValues.size(); j != ee; ++j) {
MachineInstr *DVMI = DbgValues[j];
DEBUG(dbgs() << "Removing debug info referencing FI#" << SS << '\n');
EraseInstr(DVMI);
++NumDSS;
}
DbgValues.clear();
}
}
}
Slot2DbgValues.clear();
return true;
}
/// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
/// a scratch register is available.
/// xorq %r12<kill>, %r13
/// addq %rax, -184(%rbp)
/// addq %r13, -184(%rbp)
/// ==>
/// xorq %r12<kill>, %r13
/// movq -184(%rbp), %r12
/// addq %rax, %r12
/// addq %r13, %r12
/// movq %r12, -184(%rbp)
bool LocalRewriter::
OptimizeByUnfold2(unsigned VirtReg, int SS,
MachineBasicBlock::iterator &MII,
std::vector<MachineInstr*> &MaybeDeadStores,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps) {
MachineBasicBlock::iterator NextMII = llvm::next(MII);
// Skip over dbg_value instructions.
while (NextMII != MBB->end() && NextMII->isDebugValue())
NextMII = llvm::next(NextMII);
if (NextMII == MBB->end())
return false;
if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
return false;
// Now let's see if the last couple of instructions happen to have freed up
// a register.
const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
unsigned PhysReg = FindFreeRegister(MII, *MBB, RC, TRI, AllocatableRegs);
if (!PhysReg)
return false;
MachineFunction &MF = *MBB->getParent();
TRI = MF.getTarget().getRegisterInfo();
MachineInstr &MI = *MII;
if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, *VRM))
return false;
// If the next instruction also folds the same SS modref and can be unfolded,
// then it's worthwhile to issue a load from SS into the free register and
// then unfold these instructions.
if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM))
return false;
// Back-schedule reloads and remats.
ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, false, SS, TII, MF);
// Load from SS to the spare physical register.
TII->loadRegFromStackSlot(*MBB, MII, PhysReg, SS, RC, TRI);
// This invalidates PhysReg.
Spills.ClobberPhysReg(PhysReg);
// Remember it's available.
Spills.addAvailable(SS, PhysReg);
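// The slot's value is live in PhysReg again, so a store to SS that was a
// dead-store candidate must now be kept.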
MaybeDeadStores[SS] = NULL;
// Unfold current MI.
SmallVector<MachineInstr*, 4> NewMIs;
if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
llvm_unreachable("Unable unfold the load / store folding instruction!");
assert(NewMIs.size() == 1);
AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
VRM->transferRestorePts(&MI, NewMIs[0]);
MII = MBB->insert(MII, NewMIs[0]);
InvalidateKills(MI, TRI, RegKills, KillOps);
EraseInstr(&MI);
++NumModRefUnfold;
// Unfold next instructions that fold the same SS.
do {
MachineInstr &NextMI = *NextMII;
NextMII = llvm::next(NextMII);
NewMIs.clear();
if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
llvm_unreachable("Unable unfold the load / store folding instruction!");
assert(NewMIs.size() == 1);
AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
VRM->transferRestorePts(&NextMI, NewMIs[0]);
MBB->insert(NextMII, NewMIs[0]);
InvalidateKills(NextMI, TRI, RegKills, KillOps);
EraseInstr(&NextMI);
++NumModRefUnfold;
// Skip over dbg_value instructions.
while (NextMII != MBB->end() && NextMII->isDebugValue())
NextMII = llvm::next(NextMII);
if (NextMII == MBB->end())
break;
} while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM));
// Store the value back into SS.
TII->storeRegToStackSlot(*MBB, NextMII, PhysReg, true, SS, RC, TRI);
MachineInstr *StoreMI = prior(NextMII);
VRM->addSpillSlotUse(SS, StoreMI);
VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
return true;
}
/// OptimizeByUnfold - Turn a store folding instruction into a load folding
/// instruction. e.g.
/// xorl %edi, %eax
/// movl %eax, -32(%ebp)
/// movl -36(%ebp), %eax
/// orl %eax, -32(%ebp)
/// ==>
/// xorl %edi, %eax
/// orl -36(%ebp), %eax
/// mov %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalRewriter::
OptimizeByUnfold(MachineBasicBlock::iterator &MII,
std::vector<MachineInstr*> &MaybeDeadStores,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps) {
MachineFunction &MF = *MBB->getParent();
MachineInstr &MI = *MII;
unsigned UnfoldedOpc = 0;
unsigned UnfoldPR = 0;
unsigned UnfoldVR = 0;
int FoldedSS = VirtRegMap::NO_STACK_SLOT;
VirtRegMap::MI2VirtMapTy::const_iterator I, End;
for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
// Only transform a MI that folds a single register.
if (UnfoldedOpc)
return false;
UnfoldVR = I->second.first;
VirtRegMap::ModRef MR = I->second.second;
// MI2VirtMap can be updated, which invalidates the iterator, so
// increment the iterator first.
++I;
if (VRM->isAssignedReg(UnfoldVR))
continue;
// If this reference is not a use, any previous store is now dead.
// Otherwise, the store to this stack slot is not dead anymore.
FoldedSS = VRM->getStackSlot(UnfoldVR);
MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
if (DeadStore && (MR & VirtRegMap::isModRef)) {
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
if (!PhysReg || !DeadStore->readsRegister(PhysReg))
continue;
UnfoldPR = PhysReg;
UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
false, true);
}
}
if (!UnfoldedOpc) {
if (!UnfoldVR)
return false;
// Look for other unfolding opportunities.
return OptimizeByUnfold2(UnfoldVR, FoldedSS, MII, MaybeDeadStores, Spills,
RegKills, KillOps);
}
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
continue;
unsigned VirtReg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
continue;
if (VRM->isAssignedReg(VirtReg)) {
unsigned PhysReg = VRM->getPhys(VirtReg);
if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
return false;
} else if (VRM->isReMaterialized(VirtReg))
continue;
int SS = VRM->getStackSlot(VirtReg);
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
if (PhysReg) {
if (TRI->regsOverlap(PhysReg, UnfoldPR))
return false;
continue;
}
if (VRM->hasPhys(VirtReg)) {
PhysReg = VRM->getPhys(VirtReg);
if (!TRI->regsOverlap(PhysReg, UnfoldPR))
continue;
}
// Ok, we'll need to reload the value into a register, which makes
// it impossible to perform the store unfolding optimization later.
// Let's see if it is possible to fold the load if the store is
// unfolded. This allows us to perform the store unfolding
// optimization.
SmallVector<MachineInstr*, 4> NewMIs;
if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
assert(NewMIs.size() == 1);
MachineInstr *NewMI = NewMIs.back();
MBB->insert(MII, NewMI);
NewMIs.clear();
int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
assert(Idx != -1);
SmallVector<unsigned, 1> Ops;
Ops.push_back(Idx);
MachineInstr *FoldedMI = TII->foldMemoryOperand(NewMI, Ops, SS);
NewMI->eraseFromParent();
if (FoldedMI) {
VRM->addSpillSlotUse(SS, FoldedMI);
if (!VRM->hasPhys(UnfoldVR))
VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
MII = FoldedMI;
InvalidateKills(MI, TRI, RegKills, KillOps);
EraseInstr(&MI);
return true;
}
}
return false;
}
return false;
}
/// CommuteChangesDestination - We are looking for r0 = op r1, r2, where
/// SrcReg is r1 and it is tied to r0. Return true if after commuting this
/// instruction it will be r0 = op r2, r1.
static bool CommuteChangesDestination(MachineInstr *DefMI,
const TargetInstrDesc &TID,
unsigned SrcReg,
const TargetInstrInfo *TII,
unsigned &DstIdx) {
if (TID.getNumDefs() != 1 && TID.getNumOperands() != 3)
return false;
if (!DefMI->getOperand(1).isReg() ||
DefMI->getOperand(1).getReg() != SrcReg)
return false;
unsigned DefIdx;
if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
return false;
unsigned SrcIdx1, SrcIdx2;
if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
return false;
if (SrcIdx1 == 1 && SrcIdx2 == 2) {
DstIdx = 2;
return true;
}
return false;
}
/// CommuteToFoldReload -
/// Look for
/// r1 = load fi#1
/// r1 = op r1, r2<kill>
/// store r1, fi#1
///
/// If op is commutable and r2 is killed, then we can xform these to
/// r2 = op r2, fi#1
/// store r2, fi#1
bool LocalRewriter::
CommuteToFoldReload(MachineBasicBlock::iterator &MII,
unsigned VirtReg, unsigned SrcReg, int SS,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps,
const TargetRegisterInfo *TRI) {
if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
return false;
MachineInstr &MI = *MII;
MachineBasicBlock::iterator DefMII = prior(MII);
MachineInstr *DefMI = DefMII;
const TargetInstrDesc &TID = DefMI->getDesc();
unsigned NewDstIdx;
if (DefMII != MBB->begin() &&
TID.isCommutable() &&
CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
unsigned NewReg = NewDstMO.getReg();
if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
return false;
MachineInstr *ReloadMI = prior(DefMII);
int FrameIdx;
unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
if (DestReg != SrcReg || FrameIdx != SS)
return false;
int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
if (UseIdx == -1)
return false;
unsigned DefIdx;
if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
return false;
assert(DefMI->getOperand(DefIdx).isReg() &&
DefMI->getOperand(DefIdx).getReg() == SrcReg);
// Now commute def instruction.
MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
if (!CommutedMI)
return false;
MBB->insert(MII, CommutedMI);
SmallVector<unsigned, 1> Ops;
Ops.push_back(NewDstIdx);
MachineInstr *FoldedMI = TII->foldMemoryOperand(CommutedMI, Ops, SS);
// CommutedMI is no longer needed since foldMemoryOperand returns a new MI.
CommutedMI->eraseFromParent();
if (!FoldedMI)
return false;
VRM->addSpillSlotUse(SS, FoldedMI);
VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
// Insert new def MI and spill MI.
const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
TII->storeRegToStackSlot(*MBB, &MI, NewReg, true, SS, RC, TRI);
MII = prior(MII);
MachineInstr *StoreMI = MII;
VRM->addSpillSlotUse(SS, StoreMI);
VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
MII = FoldedMI; // Update MII to backtrack.
// Delete all 3 old instructions.
InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
EraseInstr(ReloadMI);
InvalidateKills(*DefMI, TRI, RegKills, KillOps);
EraseInstr(DefMI);
InvalidateKills(MI, TRI, RegKills, KillOps);
EraseInstr(&MI);
// If NewReg was previously holding the value of some SS, it's now clobbered.
// This has to be done now because it's a physical register. When this
// instruction is re-visited, it's ignored.
Spills.ClobberPhysReg(NewReg);
++NumCommutes;
return true;
}
return false;
}
/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
/// the last store to the same slot is now dead. If so, remove the last store.
void LocalRewriter::
SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
int Idx, unsigned PhysReg, int StackSlot,
const TargetRegisterClass *RC,
bool isAvailable, MachineInstr *&LastStore,
AvailableSpills &Spills,
SmallSet<MachineInstr*, 4> &ReMatDefs,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps) {
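// Remember the instruction that currently follows MII; the spill code
// inserted below may expand to several instructions, and prior(oldNextMII)
// then points at the last of them.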
MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
TII->storeRegToStackSlot(*MBB, llvm::next(MII), PhysReg, true, StackSlot, RC,
TRI);
MachineInstr *StoreMI = prior(oldNextMII);
VRM->addSpillSlotUse(StackSlot, StoreMI);
DEBUG(dbgs() << "Store:\t" << *StoreMI);
// If there is a dead store to this stack slot, nuke it now.
if (LastStore) {
DEBUG(dbgs() << "Removed dead store:\t" << *LastStore);
++NumDSE;
SmallVector<unsigned, 2> KillRegs;
InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
MachineBasicBlock::iterator PrevMII = LastStore;
bool CheckDef = PrevMII != MBB->begin();
if (CheckDef)
--PrevMII;
EraseInstr(LastStore);
if (CheckDef) {
// Look at defs of killed registers on the store. Mark the defs
// as dead since the store has been deleted and they aren't
// being reused.
for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
bool HasOtherDef = false;
if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef, TRI)) {
MachineInstr *DeadDef = PrevMII;
if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
// FIXME: This assumes a remat def does not have side effects.
EraseInstr(DeadDef);
++NumDRM;
}
}
}
}
}
// Allow for multi-instruction spill sequences, as on PPC Altivec. Presume
// the last of multiple instructions is the actual store.
LastStore = prior(oldNextMII);
// If the stack slot value was previously available in some other
// register, change it now. Otherwise, make the value available in PhysReg.
Spills.ModifyStackSlotOrReMat(StackSlot);
Spills.ClobberPhysReg(PhysReg);
Spills.addAvailable(StackSlot, PhysReg, isAvailable);
++NumStores;
}
/// isSafeToDelete - Return true if this instruction doesn't produce any side
/// effects and all of its defs are dead.
static bool isSafeToDelete(MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
if (TID.mayLoad() || TID.mayStore() || TID.isTerminator() ||
TID.isCall() || TID.isBarrier() || TID.isReturn() ||
MI.isLabel() || MI.isDebugValue() ||
MI.hasUnmodeledSideEffects())
return false;
// Technically speaking inline asm without side effects and no defs can still
// be deleted. But there is so much bad inline asm code out there, we should
// let them be.
if (MI.isInlineAsm())
return false;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.getReg())
continue;
if (MO.isDef() && !MO.isDead())
return false;
if (MO.isUse() && MO.isKill())
// FIXME: We can't remove kill markers or else the scavenger will assert.
// An alternative is to add an ADD pseudo instruction to replace kill
// markers.
return false;
}
return true;
}
/// TransferDeadness - An identity copy definition is dead and it's being
/// removed. Find the last def or use and mark it as dead / kill.
void LocalRewriter::
TransferDeadness(unsigned Reg, BitVector &RegKills,
std::vector<MachineOperand*> &KillOps) {
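// Collect every reference to Reg in this block together with its distance,
// so the references can be walked from the most recent one backwards.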
SmallPtrSet<MachineInstr*, 4> Seens;
SmallVector<std::pair<MachineInstr*, int>,8> Refs;
for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(Reg),
RE = MRI->reg_end(); RI != RE; ++RI) {
MachineInstr *UDMI = &*RI;
if (UDMI->isDebugValue() || UDMI->getParent() != MBB)
continue;
DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
if (DI == DistanceMap.end())
continue;
if (Seens.insert(UDMI))
Refs.push_back(std::make_pair(UDMI, DI->second));
}
if (Refs.empty())
return;
std::sort(Refs.begin(), Refs.end(), RefSorter());
while (!Refs.empty()) {
MachineInstr *LastUDMI = Refs.back().first;
Refs.pop_back();
MachineOperand *LastUD = NULL;
for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = LastUDMI->getOperand(i);
if (!MO.isReg() || MO.getReg() != Reg)
continue;
if (!LastUD || (LastUD->isUse() && MO.isDef()))
LastUD = &MO;
if (LastUDMI->isRegTiedToDefOperand(i))
break;
}
if (LastUD->isDef()) {
// If the instruction has no side effects, delete it and keep propagating
// backward. Otherwise, mark it dead and we are done.
if (!isSafeToDelete(*LastUDMI)) {
LastUD->setIsDead();
break;
}
EraseInstr(LastUDMI);
} else {
LastUD->setIsKill();
RegKills.set(Reg);
KillOps[Reg] = LastUD;
break;
}
}
}
/// InsertEmergencySpills - Insert emergency spills before MI if requested by
/// VRM. Return true if spills were inserted.
bool LocalRewriter::InsertEmergencySpills(MachineInstr *MI) {
if (!VRM->hasEmergencySpills(MI))
return false;
MachineBasicBlock::iterator MII = MI;
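// UsedSS guards against two emergency spills in the same instruction being
// handed the same spill slot.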
SmallSet<int, 4> UsedSS;
std::vector<unsigned> &EmSpills = VRM->getEmergencySpills(MI);
for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
unsigned PhysReg = EmSpills[i];
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(PhysReg);
assert(RC && "Unable to determine register class!");
int SS = VRM->getEmergencySpillSlot(RC);
if (UsedSS.count(SS))
llvm_unreachable("Need to spill more than one physical registers!");
UsedSS.insert(SS);
TII->storeRegToStackSlot(*MBB, MII, PhysReg, true, SS, RC, TRI);
MachineInstr *StoreMI = prior(MII);
VRM->addSpillSlotUse(SS, StoreMI);
// Back-schedule reloads and remats.
MachineBasicBlock::iterator InsertLoc =
ComputeReloadLoc(llvm::next(MII), MBB->begin(), PhysReg, TRI, false, SS,
TII, *MBB->getParent());
TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SS, RC, TRI);
MachineInstr *LoadMI = prior(InsertLoc);
VRM->addSpillSlotUse(SS, LoadMI);
++NumPSpills;
DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
}
return true;
}
/// InsertRestores - Restore registers before MI if requested by VRM. Return
/// true if any instructions were inserted.
bool LocalRewriter::InsertRestores(MachineInstr *MI,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps) {
if (!VRM->isRestorePt(MI))
return false;
MachineBasicBlock::iterator MII = MI;
std::vector<unsigned> &RestoreRegs = VRM->getRestorePtRestores(MI);
for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
unsigned VirtReg = RestoreRegs[e-i-1]; // Reverse order.
if (!VRM->getPreSplitReg(VirtReg))
continue; // Split interval spilled again.
unsigned Phys = VRM->getPhys(VirtReg);
MRI->setPhysRegUsed(Phys);
// Check if the value being restored is available. If so, it must be
// from a predecessor BB that falls through into this BB. We do not
// expect:
// BB1:
// r1 = load fi#1
// ...
// = r1<kill>
// ... # r1 not clobbered
// ...
// = load fi#1
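// SSorRMId is either a re-materialization id or a stack slot index,
// depending on how the value is to be recovered.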
bool DoReMat = VRM->isReMaterialized(VirtReg);
int SSorRMId = DoReMat
? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
if (InReg == Phys) {
// If the value is already available in the expected register, save
// a reload / remat.
if (SSorRMId)
DEBUG(dbgs() << "Reusing RM#"
<< SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
else
DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
DEBUG(dbgs() << " from physreg "
<< TRI->getName(InReg) << " for vreg"
<< VirtReg <<" instead of reloading into physreg "
<< TRI->getName(Phys) << '\n');
// Reusing a physreg may resurrect it. But we expect ProcessUses to update
// the kill flags for the current instruction after processing it.
++NumOmitted;
continue;
} else if (InReg && InReg != Phys) {
if (SSorRMId)
DEBUG(dbgs() << "Reusing RM#"
<< SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
else
DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
DEBUG(dbgs() << " from physreg "
<< TRI->getName(InReg) << " for vreg"
<< VirtReg <<" by copying it into physreg "
<< TRI->getName(Phys) << '\n');
// If the reloaded / remat value is available in another register,
// copy it to the desired register.
// Back-schedule reloads and remats.
MachineBasicBlock::iterator InsertLoc =
ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
*MBB->getParent());
MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI->getDebugLoc(),
TII->get(TargetOpcode::COPY), Phys)
.addReg(InReg, RegState::Kill);
// This invalidates Phys.
Spills.ClobberPhysReg(Phys);
// Remember it's available.
Spills.addAvailable(SSorRMId, Phys);
CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
UpdateKills(*CopyMI, TRI, RegKills, KillOps);
DEBUG(dbgs() << '\t' << *CopyMI);
++NumCopified;
continue;
}
// Back-schedule reloads and remats.
MachineBasicBlock::iterator InsertLoc =
ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
*MBB->getParent());
if (VRM->isReMaterialized(VirtReg)) {
ReMaterialize(*MBB, InsertLoc, Phys, VirtReg, TII, TRI, *VRM);
} else {
const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
TII->loadRegFromStackSlot(*MBB, InsertLoc, Phys, SSorRMId, RC, TRI);
MachineInstr *LoadMI = prior(InsertLoc);
VRM->addSpillSlotUse(SSorRMId, LoadMI);
++NumLoads;
DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
}
// This invalidates Phys.
Spills.ClobberPhysReg(Phys);
// Remember it's available.
Spills.addAvailable(SSorRMId, Phys);
UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
DEBUG(dbgs() << '\t' << *prior(MII));
}
return true;
}
/// InsertSpills - Insert spills after MI if requested by VRM. Return
/// true if spills were inserted.
bool LocalRewriter::InsertSpills(MachineInstr *MI) {
if (!VRM->isSpillPt(MI))
return false;
MachineBasicBlock::iterator MII = MI;
std::vector<std::pair<unsigned,bool> > &SpillRegs =
VRM->getSpillPtSpills(MI);
for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
unsigned VirtReg = SpillRegs[i].first;
bool isKill = SpillRegs[i].second;
if (!VRM->getPreSplitReg(VirtReg))
continue; // Split interval spilled again.
const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
unsigned Phys = VRM->getPhys(VirtReg);
int StackSlot = VRM->getStackSlot(VirtReg);
MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
TII->storeRegToStackSlot(*MBB, llvm::next(MII), Phys, isKill, StackSlot,
RC, TRI);
MachineInstr *StoreMI = prior(oldNextMII);
VRM->addSpillSlotUse(StackSlot, StoreMI);
DEBUG(dbgs() << "Store:\t" << *StoreMI);
VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
}
return true;
}
/// ProcessUses - Process all of MI's spilled operands and all available
/// operands.
void LocalRewriter::ProcessUses(MachineInstr &MI, AvailableSpills &Spills,
std::vector<MachineInstr*> &MaybeDeadStores,
BitVector &RegKills,
ReuseInfo &ReusedOperands,
std::vector<MachineOperand*> &KillOps) {
// Clear kill info.
SmallSet<unsigned, 2> KilledMIRegs;
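// VirtUseOps collects the operand indices of all virtual register operands;
// implicit operands are placed at the front so they are rewritten before the
// explicit ones.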
SmallVector<unsigned, 4> VirtUseOps;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || MO.getReg() == 0)
continue; // Ignore non-register operands.
unsigned VirtReg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
// Ignore physregs for spilling, but remember that it is used by this
// function.
MRI->setPhysRegUsed(VirtReg);
continue;
}
// We want to process implicit virtual register uses first.
if (MO.isImplicit())
// If the virtual register is implicitly defined, emit an implicit_def
// before so the scavenger knows it's "defined".
// FIXME: This is a horrible hack done by the register allocator to
// remat a definition with a virtual register operand.
VirtUseOps.insert(VirtUseOps.begin(), i);
else
VirtUseOps.push_back(i);
// A partial def causes problems because the same operand both reads and
// writes the register. This rewriter is designed to rewrite uses and defs
// separately, so a partial def would already have been rewritten to a
// physreg by the time we get to processing defs.
// Add an implicit use operand to model the partial def.
if (MO.isDef() && MO.getSubReg() && MI.readsVirtualRegister(VirtReg) &&
MI.findRegisterUseOperandIdx(VirtReg) == -1) {
VirtUseOps.insert(VirtUseOps.begin(), MI.getNumOperands());
MI.addOperand(MachineOperand::CreateReg(VirtReg,
false, // isDef
true)); // isImplicit
DEBUG(dbgs() << "Partial redef: " << MI);
}
}
// Process all of the spilled uses and all non spilled reg references.
SmallVector<int, 2> PotentialDeadStoreSlots;
KilledMIRegs.clear();
for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
unsigned i = VirtUseOps[j];
unsigned VirtReg = MI.getOperand(i).getReg();
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Not a virtual register?");
unsigned SubIdx = MI.getOperand(i).getSubReg();
if (VRM->isAssignedReg(VirtReg)) {
// This virtual register was assigned a physreg!
unsigned Phys = VRM->getPhys(VirtReg);
MRI->setPhysRegUsed(Phys);
if (MI.getOperand(i).isDef())
ReusedOperands.markClobbered(Phys);
substitutePhysReg(MI.getOperand(i), Phys, *TRI);
if (VRM->isImplicitlyDefined(VirtReg))
// FIXME: Is this needed?
BuildMI(*MBB, &MI, MI.getDebugLoc(),
TII->get(TargetOpcode::IMPLICIT_DEF), Phys);
continue;
}
// This virtual register is now known to be a spilled value.
if (!MI.getOperand(i).isUse())
continue; // Handle defs in the loop below (handle use&def here though)
bool AvoidReload = MI.getOperand(i).isUndef();
// Check if it is defined by an implicit def. It should not be spilled.
// Note, this is for correctness reasons, e.g.
// 8 %reg1024<def> = IMPLICIT_DEF
// 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
// The live range [12, 14) is not part of the r1024 live interval since
// it's defined by an implicit def. It will not conflict with the live
// interval of r1025. Now suppose both registers are spilled; you can
// easily see a situation where both registers are reloaded before
// the INSERT_SUBREG into target registers that would overlap.
bool DoReMat = VRM->isReMaterialized(VirtReg);
int SSorRMId = DoReMat
? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
int ReuseSlot = SSorRMId;
// Check to see if this stack slot is available.
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
// If this is a sub-register use, make sure the reuse register is in the
// right register class. For example, for x86 not all of the 32-bit
// registers have accessible sub-registers.
// Similarly so for EXTRACT_SUBREG. Consider this:
// EDI = op
// MOV32_mr fi#1, EDI
// ...
// = EXTRACT_SUBREG fi#1
// fi#1 is available in EDI, but it cannot be reused because it's not in
// the right register file.
if (PhysReg && !AvoidReload && SubIdx) {
const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
if (!RC->contains(PhysReg))