unsigned Reg = MO.getReg();
if (MO.isDef()) {
Defs.set(Reg);
for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
Defs.set(*AS);
} else {
LocalUses.push_back(Reg);
if (MO.isKill() && AllocatableRegs[Reg])
Kills.push_back(Reg);
}
}
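// A register killed by this instruction is a scratch candidate if it is
// not defined or used again before the original position and belongs to
// the requested register class.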
for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
unsigned Kill = Kills[i];
if (!Defs[Kill] && !Uses[Kill] &&
TRI->getPhysicalRegisterRegClass(Kill) == RC)
return Kill;
}
for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
unsigned Reg = LocalUses[i];
Uses.set(Reg);
for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
Uses.set(*AS);
}
MII = PrevMI;
}
return 0;
}
static
void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg,
const TargetRegisterInfo &TRI) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.getReg() == VirtReg)
substitutePhysReg(MO, PhysReg, TRI);
}
}
namespace {
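// RefSorter - Orders (instruction, distance) pairs by ascending distance, so
// the reference nearest the end of the block sorts last.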
struct RefSorter {
bool operator()(const std::pair<MachineInstr*, int> &A,
const std::pair<MachineInstr*, int> &B) {
return A.second < B.second;
}
};
// ***************************** //
// Local Spiller Implementation //
// ***************************** //
class LocalRewriter : public VirtRegRewriter {
MachineRegisterInfo *MRI;
const TargetRegisterInfo *TRI;
const TargetInstrInfo *TII;
VirtRegMap *VRM;
BitVector AllocatableRegs;
DenseMap<MachineInstr*, unsigned> DistanceMap;
MachineBasicBlock *MBB; // Basic block currently being processed.
public:
bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
LiveIntervals* LIs);
private:
bool OptimizeByUnfold2(unsigned VirtReg, int SS,
MachineBasicBlock::iterator &MII,
std::vector<MachineInstr*> &MaybeDeadStores,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps);
bool OptimizeByUnfold(MachineBasicBlock::iterator &MII,
std::vector<MachineInstr*> &MaybeDeadStores,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps);
bool CommuteToFoldReload(MachineBasicBlock::iterator &MII,
unsigned VirtReg, unsigned SrcReg, int SS,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps,
const TargetRegisterInfo *TRI);
void SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
int Idx, unsigned PhysReg, int StackSlot,
const TargetRegisterClass *RC,
bool isAvailable, MachineInstr *&LastStore,
AvailableSpills &Spills,
SmallSet<MachineInstr*, 4> &ReMatDefs,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps);
void TransferDeadness(unsigned Reg, BitVector &RegKills,
std::vector<MachineOperand*> &KillOps);
bool InsertEmergencySpills(MachineInstr *MI);
bool InsertRestores(MachineInstr *MI,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps);
bool InsertSpills(MachineInstr *MI);
void RewriteMBB(LiveIntervals *LIs,
AvailableSpills &Spills, BitVector &RegKills,
std::vector<MachineOperand*> &KillOps);
};
}
bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
LiveIntervals* LIs) {
MRI = &MF.getRegInfo();
TRI = MF.getTarget().getRegisterInfo();
TII = MF.getTarget().getInstrInfo();
VRM = &vrm;
AllocatableRegs = TRI->getAllocatableSet(MF);
DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
<< MF.getFunction()->getName() << "':\n");
DEBUG(dbgs() << "**** Machine Instrs (NOTE! Does not include spills and"
" reloads!) ****\n");
DEBUG(MF.dump());
// Spills - Keep track of which spilled values are available in physregs
// so that we can choose to reuse the physregs instead of emitting
// reloads. This is usually refreshed per basic block.
AvailableSpills Spills(TRI, TII);
// Keep track of kill information.
BitVector RegKills(TRI->getNumRegs());
std::vector<MachineOperand*> KillOps;
KillOps.resize(TRI->getNumRegs(), NULL);
// SinglePredSuccs - Successor blocks which have a single predecessor.
SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;
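// EarlyVisited - Blocks already rewritten via the single-predecessor chain
// below; the depth-first walk skips them when it reaches them again.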
// Traverse the basic blocks depth first.
MachineBasicBlock *Entry = MF.begin();
SmallPtrSet<MachineBasicBlock*,16> Visited;
for (df_ext_iterator<MachineBasicBlock*,
SmallPtrSet<MachineBasicBlock*,16> >
DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
DFI != E; ++DFI) {
MBB = *DFI;
if (!EarlyVisited.count(MBB))
RewriteMBB(LIs, Spills, RegKills, KillOps);
// If this MBB is the only predecessor of a successor, keep the
// availability information and visit it next.
do {
// Keep visiting single-predecessor successors as long as possible.
SinglePredSuccs.clear();
findSinglePredSuccessor(MBB, SinglePredSuccs);
if (SinglePredSuccs.empty())
MBB = 0;
else {
// FIXME: There may be more than one successor, each of which has MBB
// as its only predecessor.
MBB = SinglePredSuccs[0];
if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
RewriteMBB(LIs, Spills, RegKills, KillOps);
}
}
} while (MBB);
// Clear the availability info.
Spills.clear();
}
DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
DEBUG(MF.dump());
// Mark unused spill slots.
MachineFrameInfo *MFI = MF.getFrameInfo();
int SS = VRM->getLowSpillSlot();
if (SS != VirtRegMap::NO_STACK_SLOT)
for (int e = VRM->getHighSpillSlot(); SS <= e; ++SS)
if (!VRM->isSpillSlotUsed(SS)) {
MFI->RemoveStackObject(SS);
++NumDSS;
}
return true;
}
/// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
/// a scratch register is available.
/// xorq %r12<kill>, %r13
/// addq %rax, -184(%rbp)
/// addq %r13, -184(%rbp)
/// ==>
/// xorq %r12<kill>, %r13
/// movq -184(%rbp), %r12
/// addq %rax, %r12
/// addq %r13, %r12
/// movq %r12, -184(%rbp)
bool LocalRewriter::
OptimizeByUnfold2(unsigned VirtReg, int SS,
MachineBasicBlock::iterator &MII,
std::vector<MachineInstr*> &MaybeDeadStores,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps) {
MachineBasicBlock::iterator NextMII = llvm::next(MII);
if (NextMII == MBB->end())
return false;
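// Give up unless this opcode has an unfolded form where both the load and
// the store can be separated out.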
if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
return false;
// Now let's see if the last couple of instructions happen to have freed up
// a register.
const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
unsigned PhysReg = FindFreeRegister(MII, *MBB, RC, TRI, AllocatableRegs);
if (!PhysReg)
return false;
MachineFunction &MF = *MBB->getParent();
TRI = MF.getTarget().getRegisterInfo();
MachineInstr &MI = *MII;
if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, *VRM))
return false;
// If the next instruction also folds the same SS modref and can be unfolded,
// then it's worthwhile to issue a load from SS into the free register and
// then unfold these instructions.
if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM))
return false;
// Back-schedule reloads and remats.
ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, false, SS, TII, MF);
// Load from SS to the spare physical register.
TII->loadRegFromStackSlot(*MBB, MII, PhysReg, SS, RC);
// This invalidates PhysReg.
Spills.ClobberPhysReg(PhysReg);
// Remember it's available.
Spills.addAvailable(SS, PhysReg);
MaybeDeadStores[SS] = NULL;
// Unfold current MI.
SmallVector<MachineInstr*, 4> NewMIs;
if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
llvm_unreachable("Unable unfold the load / store folding instruction!");
assert(NewMIs.size() == 1);
AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
VRM->transferRestorePts(&MI, NewMIs[0]);
MII = MBB->insert(MII, NewMIs[0]);
InvalidateKills(MI, TRI, RegKills, KillOps);
VRM->RemoveMachineInstrFromMaps(&MI);
MBB->erase(&MI);
++NumModRefUnfold;
// Unfold next instructions that fold the same SS.
do {
MachineInstr &NextMI = *NextMII;
NextMII = llvm::next(NextMII);
NewMIs.clear();
if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
llvm_unreachable("Unable unfold the load / store folding instruction!");
assert(NewMIs.size() == 1);
AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
VRM->transferRestorePts(&NextMI, NewMIs[0]);
MBB->insert(NextMII, NewMIs[0]);
InvalidateKills(NextMI, TRI, RegKills, KillOps);
VRM->RemoveMachineInstrFromMaps(&NextMI);
MBB->erase(&NextMI);
++NumModRefUnfold;
if (NextMII == MBB->end())
break;
} while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM));
// Store the value back into SS.
TII->storeRegToStackSlot(*MBB, NextMII, PhysReg, true, SS, RC);
MachineInstr *StoreMI = prior(NextMII);
VRM->addSpillSlotUse(SS, StoreMI);
VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
return true;
}
/// OptimizeByUnfold - Turn a store folding instruction into a load folding
/// instruction. e.g.
/// xorl %edi, %eax
/// movl %eax, -32(%ebp)
/// movl -36(%ebp), %eax
/// orl %eax, -32(%ebp)
/// ==>
/// xorl %edi, %eax
/// orl -36(%ebp), %eax
/// movl %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalRewriter::
OptimizeByUnfold(MachineBasicBlock::iterator &MII,
std::vector<MachineInstr*> &MaybeDeadStores,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps) {
MachineFunction &MF = *MBB->getParent();
MachineInstr &MI = *MII;
unsigned UnfoldedOpc = 0;
unsigned UnfoldPR = 0;
unsigned UnfoldVR = 0;
int FoldedSS = VirtRegMap::NO_STACK_SLOT;
VirtRegMap::MI2VirtMapTy::const_iterator I, End;
for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
// Only transform a MI that folds a single register.
if (UnfoldedOpc)
return false;
UnfoldVR = I->second.first;
VirtRegMap::ModRef MR = I->second.second;
// MI2VirtMap can be updated, which invalidates the iterator.
// Increment the iterator first.
++I;
if (VRM->isAssignedReg(UnfoldVR))
continue;
// If this reference is not a use, any previous store is now dead.
// Otherwise, the store to this stack slot is not dead anymore.
FoldedSS = VRM->getStackSlot(UnfoldVR);
MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
if (DeadStore && (MR & VirtRegMap::isModRef)) {
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
if (!PhysReg || !DeadStore->readsRegister(PhysReg))
continue;
UnfoldPR = PhysReg;
UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
false, true);
}
}
if (!UnfoldedOpc) {
if (!UnfoldVR)
return false;
// Look for other unfolding opportunities.
return OptimizeByUnfold2(UnfoldVR, FoldedSS, MII, MaybeDeadStores, Spills,
RegKills, KillOps);
}
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
continue;
unsigned VirtReg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
continue;
if (VRM->isAssignedReg(VirtReg)) {
unsigned PhysReg = VRM->getPhys(VirtReg);
if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
return false;
} else if (VRM->isReMaterialized(VirtReg))
continue;
int SS = VRM->getStackSlot(VirtReg);
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
if (PhysReg) {
if (TRI->regsOverlap(PhysReg, UnfoldPR))
return false;
continue;
}
if (VRM->hasPhys(VirtReg)) {
PhysReg = VRM->getPhys(VirtReg);
if (!TRI->regsOverlap(PhysReg, UnfoldPR))
continue;
}
// Ok, we'll need to reload the value into a register which makes
// it impossible to perform the store unfolding optimization later.
// Let's see if it is possible to fold the load if the store is
// unfolded. This allows us to perform the store unfolding
// optimization.
SmallVector<MachineInstr*, 4> NewMIs;
if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
assert(NewMIs.size() == 1);
MachineInstr *NewMI = NewMIs.back();
NewMIs.clear();
int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
assert(Idx != -1);
SmallVector<unsigned, 1> Ops;
Ops.push_back(Idx);
MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
if (FoldedMI) {
VRM->addSpillSlotUse(SS, FoldedMI);
if (!VRM->hasPhys(UnfoldVR))
VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
MII = MBB->insert(MII, FoldedMI);
InvalidateKills(MI, TRI, RegKills, KillOps);
VRM->RemoveMachineInstrFromMaps(&MI);
MBB->erase(&MI);
MF.DeleteMachineInstr(NewMI);
return true;
}
MF.DeleteMachineInstr(NewMI);
}
}
return false;
}
/// CommuteChangesDestination - We are looking for r0 = op r1, r2, where
/// SrcReg is r1 and it is tied to r0. Return true if after
/// commuting this instruction it will be r0 = op r2, r1.
static bool CommuteChangesDestination(MachineInstr *DefMI,
const TargetInstrDesc &TID,
unsigned SrcReg,
const TargetInstrInfo *TII,
unsigned &DstIdx) {
// Bail unless this is a single-def, three-operand instruction.
if (TID.getNumDefs() != 1 || TID.getNumOperands() != 3)
return false;
if (!DefMI->getOperand(1).isReg() ||
DefMI->getOperand(1).getReg() != SrcReg)
return false;
unsigned DefIdx;
if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
return false;
unsigned SrcIdx1, SrcIdx2;
if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
return false;
if (SrcIdx1 == 1 && SrcIdx2 == 2) {
DstIdx = 2;
return true;
}
return false;
}
/// CommuteToFoldReload -
/// Look for
/// r1 = load fi#1
/// r1 = op r1, r2<kill>
/// store r1, fi#1
///
/// If op is commutable and r2 is killed, then we can xform these to
/// r2 = op r2, fi#1
/// store r2, fi#1
bool LocalRewriter::
CommuteToFoldReload(MachineBasicBlock::iterator &MII,
unsigned VirtReg, unsigned SrcReg, int SS,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps,
const TargetRegisterInfo *TRI) {
if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
return false;
MachineFunction &MF = *MBB->getParent();
MachineInstr &MI = *MII;
MachineBasicBlock::iterator DefMII = prior(MII);
MachineInstr *DefMI = DefMII;
const TargetInstrDesc &TID = DefMI->getDesc();
unsigned NewDstIdx;
if (DefMII != MBB->begin() &&
TID.isCommutable() &&
CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
unsigned NewReg = NewDstMO.getReg();
if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
return false;
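// The instruction just above the def must be a reload of the same stack
// slot into SrcReg for the transformation to apply.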
MachineInstr *ReloadMI = prior(DefMII);
int FrameIdx;
unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
if (DestReg != SrcReg || FrameIdx != SS)
return false;
int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
if (UseIdx == -1)
return false;
unsigned DefIdx;
if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
return false;
assert(DefMI->getOperand(DefIdx).isReg() &&
DefMI->getOperand(DefIdx).getReg() == SrcReg);
// Now commute def instruction.
MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
if (!CommutedMI)
return false;
SmallVector<unsigned, 1> Ops;
Ops.push_back(NewDstIdx);
MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
// CommutedMI is no longer needed since foldMemoryOperand returns a new MI.
MF.DeleteMachineInstr(CommutedMI);
if (!FoldedMI)
return false;
VRM->addSpillSlotUse(SS, FoldedMI);
VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
// Insert new def MI and spill MI.
const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
TII->storeRegToStackSlot(*MBB, &MI, NewReg, true, SS, RC);
MII = prior(MII);
MachineInstr *StoreMI = MII;
VRM->addSpillSlotUse(SS, StoreMI);
VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
MII = MBB->insert(MII, FoldedMI); // Update MII to backtrack.
// Delete all 3 old instructions.
InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
VRM->RemoveMachineInstrFromMaps(ReloadMI);
MBB->erase(ReloadMI);
InvalidateKills(*DefMI, TRI, RegKills, KillOps);
VRM->RemoveMachineInstrFromMaps(DefMI);
MBB->erase(DefMI);
InvalidateKills(MI, TRI, RegKills, KillOps);
VRM->RemoveMachineInstrFromMaps(&MI);
MBB->erase(&MI);
// If NewReg was previously holding the value of some SS, it's now
// clobbered. This has to be done now because NewReg is a physical register;
// when this instruction is re-visited, it will be ignored.
Spills.ClobberPhysReg(NewReg);
++NumCommutes;
return true;
}
return false;
}
/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
/// the last store to the same slot is now dead. If so, remove the last store.
void LocalRewriter::
SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
int Idx, unsigned PhysReg, int StackSlot,
const TargetRegisterClass *RC,
bool isAvailable, MachineInstr *&LastStore,
AvailableSpills &Spills,
SmallSet<MachineInstr*, 4> &ReMatDefs,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps) {
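// Remember the instruction that followed MII before the store sequence is
// inserted, so the newly emitted store(s) can be located afterwards.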
MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
TII->storeRegToStackSlot(*MBB, llvm::next(MII), PhysReg, true, StackSlot, RC);
MachineInstr *StoreMI = prior(oldNextMII);
VRM->addSpillSlotUse(StackSlot, StoreMI);
DEBUG(dbgs() << "Store:\t" << *StoreMI);
// If there is a dead store to this stack slot, nuke it now.
if (LastStore) {
DEBUG(dbgs() << "Removed dead store:\t" << *LastStore);
++NumDSE;
SmallVector<unsigned, 2> KillRegs;
InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
MachineBasicBlock::iterator PrevMII = LastStore;
bool CheckDef = PrevMII != MBB->begin();
if (CheckDef)
--PrevMII;
VRM->RemoveMachineInstrFromMaps(LastStore);
MBB->erase(LastStore);
if (CheckDef) {
// Look at defs of killed registers on the store. Mark the defs
// as dead since the store has been deleted and they aren't
// being reused.
for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
bool HasOtherDef = false;
if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef, TRI)) {
MachineInstr *DeadDef = PrevMII;
if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
// FIXME: This assumes a remat def does not have side effects.
VRM->RemoveMachineInstrFromMaps(DeadDef);
MBB->erase(DeadDef);
++NumDRM;
}
}
}
}
}
// Allow for multi-instruction spill sequences, as on PPC Altivec. Presume
// the last of multiple instructions is the actual store.
LastStore = prior(oldNextMII);
// If the stack slot value was previously available in some other
// register, change it now. Otherwise, make the register available
// in PhysReg.
Spills.ModifyStackSlotOrReMat(StackSlot);
Spills.ClobberPhysReg(PhysReg);
Spills.addAvailable(StackSlot, PhysReg, isAvailable);
++NumStores;
}
/// isSafeToDelete - Return true if this instruction doesn't produce any side
/// effect and all of its defs are dead.
static bool isSafeToDelete(MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
if (TID.mayLoad() || TID.mayStore() || TID.isCall() || TID.isTerminator() ||
TID.isBarrier() || TID.isReturn() ||
TID.hasUnmodeledSideEffects())
return false;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.getReg())
continue;
if (MO.isDef() && !MO.isDead())
return false;
if (MO.isUse() && MO.isKill())
// FIXME: We can't remove kill markers or else the scavenger will assert.
// An alternative is to add an ADD pseudo instruction to replace kill
// markers.
return false;
}
return true;
}
/// TransferDeadness - An identity copy definition is dead and is being
/// removed. Find the last def or use and mark it as dead / kill.
void LocalRewriter::
TransferDeadness(unsigned Reg, BitVector &RegKills,
std::vector<MachineOperand*> &KillOps) {
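// Collect all references to Reg in this block, paired with their distance
// (instruction number); after sorting, they are visited latest-first.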
SmallPtrSet<MachineInstr*, 4> Seens;
SmallVector<std::pair<MachineInstr*, int>,8> Refs;
for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(Reg),
RE = MRI->reg_end(); RI != RE; ++RI) {
MachineInstr *UDMI = &*RI;
if (UDMI->getParent() != MBB)
continue;
DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
if (DI == DistanceMap.end())
continue;
if (Seens.insert(UDMI))
Refs.push_back(std::make_pair(UDMI, DI->second));
}
if (Refs.empty())
return;
std::sort(Refs.begin(), Refs.end(), RefSorter());
while (!Refs.empty()) {
MachineInstr *LastUDMI = Refs.back().first;
Refs.pop_back();
MachineOperand *LastUD = NULL;
for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = LastUDMI->getOperand(i);
if (!MO.isReg() || MO.getReg() != Reg)
continue;
if (!LastUD || (LastUD->isUse() && MO.isDef()))
LastUD = &MO;
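// Stop scanning at a tied operand; a tied use and its def are inseparable.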
if (LastUDMI->isRegTiedToDefOperand(i))
break;
}
if (LastUD->isDef()) {
// If the instruction has no side effect, delete it and propagate
// backward further. Otherwise, mark it dead and we are done.
if (!isSafeToDelete(*LastUDMI)) {
LastUD->setIsDead();
break;
}
VRM->RemoveMachineInstrFromMaps(LastUDMI);
MBB->erase(LastUDMI);
} else {
LastUD->setIsKill();
RegKills.set(Reg);
KillOps[Reg] = LastUD;
break;
}
}
}
/// InsertEmergencySpills - Insert emergency spills before MI if requested by
/// VRM. Return true if spills were inserted.
bool LocalRewriter::InsertEmergencySpills(MachineInstr *MI) {
if (!VRM->hasEmergencySpills(MI))
return false;
MachineBasicBlock::iterator MII = MI;
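// UsedSS - Emergency spill slots already used here; two registers of the
// same class would need the same slot, which is not supported.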
SmallSet<int, 4> UsedSS;
std::vector<unsigned> &EmSpills = VRM->getEmergencySpills(MI);
for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
unsigned PhysReg = EmSpills[i];
const TargetRegisterClass *RC = TRI->getPhysicalRegisterRegClass(PhysReg);
assert(RC && "Unable to determine register class!");
int SS = VRM->getEmergencySpillSlot(RC);
if (UsedSS.count(SS))
llvm_unreachable("Need to spill more than one physical registers!");
UsedSS.insert(SS);
TII->storeRegToStackSlot(*MBB, MII, PhysReg, true, SS, RC);
MachineInstr *StoreMI = prior(MII);
VRM->addSpillSlotUse(SS, StoreMI);
// Back-schedule reloads and remats.
MachineBasicBlock::iterator InsertLoc =
ComputeReloadLoc(llvm::next(MII), MBB->begin(), PhysReg, TRI, false, SS,
TII, *MBB->getParent());
TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SS, RC);
MachineInstr *LoadMI = prior(InsertLoc);
VRM->addSpillSlotUse(SS, LoadMI);
++NumPSpills;
DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
}
return true;
}
/// InsertRestores - Restore registers before MI if requested by VRM. Return
/// true if any instructions were inserted.
bool LocalRewriter::InsertRestores(MachineInstr *MI,
AvailableSpills &Spills,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps) {
if (!VRM->isRestorePt(MI))
return false;
MachineBasicBlock::iterator MII = MI;
std::vector<unsigned> &RestoreRegs = VRM->getRestorePtRestores(MI);
for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
unsigned VirtReg = RestoreRegs[e-i-1]; // Reverse order.
if (!VRM->getPreSplitReg(VirtReg))
continue; // Split interval spilled again.
unsigned Phys = VRM->getPhys(VirtReg);
MRI->setPhysRegUsed(Phys);
// Check if the value being restored is available. If so, it must be
// from a predecessor BB that fell through into this BB. We do not
// expect:
// BB1:
// r1 = load fi#1
// ...
// = r1<kill>
// ... # r1 not clobbered
// ...
// = load fi#1
bool DoReMat = VRM->isReMaterialized(VirtReg);
int SSorRMId = DoReMat
? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
if (InReg == Phys) {
// If the value is already available in the expected register, save
// a reload / remat.
if (SSorRMId)
DEBUG(dbgs() << "Reusing RM#"
<< SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
else
DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
DEBUG(dbgs() << " from physreg "
<< TRI->getName(InReg) << " for vreg"
<< VirtReg <<" instead of reloading into physreg "
<< TRI->getName(Phys) << '\n');
++NumOmitted;
continue;
} else if (InReg && InReg != Phys) {
if (SSorRMId)
DEBUG(dbgs() << "Reusing RM#"
<< SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
else
DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
DEBUG(dbgs() << " from physreg "
<< TRI->getName(InReg) << " for vreg"
<< VirtReg <<" by copying it into physreg "
<< TRI->getName(Phys) << '\n');
// If the reloaded / remat value is available in another register,
// copy it to the desired register.
// Back-schedule reloads and remats.
MachineBasicBlock::iterator InsertLoc =
ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
*MBB->getParent());
TII->copyRegToReg(*MBB, InsertLoc, Phys, InReg, RC, RC);
// This invalidates Phys.
Spills.ClobberPhysReg(Phys);
// Remember it's available.
Spills.addAvailable(SSorRMId, Phys);
// Mark it killed.
MachineInstr *CopyMI = prior(InsertLoc);
CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg);
KillOpnd->setIsKill();
UpdateKills(*CopyMI, TRI, RegKills, KillOps);
DEBUG(dbgs() << '\t' << *CopyMI);
++NumCopified;
continue;
}
// Back-schedule reloads and remats.
MachineBasicBlock::iterator InsertLoc =
ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
*MBB->getParent());
if (VRM->isReMaterialized(VirtReg)) {
ReMaterialize(*MBB, InsertLoc, Phys, VirtReg, TII, TRI, *VRM);
} else {
const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
TII->loadRegFromStackSlot(*MBB, InsertLoc, Phys, SSorRMId, RC);
MachineInstr *LoadMI = prior(InsertLoc);
VRM->addSpillSlotUse(SSorRMId, LoadMI);
++NumLoads;
DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
}
// This invalidates Phys.
Spills.ClobberPhysReg(Phys);
// Remember it's available.
Spills.addAvailable(SSorRMId, Phys);
UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
DEBUG(dbgs() << '\t' << *prior(MII));
}
return true;
}
/// InsertSpills - Insert spills after MI if requested by VRM. Return
/// true if spills were inserted.
bool LocalRewriter::InsertSpills(MachineInstr *MI) {
if (!VRM->isSpillPt(MI))
return false;
MachineBasicBlock::iterator MII = MI;
std::vector<std::pair<unsigned,bool> > &SpillRegs =
VRM->getSpillPtSpills(MI);
for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
unsigned VirtReg = SpillRegs[i].first;
bool isKill = SpillRegs[i].second;
if (!VRM->getPreSplitReg(VirtReg))
continue; // Split interval spilled again.
const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
unsigned Phys = VRM->getPhys(VirtReg);
int StackSlot = VRM->getStackSlot(VirtReg);
MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
TII->storeRegToStackSlot(*MBB, llvm::next(MII), Phys, isKill, StackSlot,
RC);
MachineInstr *StoreMI = prior(oldNextMII);
VRM->addSpillSlotUse(StackSlot, StoreMI);
DEBUG(dbgs() << "Store:\t" << *StoreMI);
VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
}
return true;
}
/// RewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them. If possible, avoid reloading vregs.
void
LocalRewriter::RewriteMBB(LiveIntervals *LIs,
AvailableSpills &Spills, BitVector &RegKills,
std::vector<MachineOperand*> &KillOps) {
DEBUG(dbgs() << "\n**** Local spiller rewriting MBB '"
<< MBB->getName() << "':\n");
MachineFunction &MF = *MBB->getParent();
// MaybeDeadStores - When we need to write a value back into a stack slot,
// keep track of the inserted store. If the stack slot value is never read
// (because the value was used from some available register, for example), and
// subsequently stored to, the original store is dead. This map keeps track
// of inserted stores that are not used. If we see a subsequent store to the
// same stack slot, the original store is deleted.
std::vector<MachineInstr*> MaybeDeadStores;
MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);
// ReMatDefs - These are rematerializable def MIs which are not deleted.
SmallSet<MachineInstr*, 4> ReMatDefs;
// Clear kill info.
SmallSet<unsigned, 2> KilledMIRegs;
RegKills.reset();
KillOps.clear();
KillOps.resize(TRI->getNumRegs(), NULL);
DistanceMap.clear();
for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
MII != E; ) {
MachineBasicBlock::iterator NextMII = llvm::next(MII);
if (OptimizeByUnfold(MII, MaybeDeadStores, Spills, RegKills, KillOps))
NextMII = llvm::next(MII);
if (InsertEmergencySpills(MII))
NextMII = llvm::next(MII);
InsertRestores(MII, Spills, RegKills, KillOps);
if (InsertSpills(MII))
NextMII = llvm::next(MII);
VirtRegMap::MI2VirtMapTy::const_iterator I, End;
bool Erased = false;
bool BackTracked = false;
MachineInstr &MI = *MII;
/// ReusedOperands - Keep track of operand reuse in case we need to undo
/// reuse.
ReuseInfo ReusedOperands(MI, TRI);
SmallVector<unsigned, 4> VirtUseOps;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || MO.getReg() == 0)
continue; // Ignore non-register operands.
unsigned VirtReg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
// Ignore physregs for spilling, but remember that it is used by this
// function.
MRI->setPhysRegUsed(VirtReg);
continue;
}
// We want to process implicit virtual register uses first.
if (MO.isImplicit())
// If the virtual register is implicitly defined, emit an implicit_def
// before it so the scavenger knows it's "defined".
// FIXME: This is a horrible hack done by the register allocator to
// remat a definition with a virtual register operand.
VirtUseOps.insert(VirtUseOps.begin(), i);
else
VirtUseOps.push_back(i);
}
// Process all of the spilled uses and all non spilled reg references.
SmallVector<int, 2> PotentialDeadStoreSlots;
KilledMIRegs.clear();
for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
unsigned i = VirtUseOps[j];
unsigned VirtReg = MI.getOperand(i).getReg();
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Not a virtual register?");
unsigned SubIdx = MI.getOperand(i).getSubReg();
if (VRM->isAssignedReg(VirtReg)) {
// This virtual register was assigned a physreg!
unsigned Phys = VRM->getPhys(VirtReg);
MRI->setPhysRegUsed(Phys);
if (MI.getOperand(i).isDef())
ReusedOperands.markClobbered(Phys);
substitutePhysReg(MI.getOperand(i), Phys, *TRI);
if (VRM->isImplicitlyDefined(VirtReg))
// FIXME: Is this needed?
BuildMI(*MBB, &MI, MI.getDebugLoc(),
TII->get(TargetOpcode::IMPLICIT_DEF), Phys);
continue;
}
// This virtual register is now known to be a spilled value.
if (!MI.getOperand(i).isUse())
continue; // Handle defs in the loop below (handle use&def here though)
bool AvoidReload = MI.getOperand(i).isUndef();
// Check if it is defined by an implicit def. It should not be spilled.
// Note, this is for correctness reasons, e.g.
// 8 %reg1024<def> = IMPLICIT_DEF
// 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
// The live range [12, 14) is not part of the r1024 live interval since
// it's defined by an implicit def. It will not conflict with the live
// interval of r1025. Now suppose both registers are spilled; you can
// easily see a situation where both registers are reloaded before
// the INSERT_SUBREG and both target registers that would overlap.
bool DoReMat = VRM->isReMaterialized(VirtReg);
int SSorRMId = DoReMat
? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
int ReuseSlot = SSorRMId;
// Check to see if this stack slot is available.
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
// If this is a sub-register use, make sure the reuse register is in the
// right register class. For example, for x86 not all of the 32-bit
// registers have accessible sub-registers.
// Similarly so for EXTRACT_SUBREG. Consider this:
// EDI = op
// MOV32_mr fi#1, EDI
// ...
// = EXTRACT_SUBREG fi#1
// fi#1 is available in EDI, but it cannot be reused because it's not in
// the right register file.
if (PhysReg && !AvoidReload && (SubIdx || MI.isExtractSubreg())) {
const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
if (!RC->contains(PhysReg))
PhysReg = 0;
}
if (PhysReg && !AvoidReload) {
// This spilled operand might be part of a two-address operand. If this
// is the case, then changing it will necessarily require changing the
// def part of the instruction as well. However, in some cases, we
// aren't allowed to modify the reused register. If none of these cases
// apply, reuse it.
bool CanReuse = true;
bool isTied = MI.isRegTiedToDefOperand(i);
if (isTied) {
// Okay, we have a two address operand. We can reuse this physreg as
// long as we are allowed to clobber the value and there isn't an
// earlier def that has already clobbered the physreg.
CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
Spills.canClobberPhysReg(PhysReg);
}