// Each bit specifies whether a spill is required in the MBB.
BitVector SpillMBBs(mf_->getNumBlockIDs());
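// Maps an MBB number to the instruction index of the spill point in that
// block and whether the spill can potentially be folded into that instruction.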
std::map<unsigned, std::pair<int, bool> > SpillIdxes;
BitVector RestoreMBBs(mf_->getNumBlockIDs());
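// Likewise for restore points: instruction index and whether the restore may
// be folded into the instruction at that index.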
std::map<unsigned, std::pair<int, bool> > RestoreIdxes;
std::vector<LiveInterval*> NewLIs;
SSARegMap *RegMap = mf_->getSSARegMap();
const TargetRegisterClass* rc = RegMap->getRegClass(li.reg);
unsigned NumValNums = li.getNumValNums();
SmallVector<MachineInstr*, 4> ReMatDefs;
ReMatDefs.resize(NumValNums, NULL);
SmallVector<MachineInstr*, 4> ReMatOrigDefs;
ReMatOrigDefs.resize(NumValNums, NULL);
SmallVector<int, 4> ReMatIds;
ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
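// One bit per val#; set when the def can simply be deleted because every use
// of the value can be rematerialized.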
BitVector ReMatDelete(NumValNums);
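// Stack slot assigned to this interval if spilling to memory turns out to be
// necessary; MAX_STACK_SLOT means no slot has been assigned yet.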
unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
// Spilling a split live interval. It cannot be split any further. It is
// also guaranteed to be a single val# / range interval.
if (vrm.getPreSplitReg(li.reg)) {
vrm.setIsSplitFromReg(li.reg, 0);
bool DefIsReMat = vrm.isReMaterialized(li.reg);
Slot = vrm.getStackSlot(li.reg);
assert(Slot != VirtRegMap::MAX_STACK_SLOT);
MachineInstr *ReMatDefMI = DefIsReMat ?
vrm.getReMaterializedMI(li.reg) : NULL;
int LdSlot = 0;
bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
bool isLoad = isLoadSS ||
(DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
bool IsFirstRange = true;
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// If this is a split live interval with multiple ranges, it means there
// are two-address instructions that re-defined the value. Only the
// first def can be rematerialized!
if (IsFirstRange) {
rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
false, vrm, RegMap, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
NewVRegs, NewLIs);
} else {
rewriteInstructionsForSpills(li, false, I, NULL, 0,
Slot, 0, false, false, false,
false, vrm, RegMap, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
NewVRegs, NewLIs);
}
IsFirstRange = false;
}
return NewLIs;
}
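// Only consider splitting if the interval spans more than one basic block and
// the split limit (if any) has not been reached.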
bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
TrySplit = false;
if (TrySplit)
++numSplits;
bool NeedStackSlot = false;
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
i != e; ++i) {
const VNInfo *VNI = *i;
unsigned VN = VNI->id;
unsigned DefIdx = VNI->def;
if (DefIdx == ~1U)
continue; // Dead val#.
// Is the def for the val# rematerializable?
MachineInstr *ReMatDefMI = (DefIdx == ~0u)
? 0 : getInstructionFromIndex(DefIdx);
if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI)) {
// Remember how to remat the def of this val#.
ReMatOrigDefs[VN] = ReMatDefMI;
// Original def may be modified, so we have to make a copy here. vrm must
// delete these!
ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
vrm.setVirtIsReMaterialized(li.reg, ReMatDefMI);
bool CanDelete = true;
for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
unsigned KillIdx = VNI->kills[j];
MachineInstr *KillMI = (KillIdx & 1)
? NULL : getInstructionFromIndex(KillIdx);
// The kill is a phi node; not all of its uses can be rematerialized,
// so the def must not be deleted.
if (!KillMI) {
CanDelete = false;
// Need a stack slot if there is any live range where uses cannot be
// rematerialized.
NeedStackSlot = true;
break;
}
}
if (CanDelete)
ReMatDelete.set(VN);
} else {
// Need a stack slot if there is any live range where uses cannot be
// rematerialized.
NeedStackSlot = true;
}
}
// One stack slot per live interval.
if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
Slot = vrm.assignVirt2StackSlot(li.reg);
// Create new intervals and rewrite defs and uses.
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
bool DefIsReMat = ReMatDefMI != NULL;
bool CanDelete = ReMatDelete[I->valno->id];
int LdSlot = 0;
bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
bool isLoad = isLoadSS ||
(DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
CanDelete, vrm, RegMap, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
NewVRegs, NewLIs);
}
// Insert spills / restores if we are splitting.
if (TrySplit) {
if (NeedStackSlot) {
int Id = SpillMBBs.find_first();
while (Id != -1) {
unsigned VReg = NewVRegs[Id];
int index = SpillIdxes[Id].first;
bool DoFold = SpillIdxes[Id].second;
bool isReMat = vrm.isReMaterialized(VReg);
MachineInstr *MI = getInstructionFromIndex(index);
int OpIdx = -1;
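// FoldedLoad is set when the spill point is also a restore point for a
// two-address use; folding the store then makes a separate reload unnecessary.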
bool FoldedLoad = false;
if (DoFold) {
for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
MachineOperand &MO = MI->getOperand(j);
if (!MO.isRegister() || MO.getReg() != VReg)
continue;
if (MO.isUse()) {
// Can't fold if it's two-address code and the use isn't the
// first and only use.
// If there is more than one use, a load is still needed.
if (!isReMat && !FoldedLoad &&
RestoreMBBs[Id] && RestoreIdxes[Id].first == index &&
RestoreIdxes[Id].second) {
FoldedLoad = true;
continue;
} else {
OpIdx = -1;
break;
}
}
OpIdx = (int)j;
}
}
// Fold the store into the def if possible.
if (OpIdx == -1)
DoFold = false;
if (DoFold) {
if (tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, true, Slot,
VReg)) {
if (FoldedLoad) {
// Folded a two-address instruction, do not issue a load.
RestoreMBBs.reset(Id);
RestoreIdxes.erase(Id);
}
} else
DoFold = false;
}
// Else tell the spiller to issue a store for us.
if (!DoFold)
vrm.addSpillPoint(VReg, MI);
Id = SpillMBBs.find_next(Id);
}
}
int Id = RestoreMBBs.find_first();
while (Id != -1) {
unsigned VReg = NewVRegs[Id];
int index = RestoreIdxes[Id].first;
bool DoFold = RestoreIdxes[Id].second;
MachineInstr *MI = getInstructionFromIndex(index);
int OpIdx = -1;
if (DoFold) {
for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
MachineOperand &MO = MI->getOperand(j);
if (!MO.isRegister() || MO.getReg() != VReg)
continue;
if (MO.isDef()) {
// Can't fold if it's two-address code.
OpIdx = -1;
break;
}
if (OpIdx != -1) {
// Multiple uses, do not fold!
OpIdx = -1;
break;
}
OpIdx = (int)j;
}
}
// Fold the load into the use if possible.
if (OpIdx == -1)
DoFold = false;
if (DoFold) {
if (vrm.isReMaterialized(VReg)) {
MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
int LdSlot = 0;
bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
// If the rematerializable def is a load, also try to fold it.
if (isLoadSS ||
(ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG))
DoFold = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, OpIdx,
isLoadSS, LdSlot, VReg);
else
DoFold = false;
} else
DoFold = tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx,
true, Slot, VReg);
}
// If folding is not possible / failed, then tell the spiller to issue a
// load / rematerialization for us.
if (!DoFold)
vrm.addRestorePoint(VReg, MI);
Id = RestoreMBBs.find_next(Id);
}
}
// Finalize spill weights.
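// Divide each new interval's weight by its size so the weight reflects cost
// per unit of length rather than total length.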
if (TrySplit)
for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
NewLIs[i]->weight /= NewLIs[i]->getSize();