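// Bookkeeping for the rewrite: SpillIdxes records, per machine basic block,
// where a spill store is needed; NewVRegs tracks the virtual registers
// created while rewriting; NewLIs collects the new live intervals to return.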
std::map<unsigned, std::pair<int, unsigned> > SpillIdxes;
std::map<unsigned,unsigned> NewVRegs;
std::vector<LiveInterval*> NewLIs;
SSARegMap *RegMap = mf_->getSSARegMap();
const TargetRegisterClass* rc = RegMap->getRegClass(li.reg);
unsigned NumValNums = li.getNumValNums();
SmallVector<MachineInstr*, 4> ReMatDefs;
ReMatDefs.resize(NumValNums, NULL);
SmallVector<MachineInstr*, 4> ReMatOrigDefs;
ReMatOrigDefs.resize(NumValNums, NULL);
SmallVector<int, 4> ReMatIds;
ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
BitVector ReMatDelete(NumValNums);
unsigned Slot = VirtRegMap::MAX_STACK_SLOT; // No stack slot assigned yet.
// Spilling a split live interval. It cannot be split any further. It's
// also guaranteed to be a single val# / range interval.
if (vrm.getPreSplitReg(li.reg)) {
vrm.setIsSplitFromReg(li.reg, 0);
bool DefIsReMat = vrm.isReMaterialized(li.reg);
Slot = vrm.getStackSlot(li.reg);
assert(Slot != VirtRegMap::MAX_STACK_SLOT);
MachineInstr *ReMatDefMI = DefIsReMat ?
vrm.getReMaterializedMI(li.reg) : NULL;
int LdSlot = 0;
bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
bool isLoad = isLoadSS ||
(DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
bool IsFirstRange = true;
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// If this is a split live interval with multiple ranges, it means there
// are two-address instructions that re-defined the value. Only the
// first def can be rematerialized!
if (IsFirstRange) {
rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
false, vrm, RegMap, rc, ReMatIds,
loopInfo, SpillMBBs, SpillIdxes, NewVRegs, NewLIs);
} else {
rewriteInstructionsForSpills(li, false, I, NULL, 0,
Slot, 0, false, false, false,
false, vrm, RegMap, rc, ReMatIds,
loopInfo, SpillMBBs, SpillIdxes, NewVRegs, NewLIs);
}
IsFirstRange = false;
}
return NewLIs;
}
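// Only bother trying to split if the live interval spans more than one
// machine basic block.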
bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
bool NeedStackSlot = false;
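// First pass over the val#s: determine whether each def is rematerializable
// and whether the original def can be deleted once its uses are rewritten.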
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
i != e; ++i) {
const VNInfo *VNI = *i;
unsigned VN = VNI->id;
unsigned DefIdx = VNI->def;
if (DefIdx == ~1U)
continue; // Dead val#.
// Is the def for the val# rematerializable?
MachineInstr *ReMatDefMI = (DefIdx == ~0u)
? 0 : getInstructionFromIndex(DefIdx);
if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI)) {
// Remember how to remat the def of this val#.
ReMatOrigDefs[VN] = ReMatDefMI;
// The original def may be modified, so we have to make a copy here. vrm must
// delete these!
ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
vrm.setVirtIsReMaterialized(li.reg, ReMatDefMI);
bool CanDelete = true;
for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
unsigned KillIdx = VNI->kills[j];
MachineInstr *KillMI = (KillIdx & 1)
? NULL : getInstructionFromIndex(KillIdx); // An odd index marks a phi kill.
// If the kill is a phi node, not all of its uses can be rematerialized,
// so the original def must not be deleted.
if (!KillMI) {
CanDelete = false;
// Need a stack slot if there is any live range where uses cannot be
// rematerialized.
NeedStackSlot = true;
break;
}
}
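// If every kill was a real (non-phi) instruction, all uses of this val# can
// be rematerialized and the original def may be deleted.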
if (CanDelete)
ReMatDelete.set(VN);
} else {
// Need a stack slot if there is any live range where uses cannot be
// rematerialized.
NeedStackSlot = true;
}
}
// One stack slot per live interval.
if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
Slot = vrm.assignVirt2StackSlot(li.reg);
// Create new intervals and rewrite defs and uses.
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
bool DefIsReMat = ReMatDefMI != NULL;
bool CanDelete = ReMatDelete[I->valno->id];
int LdSlot = 0;
bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
bool isLoad = isLoadSS ||
(DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
CanDelete, vrm, RegMap, rc, ReMatIds,
loopInfo, SpillMBBs, SpillIdxes, NewVRegs, NewLIs);
// Insert spills if we are splitting.
if (TrySplit && NeedStackSlot) {
int Id = SpillMBBs.find_first();
while (Id != -1) {
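// SpillIdxes packs the virtual register in the low 31 bits of .second and a
// "try to fold the store" flag in bit 31.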
unsigned index = SpillIdxes[Id].first;
unsigned VReg = SpillIdxes[Id].second & ((1U << 31)-1);
bool TryFold = SpillIdxes[Id].second & (1U << 31);
MachineInstr *MI = getInstructionFromIndex(index);
int OpIdx = -1;
if (TryFold) {
for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
MachineOperand &MO = MI->getOperand(j);
if (!MO.isRegister() || MO.getReg() != VReg)
continue;
if (MO.isUse()) {
// Can't fold if it's two-address code.
OpIdx = -1;
break;
}
OpIdx = (int)j;
}
}
// Fold the store into the def if possible.
if (OpIdx == -1 ||
!tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, true, Slot, VReg))
// Else tell the spiller to issue a store for us.
vrm.addSpillPoint(VReg, MI);
Id = SpillMBBs.find_next(Id);
}
}
// Finalize spill weights: normalize each new interval's weight by its size.
if (TrySplit)
for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
NewLIs[i]->weight /= NewLIs[i]->getSize();