"llvm/lib/git@repo.hca.bsc.es:rferrer/llvm-epi-0.8.git" did not exist on "383b61e791db4ed6440174601283de3c08a5469f"
Newer
Older
Jakob Stoklund Olesen
committed
        for (unsigned k = 0, e = MI.getNumOperands(); k != e; ++k) {
          MachineOperand &MOk = MI.getOperand(k);
          if (MOk.isReg() && MOk.isEarlyClobber() &&
              TRI->regsOverlap(MOk.getReg(), PhysReg)) {
            CanReuse = false;
            DEBUG(dbgs() << "Not reusing physreg " << TRI->getName(PhysReg)
                         << " for vreg" << VirtReg << ": " << MOk << '\n');
            break;
          }
        }
      }
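
      // If we get here with CanReuse still true, neither the tied-operand
      // restriction above nor an overlapping inline-asm earlyclobber operand
      // vetoed the reuse of PhysReg.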
      if (CanReuse) {
        // If this stack slot value is already available, reuse it!
        if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
          DEBUG(dbgs() << "Reusing RM#"
                       << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
        else
          DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
        DEBUG(dbgs() << " from physreg "
                     << TRI->getName(PhysReg) << " for vreg"
                     << VirtReg << " instead of reloading into physreg "
                     << TRI->getName(VRM->getPhys(VirtReg)) << '\n');
        unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);

        // The only technical detail we have is that we don't know that
        // PhysReg won't be clobbered by a reloaded stack slot that occurs
        // later in the instruction.  In particular, consider 'op V1, V2'.
        // If V1 is available in physreg R0, we would choose to reuse it
        // here, instead of reloading it into the register the allocator
        // indicated (say R1).  However, V2 might have to be reloaded
        // later, and it might indicate that it needs to live in R0.  When
        // this occurs, we need to have information available that
        // indicates it is safe to use R1 for the reload instead of R0.
        //
        // To further complicate matters, we might conflict with an alias,
        // or R0 and R1 might not be compatible with each other.  In this
        // case, we actually insert a reload for V1 in R1, ensuring that
        // we can get at R0 or its alias.
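        //
        // The ReusedOperands (ReuseInfo) bookkeeping below records exactly
        // that: each reuse is logged so GetRegForReload can later hand back
        // the originally assigned register if such a conflict materializes.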
        ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                VRM->getPhys(VirtReg), VirtReg);
        if (isTied)
          // Only mark it clobbered if this is a use&def operand.
          ReusedOperands.markClobbered(PhysReg);
        ++NumReused;

        if (MI.getOperand(i).isKill() &&
            ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
          // The store of this spilled value is potentially dead, but we
          // won't know for certain until we've confirmed that the re-use
          // above is valid, which means waiting until the other operands
          // are processed. For now we just track the spill slot, we'll
          // remove it after the other operands are processed if valid.
          PotentialDeadStoreSlots.push_back(ReuseSlot);
        }

        // Mark the operand isKill if there are no other uses of the same
        // virtual register and it's not a two-address operand. IsKill will
        // be unset if the reg is reused.
        if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
          MI.getOperand(i).setIsKill();
          KilledMIRegs.insert(VirtReg);
        }
        continue;
      }  // CanReuse

      // Otherwise we have a situation where we have a two-address instruction
      // whose mod/ref operand needs to be reloaded.  This reload is already
      // available in some register "PhysReg", but if we used PhysReg as the
      // operand to our 2-addr instruction, the instruction would modify
      // PhysReg.  This isn't cool if something later uses PhysReg and expects
      // to get its initial value.
      //
      // To avoid this problem, and to avoid doing a load right after a store,
      // we emit a copy from PhysReg into the designated register for this
      // operand.
      //
      // This case also applies to an earlyclobber'd PhysReg.
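      //
      // Illustrative trace (register names hypothetical): vreg1 is tied,
      // available in R0, but assigned to R1.  Instead of reusing R0 we emit
      //   R1 = COPY R0
      // and rewrite the operand to R1, so the value cached in R0 survives
      // for any later reader.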
      unsigned DesignatedReg = VRM->getPhys(VirtReg);
      assert(DesignatedReg && "Must map virtreg to physreg!");

      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
      // available.  If this occurs, use the register indicated by the
      // reuser.
      if (ReusedOperands.hasReuses())
        DesignatedReg = ReusedOperands.
          GetRegForReload(VirtReg, DesignatedReg, &MI, Spills,
                          MaybeDeadStores, RegKills, KillOps, *VRM);

      // If the mapped designated register is actually the physreg we have
      // incoming, we don't need to insert a dead copy.
      if (DesignatedReg == PhysReg) {
        // If this stack slot value is already available, reuse it!
        if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
          DEBUG(dbgs() << "Reusing RM#"
                       << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
        else
          DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
        DEBUG(dbgs() << " from physreg " << TRI->getName(PhysReg)
                     << " for vreg" << VirtReg
                     << " instead of reloading into same physreg.\n");
        unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);
        ReusedOperands.markClobbered(RReg);
        ++NumReused;
        continue;
      }

      MRI->setPhysRegUsed(DesignatedReg);
      ReusedOperands.markClobbered(DesignatedReg);

      // Back-schedule reloads and remats.
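      // (That is, ComputeReloadLoc looks for the earliest point in the block
      // where the reload can safely be placed, so it can issue well before
      // its result is needed.)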
      MachineBasicBlock::iterator InsertLoc =
        ComputeReloadLoc(&MI, MBB->begin(), PhysReg, TRI, DoReMat,
                         SSorRMId, TII, *MBB->getParent());

      MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI.getDebugLoc(),
                                     TII->get(TargetOpcode::COPY),
                                     DesignatedReg).addReg(PhysReg);
      CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
      UpdateKills(*CopyMI, TRI, RegKills, KillOps);

      // This invalidates DesignatedReg.
      Spills.ClobberPhysReg(DesignatedReg);
      Spills.addAvailable(ReuseSlot, DesignatedReg);
      unsigned RReg =
        SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
      MI.getOperand(i).setReg(RReg);
      MI.getOperand(i).setSubReg(0);
      DEBUG(dbgs() << '\t' << *prior(InsertLoc));
      ++NumReused;
      continue;
    } // if (PhysReg)

    // Otherwise, reload it and remember that we have it.
    PhysReg = VRM->getPhys(VirtReg);
    assert(PhysReg && "Must map virtreg to physreg!");

    // Note that, if we reused a register for a previous operand, the
    // register we want to reload into might not actually be
    // available.  If this occurs, use the register indicated by the
    // reuser.
    if (ReusedOperands.hasReuses())
      PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
                  Spills, MaybeDeadStores, RegKills, KillOps, *VRM);

    MRI->setPhysRegUsed(PhysReg);
    ReusedOperands.markClobbered(PhysReg);
    if (AvoidReload)
      ++NumAvoided;
    else {
      // Back-schedule reloads and remats.
      MachineBasicBlock::iterator InsertLoc =
        ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI, DoReMat,
                         SSorRMId, TII, *MBB->getParent());

      if (DoReMat) {
        ReMaterialize(*MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, *VRM);
      } else {
        const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
        TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SSorRMId, RC, TRI);
        MachineInstr *LoadMI = prior(InsertLoc);
        VRM->addSpillSlotUse(SSorRMId, LoadMI);
        ++NumLoads;
        DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
      }

      // This invalidates PhysReg.
      Spills.ClobberPhysReg(PhysReg);

      // Any stores to this stack slot are not dead anymore.
      if (!DoReMat)
        MaybeDeadStores[SSorRMId] = NULL;
      Spills.addAvailable(SSorRMId, PhysReg);

      // Assumes this is the last use. IsKill will be unset if reg is reused
      // unless it's a two-address operand.
      if (!MI.isRegTiedToDefOperand(i) &&
          KilledMIRegs.count(VirtReg) == 0) {
        MI.getOperand(i).setIsKill();
        KilledMIRegs.insert(VirtReg);
      }

      UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
      DEBUG(dbgs() << '\t' << *prior(InsertLoc));
    }
    unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
    MI.getOperand(i).setReg(RReg);
    MI.getOperand(i).setSubReg(0);
  }

  // Ok - now we can remove stores that have been confirmed dead.
  for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
    // This was the last use and the spilled value is still available
    // for reuse. That means the spill was unnecessary!
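    // (The reuse recorded earlier replaced the only reload of this slot; if
    // the operand scan completed without invalidating that reuse, nothing
    // reads the slot and the store that filled it can be deleted.)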
    int PDSSlot = PotentialDeadStoreSlots[j];
    MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
    if (DeadStore) {
      DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
      InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
      VRM->RemoveMachineInstrFromMaps(DeadStore);
      MBB->erase(DeadStore);
      MaybeDeadStores[PDSSlot] = NULL;
      ++NumDSE;
    }
  }
}

/// rewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them. If possible, avoid reloading vregs.
void
LocalRewriter::RewriteMBB(LiveIntervals *LIs,
                          AvailableSpills &Spills, BitVector &RegKills,
                          std::vector<MachineOperand*> &KillOps) {

  DEBUG(dbgs() << "\n**** Local spiller rewriting MBB '"
               << MBB->getName() << "':\n");

  MachineFunction &MF = *MBB->getParent();

  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store.  If the stack slot value is never read
  // (because the value was used from some available register, for example),
  // and subsequently stored to, the original store is dead.  This map keeps
  // track of inserted stores that are not used.  If we see a subsequent store
  // to the same stack slot, the original store is deleted.
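  //
  // Illustrative sequence (slot and register names hypothetical):
  //   STORE R0 -> [SS#1]   ; inserted spill, recorded in MaybeDeadStores
  //   ...                  ; no load from SS#1 in between
  //   STORE R2 -> [SS#1]   ; second store proves the first one was dead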
  std::vector<MachineInstr*> MaybeDeadStores;
  MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

  // ReMatDefs - These are rematerializable def MIs which are not deleted.
  SmallSet<MachineInstr*, 4> ReMatDefs;

  // Keep track of the registers we have already spilled in case there are
  // multiple defs of the same register in MI.
  SmallSet<unsigned, 8> SpilledMIRegs;

  RegKills.reset();
  KillOps.clear();
  KillOps.resize(TRI->getNumRegs(), NULL);

  DistanceMap.clear();
  for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
       MII != E; ) {
    MachineBasicBlock::iterator NextMII = llvm::next(MII);

    if (OptimizeByUnfold(MII, MaybeDeadStores, Spills, RegKills, KillOps))
      NextMII = llvm::next(MII);

    if (InsertEmergencySpills(MII))
      NextMII = llvm::next(MII);

    InsertRestores(MII, Spills, RegKills, KillOps);

    if (InsertSpills(MII))
      NextMII = llvm::next(MII);

    bool Erased = false;
    bool BackTracked = false;
    MachineInstr &MI = *MII;

    // Remember DbgValue's which reference stack slots.
    if (MI.isDebugValue() && MI.getOperand(0).isFI())
      Slot2DbgValues[MI.getOperand(0).getIndex()].push_back(&MI);

    /// ReusedOperands - Keep track of operand reuse in case we need to undo
    /// reuse.
    ReuseInfo ReusedOperands(MI, TRI);

    ProcessUses(MI, Spills, MaybeDeadStores, RegKills, ReusedOperands, KillOps);

    DEBUG(dbgs() << '\t' << MI);

    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.

    // Copy the folded virts to a small vector, we may change MI2VirtMap.
    SmallVector<std::pair<unsigned, VirtRegMap::ModRef>, 4> FoldedVirts;
    // C++0x FTW!
    for (std::pair<VirtRegMap::MI2VirtMapTy::const_iterator,
                   VirtRegMap::MI2VirtMapTy::const_iterator> FVRange =
           VRM->getFoldedVirts(&MI);
         FVRange.first != FVRange.second; ++FVRange.first)
      FoldedVirts.push_back(FVRange.first->second);

    SmallSet<int, 2> FoldedSS;
    for (unsigned FVI = 0, FVE = FoldedVirts.size(); FVI != FVE; ++FVI) {
      unsigned VirtReg = FoldedVirts[FVI].first;
      VirtRegMap::ModRef MR = FoldedVirts[FVI].second;
      DEBUG(dbgs() << "Folded vreg: " << VirtReg << " MR: " << MR);

      int SS = VRM->getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
      DEBUG(dbgs() << " - StackSlot: " << SS << "\n");

      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
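          // e.g. 'R1 = LOAD [SS#3]' becomes 'R1 = COPY R0' when SS#3 is
          // known to be live in R0, or disappears entirely when R1 == R0
          // (register and slot names illustrative).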
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DEBUG(dbgs() << "Promoted Load To Copy: " << MI);
            if (DestReg != InReg) {
              MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
              MachineInstr *CopyMI = BuildMI(*MBB, &MI, MI.getDebugLoc(),
                                             TII->get(TargetOpcode::COPY))
                .addReg(DestReg, RegState::Define, DefMO->getSubReg())
                .addReg(InReg, RegState::Kill);
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              NextMII = CopyMI;
              NextMII->setAsmPrinterFlag(MachineInstr::ReloadReuse);
              BackTracked = true;
            } else {
              DEBUG(dbgs() << "Removing now-noop copy: " << MI);
              // Unset last kill since it's being reused.
              InvalidateKill(InReg, TRI, RegKills, KillOps);
              Spills.disallowClobberPhysReg(InReg);
            }

            InvalidateKills(MI, TRI, RegKills, KillOps);
            VRM->RemoveMachineInstrFromMaps(&MI);
            MBB->erase(&MI);
            Erased = true;
            goto ProcessNextInst;
          }
        } else {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          if (PhysReg &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
            MBB->insert(MII, NewMIs[0]);
            InvalidateKills(MI, TRI, RegKills, KillOps);
            VRM->RemoveMachineInstrFromMaps(&MI);
            MBB->erase(&MI);
            Erased = true;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            goto ProcessNextInst;
          }
        }
      }

      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      if (DeadStore) {
        bool isDead = !(MR & VirtRegMap::isRef);
        MachineInstr *NewStore = NULL;
        if (MR & VirtRegMap::isModRef) {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          // We can reuse this physreg as long as we are allowed to clobber
          // the value and there isn't an earlier def that has already
          // clobbered the physreg.
          if (PhysReg &&
              !ReusedOperands.isClobbered(PhysReg) &&
              Spills.canClobberPhysReg(PhysReg) &&
              !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
            MachineOperand *KillOpnd =
              DeadStore->findRegisterUseOperand(PhysReg, true);
            // Note, if the store is storing a sub-register, it's possible the
            // super-register is needed below.
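            // In that case KillOpnd->getSubReg() is non-zero and we
            // conservatively skip the unfolding below.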
            if (KillOpnd && !KillOpnd->getSubReg() &&
                TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
              MBB->insert(MII, NewMIs[0]);
              NewStore = NewMIs[1];
              MBB->insert(MII, NewStore);
              VRM->addSpillSlotUse(SS, NewStore);
              InvalidateKills(MI, TRI, RegKills, KillOps);
              VRM->RemoveMachineInstrFromMaps(&MI);
              MBB->erase(&MI);
              Erased = true;
              --NextMII;
              --NextMII;  // backtrack to the unfolded instruction.
              BackTracked = true;
              isDead = true;
              ++NumSUnfold;
            }
          }
        }

        if (isDead) { // Previous store is dead.
          // If we get here, the store is dead, nuke it now.
          DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
          InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
          VRM->RemoveMachineInstrFromMaps(DeadStore);
          MBB->erase(DeadStore);
          if (!NewStore)
            ++NumDSE;
        }

        MaybeDeadStores[SS] = NULL;
        if (NewStore) {
          // Treat this store as a spill merged into a copy. That makes the
          // stack slot value available.
          VRM->virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
          goto ProcessNextInst;
        }
      }

      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy). If
        // so, realize that the vreg is available now, and add the store to the
        // MaybeDeadStore info.
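        // e.g. a lone 'STORE R0 -> [SS#2]' with MR == isMod is just the
        // spill itself, so SS#2's value is now also available in R0
        // (names illustrative).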
        int StackSlot;
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");

            if (CommuteToFoldReload(MII, VirtReg, SrcReg, StackSlot,
                                    Spills, RegKills, KillOps, TRI)) {
              NextMII = llvm::next(MII);
              BackTracked = true;
              goto ProcessNextInst;
            }

            // Okay, this is certainly a store of SrcReg to [StackSlot].  Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now.  Otherwise, make the register
            // available in PhysReg.
            Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
          }
        }
      }
    }

    // Process all of the spilled defs.
    SpilledMIRegs.clear();
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isReg() && MO.getReg() && MO.isDef()))
        continue;

      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        // Also check if it's copying from an "undef". If so, we can't
        // eliminate this, or else the undef marker is lost and it will
        // confuse the scavenger. This is extremely rare.
        if (MI.isIdentityCopy() && !MI.getOperand(1).isUndef() &&
            MI.getNumOperands() == 2) {
          ++NumDCE;
          DEBUG(dbgs() << "Removing now-noop copy: " << MI);
          SmallVector<unsigned, 2> KillRegs;
          InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
          if (MO.isDead() && !KillRegs.empty()) {
            // Source register or an implicit super/sub-register use is killed.
            assert(TRI->regsOverlap(KillRegs[0], MI.getOperand(0).getReg()));
            // Last def is now dead.
            TransferDeadness(MI.getOperand(1).getReg(), RegKills, KillOps);
          }
          VRM->RemoveMachineInstrFromMaps(&MI);
          MBB->erase(&MI);
          Erased = true;
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }

        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register.  If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }

      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM->isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM->getStackSlot(VirtReg);
      const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
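      // e.g. if the def writes a 16-bit sub-register and the tied use was
      // assigned AX, findSuperReg picks EAX below so the full value is
      // stored (x86-style names, illustrative only).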
      unsigned PhysReg;
      unsigned TiedOp;
      if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM->getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
                      Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
        }
      }

      assert(PhysReg && "VR not assigned a physical register?");
      MRI->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);
      MI.getOperand(i).setSubReg(0);

      if (!MO.isDead() && SpilledMIRegs.insert(VirtReg)) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MII, -1, PhysReg, StackSlot, RC, true,
                            LastStore, Spills, ReMatDefs, RegKills, KillOps);
        NextMII = llvm::next(MII);

        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        if (MI.isIdentityCopy()) {
          ++NumDCE;
          DEBUG(dbgs() << "Removing now-noop copy: " << MI);
          InvalidateKills(MI, TRI, RegKills, KillOps);
          VRM->RemoveMachineInstrFromMaps(&MI);
          MBB->erase(&MI);
          Erased = true;
          UpdateKills(*LastStore, TRI, RegKills, KillOps);
          goto ProcessNextInst;
        }
      }
    }
  ProcessNextInst:
    // Delete dead instructions without side effects.
    if (!Erased && !BackTracked && isSafeToDelete(MI)) {
      InvalidateKills(MI, TRI, RegKills, KillOps);
      VRM->RemoveMachineInstrFromMaps(&MI);
      MBB->erase(&MI);
      Erased = true;
    }
    if (!Erased)
      DistanceMap.insert(std::make_pair(&MI, DistanceMap.size()));
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
        UpdateKills(*II, TRI, RegKills, KillOps);
    }
    MII = NextMII;
  }
}

llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
  switch (RewriterOpt) {
  default: llvm_unreachable("Unreachable!");
  case local:
    return new LocalRewriter();
  case trivial:
    return new TrivialRewriter();
  }
}