ValuesPerBlock.push_back(std::make_pair(DepBB, S->getOperand(0)));
} else if (LoadInst* LD = dyn_cast<LoadInst>(DepInst)) {
if (LD->getType() != LI->getType()) {
UnavailableBlocks.push_back(DepBB);
continue;
}
ValuesPerBlock.push_back(std::make_pair(DepBB, LD));
} else {
UnavailableBlocks.push_back(DepBB);
continue;
}
}
// If we have no predecessors that produce a known value for this load, exit
// early.
if (ValuesPerBlock.empty()) return false;
// If all of the instructions we depend on produce a known value for this
// load, then it is fully redundant and we can use PHI insertion to compute
// its value. Insert PHIs and remove the fully redundant value now.
if (UnavailableBlocks.empty()) {
// Use cached PHI construction information from previous runs
SmallPtrSet<Instruction*, 4> &p = phiMap[LI->getPointerOperand()];
// FIXME: What does phiMap do? Are we positive it isn't getting invalidated?
for (SmallPtrSet<Instruction*, 4>::iterator I = p.begin(), E = p.end();
I != E; ++I) {
if ((*I)->getParent() == LI->getParent()) {
DEBUG(cerr << "GVN REMOVING NONLOCAL LOAD #1: " << *LI);
LI->replaceAllUsesWith(*I);
if (isa<PointerType>((*I)->getType()))
MD->invalidateCachedPointerInfo(*I);
toErase.push_back(LI);
NumGVNLoad++;
return true;
}
ValuesPerBlock.push_back(std::make_pair((*I)->getParent(), *I));
}
DEBUG(cerr << "GVN REMOVING NONLOCAL LOAD: " << *LI);
DenseMap<BasicBlock*, Value*> BlockReplValues;
BlockReplValues.insert(ValuesPerBlock.begin(), ValuesPerBlock.end());
// Perform PHI construction.
Value* v = GetValueForBlock(LI->getParent(), LI, BlockReplValues, true);
LI->replaceAllUsesWith(v);
if (!isa<GlobalValue>(v))
v->takeName(LI);
if (isa<PointerType>(v->getType()))
MD->invalidateCachedPointerInfo(v);
toErase.push_back(LI);
NumGVNLoad++;
return true;
}
if (!EnablePRE || !EnableLoadPRE)
return false;
// Okay, we have *some* definitions of the value. This means that the value
// is available in some of our (transitive) predecessors. Lets think about
// doing PRE of this load. This will involve inserting a new load into the
// predecessor when it's not available. We could do this in general, but
// prefer to not increase code size. As such, we only do this when we know
// that we only have to insert *one* load (which means we're basically moving
// the load, not inserting a new one).
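// For example: in a simple diamond where one predecessor stores to the load's
// pointer (making the value available there) and the other predecessor does
// nothing, reloading the pointer in the empty predecessor and joining the two
// values with a PHI in LoadBB effectively moves the load rather than
// duplicating it.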
// Everything we do here is based on local predecessors of LI's block. If it
// only has one predecessor, bail now.
BasicBlock *LoadBB = LI->getParent();
if (LoadBB->getSinglePredecessor())
return false;
// If we have a repl set with LI itself in it, this means we have a loop where
// at least one of the values is LI. Since this means that we won't be able
// to eliminate LI even if we insert uses in the other predecessors, we will
// end up increasing code size. Reject this by scanning for LI.
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
if (ValuesPerBlock[i].second == LI)
return false;
// Okay, we have some hope :). Check to see if the loaded value is fully
// available in all but one predecessor.
// FIXME: If we could restructure the CFG, we could make a common pred with
// all the preds that don't have an available LI and insert a new load into
// that one block.
BasicBlock *UnavailablePred = 0;
DenseMap<BasicBlock*, char> FullyAvailableBlocks;
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
FullyAvailableBlocks[ValuesPerBlock[i].first] = true;
for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
FullyAvailableBlocks[UnavailableBlocks[i]] = false;
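// Note: blocks that produced a usable value are seeded as fully available and
// blocks whose dependency was unknown or clobbered as unavailable;
// IsValueFullyAvailableInBlock is then expected to fill in the remaining
// blocks by walking their predecessors, so the query below asks whether each
// predecessor of LoadBB already has the value on every path into it.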
for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
PI != E; ++PI) {
if (IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
continue;
// If this load is not available in multiple predecessors, reject it.
if (UnavailablePred && UnavailablePred != *PI)
return false;
UnavailablePred = *PI;
}
assert(UnavailablePred != 0 &&
"Fully available value should be eliminated above!");
// If the loaded pointer is a PHI node defined in this block, do PHI translation
// to get its value in the predecessor.
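// For example: if the pointer is "%p = phi i32* [ %a, %P1 ], [ %b, %P2 ]" and
// UnavailablePred is %P2, the translated pointer is %b; a pointer that is not
// a PHI in this block is simply returned unchanged.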
Value *LoadPtr = LI->getOperand(0)->DoPHITranslation(LoadBB, UnavailablePred);
// Make sure the value is live in the predecessor. If it was defined by a
// non-PHI instruction in this block, we don't know how to recompute it above.
if (Instruction *LPInst = dyn_cast<Instruction>(LoadPtr))
if (!DT->dominates(LPInst->getParent(), UnavailablePred)) {
DEBUG(cerr << "COULDN'T PRE LOAD BECAUSE PTR IS UNAVAILABLE IN PRED: "
<< *LPInst << *LI << "\n");
return false;
}
// We don't currently handle critical edges :(
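// (If the edge from UnavailablePred to LoadBB were critical, a load inserted
// before UnavailablePred's terminator would also execute on paths that never
// reach LoadBB, so we conservatively bail out rather than split the edge
// here.)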
if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) {
DEBUG(cerr << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
<< UnavailablePred->getName() << "': " << *LI);
return false;
}
// Okay, we can eliminate this load by inserting a reload in the predecessor
// and using PHI construction to get the value in the other predecessors, do
// it.
DEBUG(cerr << "GVN REMOVING PRE LOAD: " << *LI);
Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
LI->getAlignment(),
UnavailablePred->getTerminator());
DenseMap<BasicBlock*, Value*> BlockReplValues;
BlockReplValues.insert(ValuesPerBlock.begin(), ValuesPerBlock.end());
BlockReplValues[UnavailablePred] = NewLoad;
// Perform PHI construction.
Value* v = GetValueForBlock(LI->getParent(), LI, BlockReplValues, true);
LI->replaceAllUsesWith(v);
if (!isa<GlobalValue>(v))
v->takeName(LI);
if (isa<PointerType>(v->getType()))
MD->invalidateCachedPointerInfo(v);
toErase.push_back(LI);
NumPRELoad++;
return true;
}
/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
if (L->isVolatile())
return false;
Value* pointer = L->getPointerOperand();
// ... to a pointer that has been loaded from before...
MemDepResult dep = MD->getDependency(L);
// If the value isn't available, don't do anything!
if (dep.isClobber())
return false;
// If it is defined in another block, try harder.
if (dep.isNonLocal())
return processNonLocalLoad(L, toErase);
Instruction *DepInst = dep.getInst();
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
// Only forward substitute stores to loads of the same type.
// FIXME: Could do better!
if (DepSI->getPointerOperand()->getType() != pointer->getType())
return false;
// Remove it!
L->replaceAllUsesWith(DepSI->getOperand(0));
if (isa<PointerType>(DepSI->getOperand(0)->getType()))
MD->invalidateCachedPointerInfo(DepSI->getOperand(0));
toErase.push_back(L);
NumGVNLoad++;
return true;
}
if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
// Only forward substitute loads to loads of the same type.
// FIXME: Could do better! load i32 -> load i8 -> truncate on little endian.
if (DepLI->getType() != L->getType())
return false;
// Remove it!
L->replaceAllUsesWith(DepLI);
if (isa<PointerType>(DepLI->getType()))
MD->invalidateCachedPointerInfo(DepLI);
toErase.push_back(L);
NumGVNLoad++;
return true;
}
// If this load really doesn't depend on anything, then we must be loading an
// undef value. This can happen when loading for a fresh allocation with no
// intervening stores, for example.
if (isa<AllocationInst>(DepInst)) {
L->replaceAllUsesWith(UndefValue::get(L->getType()));
toErase.push_back(L);
NumGVNLoad++;
return true;
}
return false;
}
Value* GVN::lookupNumber(BasicBlock* BB, uint32_t num) {
DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
if (I == localAvail.end())
return 0;
ValueNumberScope* locals = I->second;
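// ValueNumberScope chains mirror the dominator tree: each block's scope points
// at the scope of its immediate dominator (see processBlock), so walking the
// parent links below consults the tables of dominating blocks.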
while (locals) {
DenseMap<uint32_t, Value*>::iterator I = locals->table.find(num);
if (I != locals->table.end())
return I->second;
else
locals = locals->parent;
}
return 0;
}
/// AttemptRedundancyElimination - If the "fast path" of redundancy elimination
/// by inheritance from the dominator fails, see if we can perform phi
/// construction to eliminate the redundancy.
Value* GVN::AttemptRedundancyElimination(Instruction* orig, unsigned valno) {
BasicBlock* BaseBlock = orig->getParent();
SmallPtrSet<BasicBlock*, 4> Visited;
SmallVector<BasicBlock*, 8> Stack;
Stack.push_back(BaseBlock);
DenseMap<BasicBlock*, Value*> Results;
// Walk backwards through our predecessors, looking for instances of the
// value number we're looking for. Instances are recorded in the Results
// map, which is then used to perform phi construction.
while (!Stack.empty()) {
BasicBlock* Current = Stack.back();
Stack.pop_back();
// If we've walked all the way to a proper dominator, then give up. Cases
// where the instance is in the dominator will have been caught by the fast
// path, and any cases that require phi construction further than this are
// probably not worth it anyways. Note that this is a SIGNIFICANT compile
// time improvement.
if (DT->properlyDominates(Current, orig->getParent())) return 0;
DenseMap<BasicBlock*, ValueNumberScope*>::iterator LA =
localAvail.find(Current);
if (LA == localAvail.end()) return 0;
DenseMap<unsigned, Value*>::iterator V = LA->second->table.find(valno);
if (V != LA->second->table.end()) {
// Found an instance, record it.
Results.insert(std::make_pair(Current, V->second));
continue;
}
// If we reach the beginning of the function, then give up.
if (pred_begin(Current) == pred_end(Current))
return 0;
for (pred_iterator PI = pred_begin(Current), PE = pred_end(Current);
PI != PE; ++PI)
if (Visited.insert(*PI))
Stack.push_back(*PI);
}
// If we didn't find instances, give up. Otherwise, perform phi construction.
if (Results.size() == 0)
return 0;
else
return GetValueForBlock(BaseBlock, orig, Results, true);
}
/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets
bool GVN::processInstruction(Instruction *I,
SmallVectorImpl<Instruction*> &toErase) {
if (LoadInst* L = dyn_cast<LoadInst>(I)) {
bool changed = processLoad(L, toErase);
if (!changed) {
unsigned num = VN.lookup_or_add(L);
localAvail[I->getParent()]->table.insert(std::make_pair(num, L));
}
return changed;
}
uint32_t nextNum = VN.getNextUnusedValueNumber();
unsigned num = VN.lookup_or_add(I);
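// Note: nextNum was sampled before this lookup, so if lookup_or_add handed
// back that same number the value number is brand new; the "num == nextNum"
// fast path below relies on the fact that no dominating block can already
// hold a copy of a number that did not exist until now.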
// Allocations are always uniquely numbered, so we can save time and memory
// by fast failing them.
if (isa<AllocationInst>(I) || isa<TerminatorInst>(I)) {
localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
return false;
}
if (PHINode* p = dyn_cast<PHINode>(I)) {
Value* constVal = CollapsePhi(p);
if (constVal) {
for (PhiMapType::iterator PI = phiMap.begin(), PE = phiMap.end();
PI != PE; ++PI)
PI->second.erase(p);
p->replaceAllUsesWith(constVal);
if (isa<PointerType>(constVal->getType()))
MD->invalidateCachedPointerInfo(constVal);
VN.erase(p);
toErase.push_back(p);
} else {
localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
}
// If the number we were assigned was a brand new VN, then we don't
// need to do a lookup to see if the number already exists
// somewhere in the domtree: it can't!
} else if (num == nextNum) {
localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
// Perform fast-path value-number based elimination of values inherited from
// dominators.
} else if (Value* repl = lookupNumber(I->getParent(), num)) {
// Remove it!
I->replaceAllUsesWith(repl);
if (isa<PointerType>(repl->getType()))
MD->invalidateCachedPointerInfo(repl);
toErase.push_back(I);
return true;
#if 0
// Perform slow-path value-number based elimination with phi construction.
} else if (Value* repl = AttemptRedundancyElimination(I, num)) {
// Remove it!
VN.erase(I);
I->replaceAllUsesWith(repl);
if (isa<PointerType>(repl->getType()))
MD->invalidateCachedPointerInfo(repl);
toErase.push_back(I);
return true;
#endif
} else {
localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
}
return false;
}
// GVN::runOnFunction - This is the main transformation entry point for a
// function.
//
bool GVN::runOnFunction(Function& F) {
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTree>();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
VN.setMemDep(MD);
VN.setDomTree(DT);
bool changed = false;
bool shouldContinue = true;
// Merge unconditional branches, allowing PRE to catch more
// optimization opportunities.
for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
BasicBlock* BB = FI;
++FI;
bool removedBlock = MergeBlockIntoPredecessor(BB, this);
if (removedBlock) NumGVNBlocks++;
changed |= removedBlock;
}
unsigned Iteration = 0;
while (shouldContinue) {
DEBUG(cerr << "GVN iteration: " << Iteration << "\n");
shouldContinue = iterateOnFunction(F);
changed |= shouldContinue;
++Iteration;
}
if (EnablePRE) {
bool PREChanged = true;
while (PREChanged) {
PREChanged = performPRE(F);
changed |= PREChanged;
}
}
// FIXME: Should perform GVN again after PRE does something. PRE can move
// computations into blocks where they become fully redundant. Note that
// we can't do this until PRE's critical edge splitting updates memdep.
// Actually, when this happens, we should just fully integrate PRE into GVN.
cleanupGlobalSets();
return changed;
}
bool GVN::processBlock(BasicBlock* BB) {
DomTreeNode* DTN = DT->getNode(BB);
// FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
// incrementing BI before processing an instruction).
SmallVector<Instruction*, 8> toErase;
bool changed_function = false;
if (DTN->getIDom())
localAvail[BB] =
new ValueNumberScope(localAvail[DTN->getIDom()->getBlock()]);
else
localAvail[BB] = new ValueNumberScope(0);
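// Each block's scope inherits from its immediate dominator's scope, which is
// what lets lookupNumber find values made available anywhere up the dominator
// tree from this block.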
for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
BI != BE;) {
changed_function |= processInstruction(BI, toErase);
if (toErase.empty()) {
++BI;
continue;
}
// If we need some instructions deleted, do it now.
NumGVNInstr += toErase.size();
// Avoid iterator invalidation.
bool AtStart = BI == BB->begin();
if (!AtStart)
--BI;
for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(),
E = toErase.end(); I != E; ++I) {
DEBUG(cerr << "GVN removed: " << **I);
MD->removeInstruction(*I);
(*I)->eraseFromParent();
}
toErase.clear();
if (AtStart)
BI = BB->begin();
else
++BI;
}
return changed_function;
}
/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function& F) {
bool Changed = false;
SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
DenseMap<BasicBlock*, Value*> predMap;
for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
BasicBlock* CurrentBlock = *DI;
// Nothing to PRE in the entry block.
if (CurrentBlock == &F.getEntryBlock()) continue;
for (BasicBlock::iterator BI = CurrentBlock->begin(),
BE = CurrentBlock->end(); BI != BE; ) {
Instruction *CurInst = BI++;
if (isa<AllocationInst>(CurInst) || isa<TerminatorInst>(CurInst) ||
isa<PHINode>(CurInst) || CurInst->mayReadFromMemory() ||
CurInst->mayWriteToMemory())
continue;
uint32_t valno = VN.lookup(CurInst);
// Look for the predecessors for PRE opportunities. We're
// only trying to solve the basic diamond case, where
// a value is computed in the successor and one predecessor,
// but not the other. We also explicitly disallow cases
// where the successor is its own predecessor, because they're
// more complicated to get right.
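// Roughly, the pattern we are after looks like:
//      [Pred1: %v = add ...]    [Pred2: no %v]
//                   \              /
//              [CurrentBlock: %v = add ...]
// If the value is available in Pred1 but not Pred2, we clone the computation
// into Pred2 and replace the one in CurrentBlock with a PHI of the two
// predecessor values; numWith/numWithout below count these two cases.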
unsigned numWith = 0;
unsigned numWithout = 0;
BasicBlock* PREPred = 0;
predMap.clear();
for (pred_iterator PI = pred_begin(CurrentBlock),
PE = pred_end(CurrentBlock); PI != PE; ++PI) {
// We're not interested in PRE where the block is its
// own predecessor, or in blocks with predecessors
// that are not reachable.
if (*PI == CurrentBlock) {
numWithout = 2;
break;
} else if (!localAvail.count(*PI)) {
numWithout = 2;
break;
}
DenseMap<uint32_t, Value*>::iterator predV =
localAvail[*PI]->table.find(valno);
if (predV == localAvail[*PI]->table.end()) {
PREPred = *PI;
numWithout++;
} else if (predV->second == CurInst) {
numWithout = 2;
} else {
predMap[*PI] = predV->second;
numWith++;
}
}
// Don't do PRE when it might increase code size, i.e. when
// we would need to insert instructions in more than one pred.
if (numWithout != 1 || numWith == 0)
continue;
// We can't do PRE safely on a critical edge, so instead we schedule
// the edge to be split and perform the PRE the next time we iterate
// on the function.
unsigned succNum = 0;
for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
i != e; ++i)
if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
succNum = i;
break;
}
if (isCriticalEdge(PREPred->getTerminator(), succNum)) {
toSplit.push_back(std::make_pair(PREPred->getTerminator(), succNum));
continue;
}
// Instantiate the expression in the predecessor that lacked it.
// Because we are going top-down through the block, all value numbers
// will be available in the predecessor by the time we need them. Any
// that weren't originally present will have been instantiated earlier
// in this loop.
Instruction* PREInstr = CurInst->clone();
bool success = true;
for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
Value *Op = PREInstr->getOperand(i);
if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
continue;
if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
PREInstr->setOperand(i, V);
} else {
success = false;
break;
}
}
// Fail out if we encounter an operand that is not available in
// the PRE predecessor. This is typically because of loads which
// are not value numbered precisely.
if (!success) {
delete PREInstr;
continue;
}
PREInstr->insertBefore(PREPred->getTerminator());
PREInstr->setName(CurInst->getName() + ".pre");
predMap[PREPred] = PREInstr;
VN.add(PREInstr, valno);
NumGVNPRE++;
// Update the availability map to include the new instruction.
localAvail[PREPred]->table.insert(std::make_pair(valno, PREInstr));
// Create a PHI to make the value available in this block.
PHINode* Phi = PHINode::Create(CurInst->getType(),
CurInst->getName() + ".pre-phi",
CurrentBlock->begin());
for (pred_iterator PI = pred_begin(CurrentBlock),
PE = pred_end(CurrentBlock); PI != PE; ++PI)
Phi->addIncoming(predMap[*PI], *PI);
VN.add(Phi, valno);
localAvail[CurrentBlock]->table[valno] = Phi;
CurInst->replaceAllUsesWith(Phi);
if (isa<PointerType>(Phi->getType()))
MD->invalidateCachedPointerInfo(Phi);
VN.erase(CurInst);
DEBUG(cerr << "GVN PRE removed: " << *CurInst);
MD->removeInstruction(CurInst);
CurInst->eraseFromParent();
Changed = true;
}
}
for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator
I = toSplit.begin(), E = toSplit.end(); I != E; ++I)
SplitCriticalEdge(I->first, I->second, this);
return Changed || toSplit.size();
}
// iterateOnFunction - Executes one iteration of GVN
bool GVN::iterateOnFunction(Function &F) {
// Top-down walk of the dominator tree
bool changed = false;
#if 0
// Needed for value numbering with phi construction to work.
ReversePostOrderTraversal<Function*> RPOT(&F);
for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
RE = RPOT.end(); RI != RE; ++RI)
changed |= processBlock(*RI);
#else
for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
DE = df_end(DT->getRootNode()); DI != DE; ++DI)
changed |= processBlock(DI->getBlock());
#endif
return changed;
}
void GVN::cleanupGlobalSets() {
VN.clear();
phiMap.clear();
for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
delete I->second;
localAvail.clear();
}