Skip to content
GVN.cpp 59.7 KiB
Newer Older
      ValuesPerBlock.push_back(std::make_pair(DepBB, LD));
      UnavailableBlocks.push_back(DepBB);
      continue;
  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;
  
  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    // Use cached PHI construction information from previous runs
    SmallPtrSet<Instruction*, 4> &p = phiMap[LI->getPointerOperand()];
    // FIXME: What does phiMap do? Are we positive it isn't getting invalidated?
    for (SmallPtrSet<Instruction*, 4>::iterator I = p.begin(), E = p.end();
         I != E; ++I) {
      if ((*I)->getParent() == LI->getParent()) {
        DEBUG(cerr << "GVN REMOVING NONLOCAL LOAD #1: " << *LI);
        LI->replaceAllUsesWith(*I);
        if (isa<PointerType>((*I)->getType()))
          MD->invalidateCachedPointerInfo(*I);
        toErase.push_back(LI);
        NumGVNLoad++;
        return true;
      }
      
      ValuesPerBlock.push_back(std::make_pair((*I)->getParent(), *I));
    DEBUG(cerr << "GVN REMOVING NONLOCAL LOAD: " << *LI);
    
    DenseMap<BasicBlock*, Value*> BlockReplValues;
    BlockReplValues.insert(ValuesPerBlock.begin(), ValuesPerBlock.end());
    // Perform PHI construction.
    Value* v = GetValueForBlock(LI->getParent(), LI, BlockReplValues, true);
    LI->replaceAllUsesWith(v);
    if (isa<PointerType>(v->getType()))
      MD->invalidateCachedPointerInfo(v);
    toErase.push_back(LI);
    NumGVNLoad++;
    return true;
  
  if (!EnablePRE || !EnableLoadPRE)
    return false;
  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Lets think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).
  
  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Lets find first basic block with more than one predecessor.  Walk backwards
  // through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (!TmpBB) // If haven't found any, bail now.
      return false;
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;
  }
  
  assert(TmpBB);
  LoadBB = TmpBB;
  
  // If we have a repl set with LI itself in it, this means we have a loop where
  // at least one of the values is LI.  Since this means that we won't be able
  // to eliminate LI even if we insert uses in the other predecessors, we will
  // end up increasing code size.  Reject this by scanning for LI.
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    if (ValuesPerBlock[i].second == LI)
      return false;
  
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
      if (Instruction *I = dyn_cast<Instruction>(ValuesPerBlock[i].second))
	// "Hot" Instruction is in some loop (because it dominates its dep. 
	// instruction).
	if (DT->dominates(LI, I)) { 
	  isHot = true;
	  break;
	}

    // We are interested only in "hot" instructions. We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Okay, we have some hope :).  Check to see if the loaded value is fully
  // available in all but one predecessor.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  BasicBlock *UnavailablePred = 0;

  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].first] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    if (IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      continue;
    
    // If this load is not available in multiple predecessors, reject it.
    if (UnavailablePred && UnavailablePred != *PI)
      return false;
    UnavailablePred = *PI;
  }
  
  assert(UnavailablePred != 0 &&
         "Fully available value should be eliminated above!");
  
  // If the loaded pointer is PHI node defined in this block, do PHI translation
  // to get its value in the predecessor.
  Value *LoadPtr = LI->getOperand(0)->DoPHITranslation(LoadBB, UnavailablePred);
  
  // Make sure the value is live in the predecessor.  If it was defined by a
  // non-PHI instruction in this block, we don't know how to recompute it above.
  if (Instruction *LPInst = dyn_cast<Instruction>(LoadPtr))
    if (!DT->dominates(LPInst->getParent(), UnavailablePred)) {
      DEBUG(cerr << "COULDN'T PRE LOAD BECAUSE PTR IS UNAVAILABLE IN PRED: "
                 << *LPInst << *LI << "\n");
      return false;
    }
  
  // We don't currently handle critical edges :(
  if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) {
    DEBUG(cerr << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
                << UnavailablePred->getName() << "': " << *LI);
    return false;
  }
  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(cerr << "GVN REMOVING PRE LOAD: " << *LI);
  
  Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                LI->getAlignment(),
                                UnavailablePred->getTerminator());
  
  SmallPtrSet<Instruction*, 4> &p = phiMap[LI->getPointerOperand()];
  for (SmallPtrSet<Instruction*, 4>::iterator I = p.begin(), E = p.end();
       I != E; ++I)
    ValuesPerBlock.push_back(std::make_pair((*I)->getParent(), *I));
  
  DenseMap<BasicBlock*, Value*> BlockReplValues;
  BlockReplValues.insert(ValuesPerBlock.begin(), ValuesPerBlock.end());
  BlockReplValues[UnavailablePred] = NewLoad;
  
  // Perform PHI construction.
  Value* v = GetValueForBlock(LI->getParent(), LI, BlockReplValues, true);
  LI->replaceAllUsesWith(v);
  if (isa<PointerType>(v->getType()))
    MD->invalidateCachedPointerInfo(v);
  toErase.push_back(LI);
  NumPRELoad++;
/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (L->isVolatile())
    return false;
  
  Value* pointer = L->getPointerOperand();
  // ... to a pointer that has been loaded from before...
  MemDepResult dep = MD->getDependency(L);
  
  // If the value isn't available, don't do anything!
  if (dep.isClobber()) {
    DEBUG(
      // fast print dep, using operator<< on instruction would be too slow
      DOUT << "GVN: load ";
      WriteAsOperand(*DOUT.stream(), L);
      Instruction *I = dep.getInst();
      DOUT << " is clobbered by " << *I;

  // If it is defined in another block, try harder.
  if (dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    // Only forward substitute stores to loads of the same type.
    // FIXME: Could do better!
    if (DepSI->getPointerOperand()->getType() != pointer->getType())
      return false;
    // Remove it!
    L->replaceAllUsesWith(DepSI->getOperand(0));
    if (isa<PointerType>(DepSI->getOperand(0)->getType()))
      MD->invalidateCachedPointerInfo(DepSI->getOperand(0));
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    // Only forward substitute stores to loads of the same type.
    // FIXME: Could do better! load i32 -> load i8 -> truncate on little endian.
    if (DepLI->getType() != L->getType())
      return false;
    // Remove it!
    L->replaceAllUsesWith(DepLI);
    if (isa<PointerType>(DepLI->getType()))
      MD->invalidateCachedPointerInfo(DepLI);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading for a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocationInst>(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    toErase.push_back(L);
    NumGVNLoad++;
/// lookupNumber - Search the chain of value-numbering scopes associated with
/// BB (innermost first, then each parent scope) for a value with the given
/// value number.  Returns the value if one is recorded, or null if BB has no
/// scope or no scope in the chain contains the number.
Value* GVN::lookupNumber(BasicBlock* BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator BBEntry =
                                                          localAvail.find(BB);
  if (BBEntry == localAvail.end())
    return 0;
  
  // Walk outward through the scope chain until we find a binding for num.
  for (ValueNumberScope* Scope = BBEntry->second; Scope != 0;
       Scope = Scope->parent) {
    DenseMap<uint32_t, Value*>::iterator Binding = Scope->table.find(num);
    if (Binding != Scope->table.end())
      return Binding->second;
  }
  
  return 0;
}

/// AttemptRedundancyElimination - If the "fast path" of redundancy elimination
/// by inheritance from the dominator fails, see if we can perform phi 
/// construction to eliminate the redundancy.  Returns a replacement value for
/// 'orig' on success, or null if no profitable construction was found.
Value* GVN::AttemptRedundancyElimination(Instruction* orig, unsigned valno) {
  BasicBlock* BaseBlock = orig->getParent();
  
  // Worklist for a backwards depth-first walk over the CFG from BaseBlock.
  SmallPtrSet<BasicBlock*, 4> Visited;
  SmallVector<BasicBlock*, 8> Stack;
  Stack.push_back(BaseBlock);
  
  // Blocks in which an instance of value number 'valno' was found, mapped to
  // that instance; fed to GetValueForBlock for phi construction at the end.
  DenseMap<BasicBlock*, Value*> Results;
  
  // Walk backwards through our predecessors, looking for instances of the
  // value number we're looking for.  Instances are recorded in the Results
  // map, which is then used to perform phi construction.
  while (!Stack.empty()) {
    BasicBlock* Current = Stack.back();
    Stack.pop_back();
    
    // If we've walked all the way to a proper dominator, then give up. Cases
    // where the instance is in the dominator will have been caught by the fast
    // path, and any cases that require phi construction further than this are
    // probably not worth it anyways.  Note that this is a SIGNIFICANT compile
    // time improvement.
    if (DT->properlyDominates(Current, orig->getParent())) return 0;
    
    // A block with no value-numbering scope recorded means we cannot reason
    // about availability along this path; abandon the whole attempt.
    DenseMap<BasicBlock*, ValueNumberScope*>::iterator LA =
                                                       localAvail.find(Current);
    if (LA == localAvail.end()) return 0;
    DenseMap<uint32_t, Value*>::iterator V = LA->second->table.find(valno);
    
    if (V != LA->second->table.end()) {
      // Found an instance, record it.  The instance covers every path through
      // this block, so its predecessors need not be searched.
      Results.insert(std::make_pair(Current, V->second));
      continue;
    }
    
    // If we reach the beginning of the function, then give up.
    if (pred_begin(Current) == pred_end(Current))
      return 0;
    
    // Queue all not-yet-visited predecessors.  Note that BaseBlock itself is
    // never inserted into Visited here, so a loop edge back to it is handled
    // by the dominator/table checks above when it is re-popped.
    for (pred_iterator PI = pred_begin(Current), PE = pred_end(Current);
         PI != PE; ++PI)
      if (Visited.insert(*PI))
        Stack.push_back(*PI);
  }
  
  // If we didn't find instances, give up.  Otherwise, perform phi construction.
  if (Results.size() == 0)
    return 0;
  else
    return GetValueForBlock(BaseBlock, orig, Results, true);
}

/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  if (LoadInst* L = dyn_cast<LoadInst>(I)) {
    bool changed = processLoad(L, toErase);
    
    if (!changed) {
      unsigned num = VN.lookup_or_add(L);
      localAvail[I->getParent()]->table.insert(std::make_pair(num, L));
  uint32_t nextNum = VN.getNextUnusedValueNumber();
  if (BranchInst* BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
    
    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;
    
    Value* branchCond = BI->getCondition();
    uint32_t condVN = VN.lookup_or_add(branchCond);
    
    BasicBlock* trueSucc = BI->getSuccessor(0);
    BasicBlock* falseSucc = BI->getSuccessor(1);
    
    if (trueSucc->getSinglePredecessor())
      localAvail[trueSucc]->table[condVN] = ConstantInt::getTrue();
    if (falseSucc->getSinglePredecessor())
      localAvail[falseSucc]->table[condVN] = ConstantInt::getFalse();

    return false;
    
  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.  
  } else if (isa<AllocationInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
  // Collapse PHI nodes
    Value* constVal = CollapsePhi(p);
      for (PhiMapType::iterator PI = phiMap.begin(), PE = phiMap.end();
           PI != PE; ++PI)
        PI->second.erase(p);
      p->replaceAllUsesWith(constVal);
      if (isa<PointerType>(constVal->getType()))
        MD->invalidateCachedPointerInfo(constVal);
      toErase.push_back(p);
      localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
  
  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (num == nextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
    
  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  } else if (Value* repl = lookupNumber(I->getParent(), num)) {
    if (isa<PointerType>(repl->getType()))
      MD->invalidateCachedPointerInfo(repl);

#if 0
  // Perform slow-pathvalue-number based elimination with phi construction.
  } else if (Value* repl = AttemptRedundancyElimination(I, num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (isa<PointerType>(repl->getType()))
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;
#endif
    localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
/// runOnFunction - This is the main transformation entry point for a function.
Owen Anderson's avatar
Owen Anderson committed
bool GVN::runOnFunction(Function& F) {
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);
Owen Anderson's avatar
Owen Anderson committed
  bool changed = false;
  bool shouldContinue = true;
  
  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock* BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) NumGVNBlocks++;
    
    changed |= removedBlock;
  unsigned Iteration = 0;
  
Owen Anderson's avatar
Owen Anderson committed
  while (shouldContinue) {
    DEBUG(cerr << "GVN iteration: " << Iteration << "\n");
Owen Anderson's avatar
Owen Anderson committed
    shouldContinue = iterateOnFunction(F);
    changed |= shouldContinue;
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.
Owen Anderson's avatar
Owen Anderson committed
  return changed;
}


  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool changed_function = false;
  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    changed_function |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }
    
    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();
    
    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(cerr << "GVN removed: " << **I);
      MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }
  
  return changed_function;
}

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function& F) {
  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock* CurrentBlock = *DI;
    
    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;
    
    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      if (isa<AllocationInst>(CurInst) || isa<TerminatorInst>(CurInst) ||
          isa<PHINode>(CurInst) || (CurInst->getType() == Type::VoidTy) ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
      
      // Look for the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
      unsigned numWith = 0;
      unsigned numWithout = 0;
      BasicBlock* PREPred = 0;
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        // We're not interested in PRE where the block is its
        // own predecessor, on in blocks with predecessors
        // that are not reachable.
        if (*PI == CurrentBlock) {
          break;
        } else if (!localAvail.count(*PI))  {
          numWithout = 2;
          break;
        }
        
        DenseMap<uint32_t, Value*>::iterator predV = 
                                            localAvail[*PI]->table.find(valno);
        if (predV == localAvail[*PI]->table.end()) {
          numWith++;
        }
      }
      
      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
      unsigned succNum = 0;
      for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
           i != e; ++i)
        if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
          succNum = i;
          break;
        }
        
      if (isCriticalEdge(PREPred->getTerminator(), succNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), succNum));
        continue;
      }
      
      // Instantiate the expression the in predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't original present will have been instantiated earlier
      // in this loop.
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;
        
        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
      }
      
      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which 
      // are not value numbered precisely.
      if (!success) {
        delete PREInstr;
        continue;
      }
      
      PREInstr->insertBefore(PREPred->getTerminator());
      VN.add(PREInstr, valno);
      NumGVNPRE++;
      
      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(valno, PREInstr));
      
      // Create a PHI to make the value available in this block.
      PHINode* Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI)
      localAvail[CurrentBlock]->table[valno] = Phi;
      if (isa<PointerType>(Phi->getType()))
        MD->invalidateCachedPointerInfo(Phi);
      DEBUG(cerr << "GVN PRE removed: " << *CurInst);
      MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
  for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator
       I = toSplit.begin(), E = toSplit.end(); I != E; ++I)
    SplitCriticalEdge(I->first, I->second, this);
  
/// iterateOnFunction - Executes one iteration of GVN
Owen Anderson's avatar
Owen Anderson committed
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
                   new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }

  // Top-down walk of the dominator tree
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    changed |= processBlock(DI->getBlock());
#endif


void GVN::cleanupGlobalSets() {
  VN.clear();
  phiMap.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the PHI map to make sure the instruction isn't hiding in there
  // somewhere.
  for (PhiMapType::iterator
         I = phiMap.begin(), E = phiMap.end(); I != E; ++I) {
    assert(I->first != Inst && "Inst is still a key in PHI map!");

    for (SmallPtrSet<Instruction*, 4>::iterator
           II = I->second.begin(), IE = I->second.end(); II != IE; ++II) {
      assert(*II != Inst && "Inst is still a value in PHI map!");
    }
  }

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
         I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::iterator
             II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }

      VNS = VNS->parent;