      }
      
      // Whether we removed it or not, we can't
      // go any further
      break;
    } else if (!last) {
      // If we don't depend on a store, and we haven't
      // been loaded before, bail.
      break;
    } else if (dep == last) {
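      // This load's dependency is exactly the previous load of the same
      // pointer, so it must see the same value; reuse that load's result.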
      // Remove it!
      MD.removeInstruction(L);
      
      L->replaceAllUsesWith(last);
      toErase.push_back(L);
      deletedLoad = true;
      NumGVNLoad++;
        
      break;
    } else {
      dep = MD.getDependency(L, dep);
    }
  }

  if (dep != MemoryDependenceAnalysis::None &&
      dep != MemoryDependenceAnalysis::NonLocal &&
      isa<AllocationInst>(dep)) {
    // Check that this load is actually from the
    // allocation we found
    Value* v = L->getOperand(0);
    while (true) {
      if (BitCastInst *BC = dyn_cast<BitCastInst>(v))
        v = BC->getOperand(0);
      else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(v))
        v = GEP->getOperand(0);
      else
        break;
    }
    if (v == dep) {
      // If this load depends directly on an allocation, there isn't
      // anything stored there; therefore, we can optimize this load
      // to undef.
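      //
      // Illustrative only (IR names invented here, era-style syntax):
      //   %p = malloc i32
      //   %v = load i32* %p    ; no store to %p in between
      // folds %v to undef.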
      MD.removeInstruction(L);

      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      toErase.push_back(L);
      deletedLoad = true;
      NumGVNLoad++;
    }
  }

/// isReturnSlotOptznProfitable - Determine if performing a return slot 
/// fusion with the slot dest is profitable
static bool isReturnSlotOptznProfitable(Value* dest, MemCpyInst* cpy) {
  // We currently consider it profitable if dest is otherwise dead.
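  // Walk every user of dest, looking through bitcasts and GEPs; if anything
  // other than the memcpy itself ultimately uses the slot, it is not dead.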
  SmallVector<User*, 8> useList(dest->use_begin(), dest->use_end());
  while (!useList.empty()) {
    User* UI = useList.back();
    
    if (isa<GetElementPtrInst>(UI) || isa<BitCastInst>(UI)) {
      useList.pop_back();
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        useList.push_back(*I);
    } else if (UI == cpy)
      useList.pop_back();
    else
      return false;
  }
  
  return true;
}

/// performReturnSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a return slot optimization by having
/// the call write its result directly into the callee's return parameter
/// rather than using memcpy.
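///
/// Illustrative sketch only (names and intrinsic signature are invented,
/// era-style IR, not taken from this file):
///   call void @f(%struct.S* noalias sret %tmp)
///   call void @llvm.memcpy.i32(i8* %dst.cast, i8* %tmp.cast, i32 N, i32 A)
/// The goal is to have @f write into %dst directly, making the memcpy dead.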
bool GVN::performReturnSlotOptzn(MemCpyInst* cpy, CallInst* C,
                                 SmallVector<Instruction*, 4>& toErase) {
  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  Value* cpyDest = cpy->getDest();
  Value* cpySrc = cpy->getSource();
  CallSite CS = CallSite::get(C);
  // Since this is a return slot optimization, we need to make sure that
  // the value being copied is, in fact, in a return slot.  We also need to
  // check that the return slot parameter is marked noalias, so that we can
  // be sure that changing it will not cause unexpected behavior changes due
  // to it being accessed through a global or another parameter.
  if (CS.arg_size() == 0 ||
      cpySrc != CS.getArgument(0) ||
      !CS.paramHasAttr(1, ParamAttr::NoAlias | ParamAttr::StructRet))
    return false;

  // Check that something sneaky is not happening involving casting
  // return slot types around.
  if (CS.getArgument(0)->getType() != cpyDest->getType())
    return false;
  // sret --> pointer
  const PointerType* PT = cast<PointerType>(cpyDest->getType()); 
  // We can only perform the transformation if the size of the memcpy
  // is constant and equal to the size of the structure.
  TargetData& TD = getAnalysis<TargetData>();
  ConstantInt* cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
  if (!cpyLength)
    return false;
  if (TD.getTypeStoreSize(PT->getElementType()) != cpyLength->getZExtValue())
    return false;
  
  // We only perform the transformation if it will be profitable. 
  if (!isReturnSlotOptznProfitable(cpyDest, cpy))
    return false;
  
  // In addition to knowing that the call does not access the return slot
  // in some unexpected manner, which we derive from the noalias attribute,
  // we also need to know that it does not sneakily modify the destination
  // slot in the caller.  We don't have parameter attributes to go by
  // for this one, so we just rely on AA to figure it out for us.
  AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpy->getRawDest(), cpyLength->getZExtValue()) !=
      AliasAnalysis::NoModRef)
    return false;
  
  // If all the checks have passed, then we're alright to do the transformation.
  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
  MD.dropInstruction(C);
  
  // Remove the memcpy
  MD.removeInstruction(cpy);
  toErase.push_back(cpy);

  return true;
}

/// processMemCpy - perform simplification of memcpy's.  If we have memcpy A which
/// copies X to Y, and memcpy B which copies Y to Z, then we can rewrite B to be
/// a memcpy from X to Z (or potentially a memmove, depending on circumstances).
///  This allows later passes to remove the first memcpy altogether.
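///
/// Illustrative sketch only (names invented here, not from this file):
///   memcpy(Y, X, N)    ; MDep
///   memcpy(Z, Y, M)    ; M, with M <= N plus the no-alias checks below
/// becomes
///   memcpy(Y, X, N)
///   memcpy(Z, X, M)    ; no longer reads Y, so DSE may delete MDep later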
bool GVN::processMemCpy(MemCpyInst* M, MemCpyInst* MDep,
                        SmallVector<Instruction*, 4>& toErase) {
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest())
    return false;
  
  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt* C1 = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt* C2 = dyn_cast<ConstantInt>(M->getLength());
  if (!C1 || !C2)
    return false;
  
  uint64_t DepSize = C1->getValue().getZExtValue();
  uint64_t CpySize = C2->getValue().getZExtValue();
  
  if (DepSize < CpySize)
    return false;
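  // (A larger second copy would read bytes of the intermediate buffer that
  //  the first memcpy never wrote, so we give up in that case.)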
  
  // Finally, we have to make sure that the dest of the second does not
  // alias the source of the first
  AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
  if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
      AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
           AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
           != AliasAnalysis::NoAlias)
    return false;
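  // (In particular, the first query guarantees that the rewritten copy's
  //  dest and source do not overlap, so it can remain a memcpy rather than
  //  needing to become a memmove.)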
  
  // If all checks passed, then we can transform these memcpy's
  Function* MemCpyFun = Intrinsic::getDeclaration(
                                 M->getParent()->getParent()->getParent(),
                                 M->getIntrinsicID());

  std::vector<Value*> args;
  args.push_back(M->getRawDest());
  args.push_back(MDep->getRawSource());
  args.push_back(M->getLength());
  args.push_back(M->getAlignment());
  
  CallInst* C = new CallInst(MemCpyFun, args.begin(), args.end(), "", M);
  MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
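  // Verify the rewrite: if the new memcpy still depends on MDep, the
  // forwarding was safe and the original memcpy M is now redundant.
  // Otherwise something else may clobber the source in between, so back
  // out by discarding the call we just created.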
  if (MD.getDependency(C) == MDep) {
    MD.dropInstruction(M);
    toErase.push_back(M);
    return true;
  } else {
    MD.removeInstruction(C);
    toErase.push_back(C);
    return false;
  }
}

/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets
bool GVN::processInstruction(Instruction* I,
                                ValueNumberedSet& currAvail,
                                DenseMap<Value*, LoadInst*>& lastSeenLoad,
                                SmallVector<Instruction*, 4>& toErase) {
  if (LoadInst* L = dyn_cast<LoadInst>(I)) {
    return processLoad(L, lastSeenLoad, toErase);
  } else if (MemCpyInst* M = dyn_cast<MemCpyInst>(I)) {
    MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();

    // There are two possible optimizations we can do for memcpy:
    //   a) memcpy-memcpy xform which exposes redundancy for DSE
    //   b) call-memcpy xform for sret return slot optimization
    Instruction* dep = MD.getDependency(M);
    if (dep == MemoryDependenceAnalysis::None ||
        dep == MemoryDependenceAnalysis::NonLocal)
      return false;
    if (MemCpyInst *MemCpy = dyn_cast<MemCpyInst>(dep))
      return processMemCpy(M, MemCpy, toErase);
    if (CallInst* C = dyn_cast<CallInst>(dep))
      return performReturnSlotOptzn(M, C, toErase);
    return false;
  }

  unsigned num = VN.lookup_or_add(I);

  // Collapse PHI nodes
  if (PHINode* p = dyn_cast<PHINode>(I)) {
    Value* constVal = CollapsePhi(p);
    if (constVal) {
      for (PhiMapType::iterator PI = phiMap.begin(), PE = phiMap.end();
           PI != PE; ++PI)
        if (PI->second.count(p))
          PI->second.erase(p);
      p->replaceAllUsesWith(constVal);
      toErase.push_back(p);
    }
  // Perform value-number based elimination
  } else if (currAvail.test(num)) {
    Value* repl = find_leader(currAvail, num);
    if (CallInst* CI = dyn_cast<CallInst>(I)) {
      AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
      if (!AA.doesNotAccessMemory(CI)) {
        MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
        if (cast<Instruction>(repl)->getParent() != CI->getParent() ||
            MD.getDependency(CI) != MD.getDependency(cast<CallInst>(repl))) {
          // There must be an intervening may-alias store, so nothing from
          // this point on will be able to be replaced with the preceding call
          currAvail.erase(repl);
          currAvail.insert(I);
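          // Track this call in place of the stale leader so that later
          // occurrences are compared against it instead.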
          
          return false;
        }
      }
    }
    
    // Remove it!
    MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
    MD.removeInstruction(I);
    
    I->replaceAllUsesWith(repl);
    toErase.push_back(I);
    return true;
  } else if (!I->isTerminator()) {
    currAvail.set(num);
    currAvail.insert(I);
  }
  
  return false;
}

// GVN::runOnFunction - This is the main transformation entry point for a
// function.
//
bool GVN::runOnFunction(Function& F) {
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  
  bool changed = false;
  bool shouldContinue = true;
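  // Keep re-running GVN until an iteration makes no changes (fixed point).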
  
  while (shouldContinue) {
    shouldContinue = iterateOnFunction(F);
    changed |= shouldContinue;
  }
  
  return changed;
}


// GVN::iterateOnFunction - Executes one iteration of GVN
bool GVN::iterateOnFunction(Function &F) {
  // Clean out global sets from any previous functions
  VN.clear();
  availableOut.clear();
 
  bool changed_function = false;
  
  DominatorTree &DT = getAnalysis<DominatorTree>();   
  
  SmallVector<Instruction*, 4> toErase;
  
  // Top-down walk of the dominator tree
  for (df_iterator<DomTreeNode*> DI = df_begin(DT.getRootNode()),
         E = df_end(DT.getRootNode()); DI != E; ++DI) {
    
    // Get the set to update for this block
    ValueNumberedSet& currAvail = availableOut[DI->getBlock()];     
    DenseMap<Value*, LoadInst*> lastSeenLoad;
    
    BasicBlock* BB = DI->getBlock();
  
    // A block inherits AVAIL_OUT from its dominator
    if (DI->getIDom() != 0)
      currAvail = availableOut[DI->getIDom()->getBlock()];
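    // (The depth-first walk visits a node only after its immediate dominator,
    //  so the dominator's AVAIL_OUT set is already complete at this point.)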

    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
         BI != BE;) {
      changed_function |= processInstruction(BI, currAvail,
                                             lastSeenLoad, toErase);
      // Avoid iterator invalidation
      ++BI;
      for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(),
           E = toErase.end(); I != E; ++I)
        (*I)->eraseFromParent();