    if (CFP->getType() == Type::DoubleTy)
      V = ConstantExpr::getBitCast(CFP, Type::Int64Ty);
    // Don't handle long double formats, which have strange constraints.
  }
  
  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
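      // For example, 0xA0A0A0A0 splits into halves 0xA0A0 and 0xA0A0, which in
      // turn split into 0xA0 and 0xA0, so the whole value collapses to the
      // byte 0xA0.  By contrast, 0x01020304 is rejected at the first split,
      // since 0x0102 != 0x0304.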
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2  = Val.lshr(NextWidth);
        Val2.trunc(NextWidth);
        Val.trunc(NextWidth);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(Val);
    }
  }
  
  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}

/// IsPointerAtOffset - Return true if Ptr1 is exactly provably equal to Ptr2
/// plus the specified constant offset.  For example, if A is an array of i32,
/// Ptr1 might be &A[42], Ptr2 might be &A[40], and Offset would be 8.
static bool IsPointerAtOffset(Value *Ptr1, Value *Ptr2, uint64_t Offset) {
  // TODO: Not implemented yet.  processStore below stays disabled until this
  // can actually prove the offset relationship.
  return false;
}


/// processStore - When GVN is scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones
/// (currently 4) it attempts to merge them together into a memcpy/memset.
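///
/// For example (an illustrative sketch; %P0..%P3 are hypothetical adjacent
/// byte addresses):
///   store i8 0, i8* %P0
///   store i8 0, i8* %P1
///   store i8 0, i8* %P2
///   store i8 0, i8* %P3
/// could be merged into a single llvm.memset of length 4 starting at %P0.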
bool GVN::processStore(StoreInst *SI, SmallVectorImpl<Instruction*> &toErase) {
  // FIXME: All of the code below is disabled until IsPointerAtOffset is
  // implemented, so just bail out for now.
  return false;
  
  if (SI->isVolatile()) return false;
  
  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.
  
  // Ensure that the value being stored is something that can be memset'ed a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  Value *ByteVal = isBytewiseValue(SI->getOperand(0));
  if (!ByteVal)
    return false;

  TargetData &TD = getAnalysis<TargetData>();
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Okay, so we now have a single store of a byte-splattable value.  Try to
  // 'grow' this store by looking for neighboring stores to the immediate left
  // or right of the store we have so far.  While we could in theory handle
  // stores in this order:  A[0], A[2], A[1]
  // in practice, right now we only worry about cases where stores are
  // consecutive in increasing or decreasing address order.
  uint64_t BytesSoFar = TD.getTypeStoreSize(SI->getOperand(0)->getType());
  unsigned StartAlign = SI->getAlignment();
  Value *StartPtr = SI->getPointerOperand();
  SmallVector<StoreInst*, 16> Stores;
  Stores.push_back(SI);
  
  BasicBlock::iterator BI = SI;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) { 
      // If the call is readnone, ignore it, otherwise bail out.  We don't even
      // allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (AA.getModRefBehavior(CallSite::get(BI)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;
      break;
    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
      break;

    // If this is a non-store instruction it is fine, ignore it.
    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
    if (NextStore == 0) continue;
    
    // If this is a store, see if we can merge it in.
    if (NextStore->isVolatile()) break;
    
    // Check to see if this stored value is of the same byte-splattable value.
    if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
      break;
    
    Value *ThisPointer = NextStore->getPointerOperand();
    unsigned AccessSize = TD.getTypeStoreSize(NextStore->getOperand(0)->getType());
    
    // If so, check to see if the store is before the current range or after it;
    // in either case, extend the range.  Otherwise, reject it.
    if (IsPointerAtOffset(ThisPointer, StartPtr, BytesSoFar)) {
      // Okay, this extends the stored area on the end, just add to the bytes
      // so far and remember this store.
      BytesSoFar += AccessSize;
      Stores.push_back(NextStore);
      continue;
    }
    
    if (IsPointerAtOffset(StartPtr, ThisPointer, AccessSize)) {
      // Okay, the store is before the current range.  Reset our start pointer
      // and get new alignment info etc.
      BytesSoFar += AccessSize;
      Stores.push_back(NextStore);
      StartPtr = ThisPointer;
      StartAlign = NextStore->getAlignment();
      continue;
    }

    // Otherwise, this store wasn't contiguous with our current range, bail out.
    break;
  }
  
  // If we found fewer than 4 stores to merge, bail out; it isn't worth losing
  // type information in LLVM IR to do the transformation.
  if (Stores.size() < 4) 
    return false;
  
  // Otherwise, we do want to transform this!  But not yet. :)
  
  return false;
}


/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool GVN::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C,
                               SmallVectorImpl<Instruction*> &toErase) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.
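  //
  // In C-ish terms, the pattern looks roughly like this (an illustrative
  // sketch; 'foo', 'tmp', and 's' are hypothetical):
  //
  //   struct S tmp;
  //   foo(&tmp);              // the call fills in tmp (src)
  //   memcpy(&s, &tmp, ...);  // tmp is then copied into s (dest)
  //
  // which we can rewrite so that foo writes directly into s.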

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  Value* cpyDest = cpy->getDest();
  Value* cpySrc = cpy->getSource();
  CallSite CS = CallSite::get(C);

  // We need to be able to reason about the size of the memcpy, so we require
  // that it be a constant.
  ConstantInt* cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
  if (!cpyLength)
    return false;

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst* srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  TargetData& TD = getAnalysis<TargetData>();

  ConstantInt* srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD.getABITypeSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLength->getZExtValue() < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst* A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
    ConstantInt* destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD.getABITypeSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument* A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type* StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD.getABITypeSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    // The destination is neither an alloca nor an sret argument, so we can't
    // prove that the first srcSize bytes are safely writable.
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
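  //
  // For example (an illustrative sketch; 'bar' is hypothetical), a use like
  //   call @bar(i8* %src)
  // would disqualify the transformation, since bar might read or clobber src.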
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User* UI = srcUseList.back();
    srcUseList.pop_back();

    if (isa<GetElementPtrInst>(UI) || isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree& DT = getAnalysis<DominatorTree>();
  if (Instruction* cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpy->getRawDest(), srcSize) !=
      AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i) == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::createPointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      CS.setArgument(i, cpyDest);
    }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
  MD.dropInstruction(C);

  // Remove the memcpy; src held only undefined values before the call, so the
  // copy is no longer needed now that the call writes directly to dest.
  MD.removeInstruction(cpy);
  toErase.push_back(cpy);

  return true;
}

/// processMemCpy - perform simplification of memcpy's.  If we have memcpy A which
/// copies X to Y, and memcpy B which copies Y to Z, then we can rewrite B to be
/// a memcpy from X to Z (or potentially a memmove, depending on circumstances).
///  This allows later passes to remove the first memcpy altogether.
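///
/// For example (an illustrative sketch):
///   memcpy(Y, X, 64)   ; MDep
///   memcpy(Z, Y, 64)   ; M
/// can be rewritten as
///   memcpy(Y, X, 64)
///   memcpy(Z, X, 64)
/// after which the first memcpy may become dead.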
bool GVN::processMemCpy(MemCpyInst* M, MemCpyInst* MDep,
                        SmallVectorImpl<Instruction*> &toErase) {
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest())
    return false;
  
  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt* C1 = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt* C2 = dyn_cast<ConstantInt>(M->getLength());
  if (!C1 || !C2)
    return false;
  
  uint64_t DepSize = C1->getValue().getZExtValue();
  uint64_t CpySize = C2->getValue().getZExtValue();
  
  if (DepSize < CpySize)
    return false;
  
  // Finally, we have to make sure that the dest of the second does not
  // alias the source of the first
  AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
  if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
      AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
           AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
           != AliasAnalysis::NoAlias)
    return false;
  
  // If all checks passed, then we can transform these memcpy's
  Function* MemCpyFun = Intrinsic::getDeclaration(
                                 M->getParent()->getParent()->getParent(),
                                 M->getIntrinsicID());

  std::vector<Value*> args;
  args.push_back(M->getRawDest());
  args.push_back(MDep->getRawSource());
  args.push_back(M->getLength());
  args.push_back(M->getAlignment());
  
  CallInst* C = new CallInst(MemCpyFun, args.begin(), args.end(), "", M);
  MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
  if (MD.getDependency(C) == MDep) {
    MD.dropInstruction(M);
    toErase.push_back(M);
    return true;
  }
  
  MD.removeInstruction(C);
  toErase.push_back(C);
  return false;
}

/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I, ValueNumberedSet &currAvail,
                             DenseMap<Value*, LoadInst*> &lastSeenLoad,
                             SmallVectorImpl<Instruction*> &toErase) {
  if (LoadInst* L = dyn_cast<LoadInst>(I))
    return processLoad(L, lastSeenLoad, toErase);
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return processStore(SI, toErase);
  
  if (MemCpyInst* M = dyn_cast<MemCpyInst>(I)) {
    MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();

    // There are two possible optimizations we can do for memcpy:
    //   a) memcpy-memcpy xform which exposes redundancy for DSE
    //   b) call-memcpy xform for return slot optimization
    Instruction* dep = MD.getDependency(M);
    if (dep == MemoryDependenceAnalysis::None ||
        dep == MemoryDependenceAnalysis::NonLocal)
      return false;
    if (MemCpyInst *MemCpy = dyn_cast<MemCpyInst>(dep))
      return processMemCpy(M, MemCpy, toErase);
    if (CallInst* C = dyn_cast<CallInst>(dep))
      return performCallSlotOptzn(M, C, toErase);
    return false;
  }

  unsigned num = VN.lookup_or_add(I);

  // Collapse PHI nodes
  if (PHINode* p = dyn_cast<PHINode>(I)) {
    Value* constVal = CollapsePhi(p);

    if (constVal) {
      for (PhiMapType::iterator PI = phiMap.begin(), PE = phiMap.end();
           PI != PE; ++PI)
        if (PI->second.count(p))
          PI->second.erase(p);

      p->replaceAllUsesWith(constVal);
      toErase.push_back(p);
    }
  // Perform value-number based elimination
  } else if (currAvail.test(num)) {
    Value* repl = find_leader(currAvail, num);

    if (CallInst* CI = dyn_cast<CallInst>(I)) {
      AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
      if (!AA.doesNotAccessMemory(CI)) {
        MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
        if (cast<Instruction>(repl)->getParent() != CI->getParent() ||
            MD.getDependency(CI) != MD.getDependency(cast<CallInst>(repl))) {
          // There must be an intervening may-alias store, so nothing from
          // this point on will be able to be replaced with the preceding call
          currAvail.erase(repl);
          currAvail.insert(I);
          
          return false;
        }
      }
    }
    
    // Remove it!
    MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
    MD.removeInstruction(I);
    
    I->replaceAllUsesWith(repl);
    toErase.push_back(I);
    return true;
  } else if (!I->isTerminator()) {
    currAvail.set(num);
    currAvail.insert(I);
  }
  
  return false;
}

// GVN::runOnFunction - This is the main transformation entry point for a
// function.
//
bool GVN::runOnFunction(Function& F) {
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  
  bool changed = false;
  bool shouldContinue = true;
  
  while (shouldContinue) {
    shouldContinue = iterateOnFunction(F);
    changed |= shouldContinue;
  }
  
  return changed;
}


// GVN::iterateOnFunction - Executes one iteration of GVN
bool GVN::iterateOnFunction(Function &F) {
  // Clean out global sets from any previous functions
  VN.clear();
  availableOut.clear();
 
  bool changed_function = false;
  
  DominatorTree &DT = getAnalysis<DominatorTree>();   
  
  SmallVector<Instruction*, 4> toErase;
  DenseMap<Value*, LoadInst*> lastSeenLoad;

  // Top-down walk of the dominator tree
  for (df_iterator<DomTreeNode*> DI = df_begin(DT.getRootNode()),
         E = df_end(DT.getRootNode()); DI != E; ++DI) {
    
    // Get the set to update for this block
    ValueNumberedSet& currAvail = availableOut[DI->getBlock()];     
    BasicBlock* BB = DI->getBlock();
  
    // A block inherits AVAIL_OUT from its dominator
    if (DI->getIDom() != 0)
      currAvail = availableOut[DI->getIDom()->getBlock()];

    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
         BI != BE;) {
      changed_function |= processInstruction(BI, currAvail,
                                             lastSeenLoad, toErase);
      // Avoid iterator invalidation
      ++BI;
      for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(),
           E = toErase.end(); I != E; ++I)
        (*I)->eraseFromParent();