CodeGenPrepare.cpp
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
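    // A store's operand 0 is the value being stored and operand 1 is the
    // address; the value's type is the type of the memory access to optimize.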
    if (TLI)
      MadeChange |= OptimizeMemoryInst(I, SI->getOperand(1),
                                       SI->getOperand(0)->getType(),
                                       SunkAddrs);
  }

  return MadeChange;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
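//
// Illustrative sketch (not part of the original comment): instruction
// selection works one basic block at a time, so an address computed in a
// different block cannot be folded into a load or store's addressing mode.
// Sinking a copy of the address computation next to the memory instruction,
// e.g. rewriting (typed-pointer IR of this era)
//
//   use:
//     %v = load i32* %p            ; %p is a GEP defined in another block
//
// into
//
//   use:
//     %p.sunk = getelementptr i32* %a, i64 4
//     %v = load i32* %p.sunk
//
// lets the selector fold the address arithmetic into the load.  The value
// names above are hypothetical.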
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
  bool MadeChange = false;
  // Split all critical edges where the dest block has a PHI.
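  // (A critical edge runs from a block with more than one successor to a
  // block with more than one predecessor; splitting it inserts a new block
  // on the edge, giving that path its own insertion point.)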
  if (CriticalEdgeSplit) {
    TerminatorInst *BBTI = BB.getTerminator();
    if (BBTI->getNumSuccessors() > 1 && !isa<IndirectBrInst>(BBTI)) {
      for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i) {
        BasicBlock *SuccBB = BBTI->getSuccessor(i);
        if (isa<PHINode>(SuccBB->begin()) && isCriticalEdge(BBTI, i, true))
          SplitEdgeNicely(BBTI, i, BackEdges, this);
      }
    }
  }

  // Keep track of non-local addresses that have been sunk into this block.
  // This allows us to avoid inserting duplicate code for blocks with multiple
  // load/stores of the same address.
  DenseMap<Value*, Value*> SunkAddrs;

  for (BasicBlock::iterator BBI = BB.begin(), E = BB.end(); BBI != E; ) {
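    // Advance the iterator before inspecting I so that I can be erased or
    // replaced below without invalidating the iteration.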
    Instruction *I = BBI++;
    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      if (GEPI->hasAllZeroIndices()) {
        /// The GEP operand must be a pointer, so must its result -> BitCast
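        // For example (typed-pointer IR of this era), the all-zero-index GEP
        //   %f = getelementptr { i32 }* %s, i32 0, i32 0
        // addresses the same memory as %s and can be rewritten as
        //   %f = bitcast { i32 }* %s to i32*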
        Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                          GEPI->getName(), GEPI);
        GEPI->replaceAllUsesWith(NC);
        GEPI->eraseFromParent();
        MadeChange = true;
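        // Continue iterating from the new bitcast so it is visited by this
        // loop as well.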
        BBI = NC;
      }
    } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
      // If we found an inline asm expression, and if the target knows how to
      // lower it to normal LLVM code, do so now.
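      // (For example, a target may recognize a simple byte-swap asm string
      // and rewrite the call to use the llvm.bswap intrinsic instead.)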
      if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
        if (TLI->ExpandInlineAsm(CI)) {
          BBI = BB.begin();
          // Avoid processing instructions out of order, which could cause
          // reuse before a value is defined.
          SunkAddrs.clear();
        } else
          // Sink address computing for memory operands into the block.
          MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
      } else {
        // Other CallInst optimizations that don't need to muck with the
        // enclosing iterator here.
        MadeChange |= OptimizeCallInst(CI);