if (!Change && (isa<ZExtInst>(I) || isa<SExtInst>(I))) {
MadeChange |= MoveExtToFormExtLoad(I);
MadeChange |= OptimizeExtUses(I);
}
} else if (CmpInst *CI = dyn_cast<CmpInst>(I)) {
MadeChange |= OptimizeCmpExpression(CI);
} else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (TLI)
MadeChange |= OptimizeMemoryInst(I, I->getOperand(0), LI->getType(),
SunkAddrs);
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
if (TLI)
MadeChange |= OptimizeMemoryInst(I, SI->getOperand(1),
SI->getOperand(0)->getType(),
SunkAddrs);
} else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
if (GEPI->hasAllZeroIndices()) {
/// The GEP operand must be a pointer, so must its result -> BitCast
Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
GEPI->getName(), GEPI);
GEPI->replaceAllUsesWith(NC);
GEPI->eraseFromParent();
++NumGEPsElim;
MadeChange = true;
OptimizeInst(NC);
}
} else if (CallInst *CI = dyn_cast<CallInst>(I)) {
if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
// Sink address computing for memory operands into the block.
MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
} else {
MadeChange |= OptimizeCallInst(CI);
}
}
return MadeChange;
}
// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
//
// Walks every instruction of \p BB once (the iterator is advanced before the
// current instruction is inspected, so an optimization may erase or replace
// the current instruction safely).  Returns true if the block was changed.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Split all critical edges where the dest block has a PHI.
  if (CriticalEdgeSplit) {
    TerminatorInst *BBTI = BB.getTerminator();
    // Indirect branches cannot have their edges split, so skip them.
    if (BBTI->getNumSuccessors() > 1 && !isa<IndirectBrInst>(BBTI)) {
      for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i) {
        BasicBlock *SuccBB = BBTI->getSuccessor(i);
        if (isa<PHINode>(SuccBB->begin()) && isCriticalEdge(BBTI, i, true))
          SplitEdgeNicely(BBTI, i, BackEdges, this);
      }
    }
  }

  // Per-block cache of sunk address computations; stale entries from a
  // previous block must not be reused.
  SunkAddrs.clear();

  for (BasicBlock::iterator BBI = BB.begin(), E = BB.end(); BBI != E; ) {
    // Advance the iterator first so that I may be erased by an optimization.
    Instruction *I = BBI++;

    if (CallInst *CI = dyn_cast<CallInst>(I)) {
      // If we found an inline asm expression, and if the target knows how to
      // lower it to normal LLVM code, do so now.
      if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
        if (TLI->ExpandInlineAsm(CI)) {
          BBI = BB.begin();
          // Avoid processing instructions out of order, which could cause
          // reuse before a value is defined.
          SunkAddrs.clear();
        } else
          // Sink address computing for memory operands into the block.
          MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
      } else {
        // Other CallInst optimizations that don't need to muck with the
        // enclosing iterator here.
        MadeChange |= OptimizeCallInst(CI);
      }
    } else {
      MadeChange |= OptimizeInst(I);
    }
  }

  return MadeChange;
}