static int AnalyzeLoadFromClobberingStore(LoadInst *L, StoreInst *DepSI,
const TargetData &TD) {
if (isa<StructType>(L->getType()) || isa<ArrayType>(L->getType()) ||
isa<StructType>(DepSI->getOperand(0)->getType()) ||
isa<ArrayType>(DepSI->getOperand(0)->getType()))
return -1;
int64_t StoreOffset = 0, LoadOffset = 0;
Value *StoreBase =
GetBaseWithConstantOffset(DepSI->getPointerOperand(), StoreOffset, TD);
Value *LoadBase =
GetBaseWithConstantOffset(L->getPointerOperand(), LoadOffset, TD);
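// For example (illustrative): if the load pointer is "getelementptr i8* %P, i64 4",
// GetBaseWithConstantOffset is expected to return %P with LoadOffset = 4, so both
// accesses can be compared below as (base, byte offset) pairs.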
if (StoreBase != LoadBase)
return -1;
// If the load and store are to the exact same address, they should have been
// a must alias. AA must have gotten confused.
// FIXME: Study to see if/when this happens.
if (LoadOffset == StoreOffset) {
#if 0
errs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
<< "Base = " << *StoreBase << "\n"
<< "Store Ptr = " << *DepSI->getPointerOperand() << "\n"
<< "Store Offs = " << StoreOffset << " - " << *DepSI << "\n"
<< "Load Ptr = " << *L->getPointerOperand() << "\n"
<< "Load Offs = " << LoadOffset << " - " << *L << "\n\n";
errs() << "'" << L->getParent()->getParent()->getName() << "'"
<< *L->getParent();
#endif
return -1;
}
// If the load and store don't overlap at all, the store doesn't provide
// anything to the load. In this case, they really don't alias at all, AA
// must have gotten confused.
// FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
// remove this check, as it is duplicated with what we have below.
uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
uint64_t LoadSize = TD.getTypeSizeInBits(L->getType());
if ((StoreSize & 7) | (LoadSize & 7))
return -1;
StoreSize >>= 3; // Convert to bytes.
LoadSize >>= 3;
bool isAAFailure = false;
if (StoreOffset < LoadOffset) {
isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
} else {
isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
}
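// Worked example (illustrative numbers): StoreOffset = 0, StoreSize = 4,
// LoadOffset = 8, LoadSize = 2.  Since 0 + 4 <= 8, the byte ranges [0,4) and
// [8,10) are disjoint, so the store cannot feed the load and AA should have
// reported NoAlias; we bail out rather than forward anything.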
if (isAAFailure) {
#if 0
errs() << "STORE LOAD DEP WITH COMMON BASE:\n"
<< "Base = " << *StoreBase << "\n"
<< "Store Ptr = " << *DepSI->getPointerOperand() << "\n"
<< "Store Offs = " << StoreOffset << " - " << *DepSI << "\n"
<< "Load Ptr = " << *L->getPointerOperand() << "\n"
<< "Load Offs = " << LoadOffset << " - " << *L << "\n\n";
errs() << "'" << L->getParent()->getParent()->getName() << "'"
<< *L->getParent();
#endif
return -1;
}
// If the Load isn't completely contained within the stored bits, we don't
// have all the bits to feed it. We could do something crazy in the future
// (issue a smaller load then merge the bits in) but this seems unlikely to be
// valuable.
if (StoreOffset > LoadOffset ||
StoreOffset+StoreSize < LoadOffset+LoadSize)
return -1;
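// E.g. a 4-byte store covering bytes [0,4) cannot fully supply an 8-byte load
// of bytes [0,8); only a load fully contained in the stored bytes is forwarded.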
// Okay, we can do this transformation. Return the number of bytes into the
// store that the load is.
return LoadOffset-StoreOffset;
}
/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store. This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias. Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
const Type *LoadTy,
Instruction *InsertPt, const TargetData &TD){
LLVMContext &Ctx = SrcVal->getType()->getContext();
uint64_t StoreSize = TD.getTypeSizeInBits(SrcVal->getType())/8;
uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
// Compute which bits of the stored value are being used by the load. Convert
// to an integer type to start with.
if (isa<PointerType>(SrcVal->getType()))
SrcVal = new PtrToIntInst(SrcVal, TD.getIntPtrType(Ctx), "tmp", InsertPt);
if (!isa<IntegerType>(SrcVal->getType()))
SrcVal = new BitCastInst(SrcVal, IntegerType::get(Ctx, StoreSize*8),
"tmp", InsertPt);
// Shift the bits to the least significant depending on endianness.
unsigned ShiftAmt;
if (TD.isLittleEndian())
ShiftAmt = Offset*8;
else
ShiftAmt = (StoreSize-LoadSize-Offset)*8;
if (ShiftAmt)
SrcVal = BinaryOperator::CreateLShr(SrcVal,
ConstantInt::get(SrcVal->getType(), ShiftAmt), "tmp", InsertPt);
if (LoadSize != StoreSize)
SrcVal = new TruncInst(SrcVal, IntegerType::get(Ctx, LoadSize*8),
"tmp", InsertPt);
return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}
struct AvailableValueInBlock {
/// BB - The basic block in question.
BasicBlock *BB;
/// V - The value that is live out of the block.
Value *V;
/// Offset - The byte offset in V that is interesting for the load query.
unsigned Offset;
static AvailableValueInBlock get(BasicBlock *BB, Value *V,
unsigned Offset = 0) {
AvailableValueInBlock Res;
Res.BB = BB;
Res.V = V;
Res.Offset = Offset;
return Res;
}
};
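// E.g. AvailableValueInBlock::get(BB, StoredVal, 2) records that the load's
// bytes begin at byte 2 of StoredVal when control leaves BB; the default
// Offset of 0 means the value can be used directly (after any needed coercion).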
/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
const TargetData *TD,
AliasAnalysis *AA) {
SmallVector<PHINode*, 8> NewPHIs;
SSAUpdater SSAUpdate(&NewPHIs);
SSAUpdate.Initialize(LI);
const Type *LoadTy = LI->getType();
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
BasicBlock *BB = ValuesPerBlock[i].BB;
Value *AvailableVal = ValuesPerBlock[i].V;
unsigned Offset = ValuesPerBlock[i].Offset;
if (SSAUpdate.HasValueForBlock(BB))
continue;
if (AvailableVal->getType() != LoadTy) {
assert(TD && "Need target data to handle type mismatch case");
AvailableVal = GetStoreValueForLoad(AvailableVal, Offset, LoadTy,
BB->getTerminator(), *TD);
DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\n"
<< *ValuesPerBlock[i].V << '\n'
<< *AvailableVal << '\n' << "\n\n\n");
}
SSAUpdate.AddAvailableValue(BB, AvailableVal);
}
// Perform PHI construction.
Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
// If new PHI nodes were created, notify alias analysis.
if (isa<PointerType>(V->getType()))
for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
AA->copyValue(LI, NewPHIs[i]);
return V;
}
static bool isLifetimeStart(Instruction *Inst) {
if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
return II->getIntrinsicID() == Intrinsic::lifetime_start;
return false;
}
/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
SmallVectorImpl<Instruction*> &toErase) {
// Find the non-local dependencies of the load.
SmallVector<MemoryDependenceAnalysis::NonLocalDepEntry, 64> Deps;
MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
Deps);
//DEBUG(errs() << "INVESTIGATING NONLOCAL LOAD: "
// << Deps.size() << *LI << '\n');
// If we had to process more than one hundred blocks to find the
// dependencies, this load isn't worth worrying about. Optimizing
// it will be too expensive.
if (Deps.size() > 100)
return false;
// If we had a phi translation failure, we'll have a single entry which is a
// clobber in the current block. Reject this early.
if (Deps.size() == 1 && Deps[0].second.isClobber()) {
DEBUG(
errs() << "GVN: non-local load ";
WriteAsOperand(errs(), LI);
errs() << " is clobbered by " << *Deps[0].second.getInst() << '\n';
);
return false;
}
// Filter out useless results (non-locals, etc). Keep track of the blocks
// where we have a value available in repl, also keep track of whether we see
// dependencies that produce an unknown value for the load (such as a call
// that could potentially clobber the load).
SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
SmallVector<BasicBlock*, 16> UnavailableBlocks;
const TargetData *TD = 0;
for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
BasicBlock *DepBB = Deps[i].first;
MemDepResult DepInfo = Deps[i].second;
if (DepInfo.isClobber()) {
// If the dependence is to a store that writes to a superset of the bits
// read by the load, we can extract the bits we need for the load from the
// stored value.
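// E.g. a clobbering "store i32 %x, i32* %P" followed by a "load i8" at %P+1
// can be satisfied by byte 1 of %x; AnalyzeLoadFromClobberingStore returns
// that byte offset, or -1 when no such forwarding is possible.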
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
if (TD == 0)
TD = getAnalysisIfAvailable<TargetData>();
if (TD) {
int Offset = AnalyzeLoadFromClobberingStore(LI, DepSI, *TD);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
DepSI->getOperand(0),
Offset));
continue;
}
}
}
// FIXME: Handle memset/memcpy.
UnavailableBlocks.push_back(DepBB);
continue;
}
Instruction *DepInst = DepInfo.getInst();
// Loading the allocation -> undef.
if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
// Loading immediately after lifetime begin -> undef.
isLifetimeStart(DepInst)) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
UndefValue::get(LI->getType())));
continue;
}
if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
// Reject loads and stores that are to the same address but are of
// different types if we have to.
if (S->getOperand(0)->getType() != LI->getType()) {
if (TD == 0)
TD = getAnalysisIfAvailable<TargetData>();
// If the stored value is larger or equal to the loaded value, we can
// reuse it.
if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
LI->getType(), *TD)) {
UnavailableBlocks.push_back(DepBB);
continue;
}
}
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
S->getOperand(0)));
continue;
}
if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
// If the types mismatch and we can't handle it, reject reuse of the load.
if (LD->getType() != LI->getType()) {
if (TD == 0)
TD = getAnalysisIfAvailable<TargetData>();
// If the stored value is larger or equal to the loaded value, we can
// reuse it.
if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
UnavailableBlocks.push_back(DepBB);
continue;
}
}
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
continue;
}
UnavailableBlocks.push_back(DepBB);
continue;
}
// If we have no predecessors that produce a known value for this load, exit
// early.
if (ValuesPerBlock.empty()) return false;
// If all of the instructions we depend on produce a known value for this
// load, then it is fully redundant and we can use PHI insertion to compute
// its value. Insert PHIs and remove the fully redundant value now.
if (UnavailableBlocks.empty()) {
DEBUG(errs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');
// Perform PHI construction.
Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD,
VN.getAliasAnalysis());
LI->replaceAllUsesWith(V);
if (isa<PHINode>(V))
V->takeName(LI);
if (isa<PointerType>(V->getType()))
MD->invalidateCachedPointerInfo(V);
toErase.push_back(LI);
NumGVNLoad++;
return true;
}
if (!EnablePRE || !EnableLoadPRE)
return false;
// Okay, we have *some* definitions of the value. This means that the value
// is available in some of our (transitive) predecessors. Lets think about
// doing PRE of this load. This will involve inserting a new load into the
// predecessor when it's not available. We could do this in general, but
// prefer to not increase code size. As such, we only do this when we know
// that we only have to insert *one* load (which means we're basically moving
// the load, not inserting a new one).
SmallPtrSet<BasicBlock *, 4> Blockers;
for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
Blockers.insert(UnavailableBlocks[i]);
// Let's find the first basic block with more than one predecessor. Walk backwards
// through predecessors if needed.
BasicBlock *LoadBB = LI->getParent();
BasicBlock *TmpBB = LoadBB;
bool isSinglePred = false;
bool allSingleSucc = true;
while (TmpBB->getSinglePredecessor()) {
isSinglePred = true;
TmpBB = TmpBB->getSinglePredecessor();
if (!TmpBB) // If haven't found any, bail now.
return false;
if (TmpBB == LoadBB) // Infinite (unreachable) loop.
return false;
if (Blockers.count(TmpBB))
return false;
if (TmpBB->getTerminator()->getNumSuccessors() != 1)
allSingleSucc = false;
}
assert(TmpBB);
LoadBB = TmpBB;
// If we have a repl set with LI itself in it, this means we have a loop where
// at least one of the values is LI. Since this means that we won't be able
// to eliminate LI even if we insert uses in the other predecessors, we will
// end up increasing code size. Reject this by scanning for LI.
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
if (ValuesPerBlock[i].V == LI)
return false;
if (isSinglePred) {
bool isHot = false;
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
if (Instruction *I = dyn_cast<Instruction>(ValuesPerBlock[i].V))
// "Hot" Instruction is in some loop (because it dominates its dep.
// instruction).
if (DT->dominates(LI, I)) {
isHot = true;
break;
}
// We are interested only in "hot" instructions. We don't want to do any
// mis-optimizations here.
if (!isHot)
return false;
}
// Okay, we have some hope :). Check to see if the loaded value is fully
// available in all but one predecessor.
// FIXME: If we could restructure the CFG, we could make a common pred with
// all the preds that don't have an available LI and insert a new load into
// that one block.
BasicBlock *UnavailablePred = 0;
DenseMap<BasicBlock*, char> FullyAvailableBlocks;
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
FullyAvailableBlocks[UnavailableBlocks[i]] = false;
for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
PI != E; ++PI) {
if (IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
continue;
// If this load is not available in multiple predecessors, reject it.
if (UnavailablePred && UnavailablePred != *PI)
return false;
UnavailablePred = *PI;
}
assert(UnavailablePred != 0 &&
"Fully available value should be eliminated above!");
// We don't currently handle critical edges :(
if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) {
DEBUG(errs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
<< UnavailablePred->getName() << "': " << *LI << '\n');
return false;
}
// Do PHI translation to get its value in the predecessor if necessary. The
// returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
//
// FIXME: This may insert a computation, but we don't tell scalar GVN
// optimization stuff about it. How do we do this?
SmallVector<Instruction*, 8> NewInsts;
Value *LoadPtr = 0;
// If all preds have a single successor, then we know it is safe to insert the
// load on the pred (?!?), so we can insert code to materialize the pointer if
// it is not available.
if (allSingleSucc) {
LoadPtr = MD->InsertPHITranslatedPointer(LI->getOperand(0), LoadBB,
UnavailablePred, TD, *DT,NewInsts);
} else {
LoadPtr = MD->GetAvailablePHITranslatedValue(LI->getOperand(0), LoadBB,
UnavailablePred, TD, *DT);
}
// Assign value numbers to these new instructions.
for (SmallVector<Instruction*, 8>::iterator NI = NewInsts.begin(),
NE = NewInsts.end(); NI != NE; ++NI) {
// FIXME: We really _ought_ to insert these value numbers into their
// parent's availability map. However, in doing so, we risk getting into
// ordering issues. If a block hasn't been processed yet, we would be
// marking a value as AVAIL-IN, which isn't what we intend.
VN.lookup_or_add(*NI);
}
// If we couldn't find or insert a computation of this phi translated value,
// we fail PRE.
if (LoadPtr == 0) {
DEBUG(errs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
<< *LI->getOperand(0) << "\n");
return false;
}
// Make sure it is valid to move this load here. We have to watch out for:
// @1 = getelementptr (i8* p, ...
// test p and branch if == 0
// load @1
// It is valid to have the getelementptr before the test, even if p can be 0,
// as getelementptr only does address arithmetic.
// If we are not pushing the value through any multiple-successor blocks
// we do not have this case. Otherwise, check that the load is safe to
// put anywhere; this can be improved, but should be conservatively safe.
if (!allSingleSucc &&
// FIXME: REEVALUATE THIS.
!isSafeToLoadUnconditionally(LoadPtr, UnavailablePred->getTerminator())) {
assert(NewInsts.empty() && "Should not have inserted instructions");
return false;
}
// Okay, we can eliminate this load by inserting a reload in the predecessor
// and using PHI construction to get the value in the other predecessors, do
// it.
DEBUG(errs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
DEBUG(if (!NewInsts.empty())
errs() << "INSERTED " << NewInsts.size() << " INSTS: "
<< *NewInsts.back() << '\n');
Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
LI->getAlignment(),
UnavailablePred->getTerminator());
// Add the newly created load.
ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,NewLoad));
// Perform PHI construction.
Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD,
VN.getAliasAnalysis());
LI->replaceAllUsesWith(V);
if (isa<PHINode>(V))
V->takeName(LI);
if (isa<PointerType>(V->getType()))
MD->invalidateCachedPointerInfo(V);
toErase.push_back(LI);
NumPRELoad++;
return true;
}
/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
if (!MD)
return false;
if (L->isVolatile())
return false;
// ... to a pointer that has been loaded from before...
MemDepResult Dep = MD->getDependency(L);
// If the value isn't available, don't do anything!
if (Dep.isClobber()) {
// FIXME: We should handle memset/memcpy/memmove as dependent instructions
// to forward the value if available.
//if (isa<MemIntrinsic>(Dep.getInst()))
//errs() << "LOAD DEPENDS ON MEM: " << *L << "\n" << *Dep.getInst()<<"\n\n";
// Check to see if we have something like this:
// store i32 123, i32* %P
// %A = bitcast i32* %P to i8*
// %B = gep i8* %A, i32 1
// %C = load i8* %B
//
// We could do that by recognizing if the clobber instructions are obviously
// a common base + constant offset, and if the previous store (or memset)
// completely covers this load. This sort of thing can happen in bitfield
// access code.
if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
int Offset = AnalyzeLoadFromClobberingStore(L, DepSI, *TD);
if (Offset != -1) {
Value *AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
L->getType(), L, *TD);
DEBUG(errs() << "GVN COERCED STORE BITS:\n" << *DepSI << '\n'
<< *AvailVal << '\n' << *L << "\n\n\n");
// Replace the load!
L->replaceAllUsesWith(AvailVal);
if (isa<PointerType>(AvailVal->getType()))
MD->invalidateCachedPointerInfo(AvailVal);
toErase.push_back(L);
NumGVNLoad++;
return true;
}
}
DEBUG(
// fast print dep, using operator<< on instruction would be too slow
errs() << "GVN: load ";
WriteAsOperand(errs(), L);
Instruction *I = Dep.getInst();
errs() << " is clobbered by " << *I << '\n';
);
return false;
}
// If it is defined in another block, try harder.
if (Dep.isNonLocal())
return processNonLocalLoad(L, toErase);
Instruction *DepInst = Dep.getInst();
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
Value *StoredVal = DepSI->getOperand(0);
// The store and load are to a must-aliased pointer, but they may not
// actually have the same type. See if we know how to reuse the stored
// value (depending on its type).
const TargetData *TD = 0;
if (StoredVal->getType() != L->getType()) {
if ((TD = getAnalysisIfAvailable<TargetData>())) {
StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
L, *TD);
if (StoredVal == 0)
return false;
DEBUG(errs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
<< '\n' << *L << "\n\n\n");
}
else
return false;
}
L->replaceAllUsesWith(StoredVal);
if (isa<PointerType>(StoredVal->getType()))
MD->invalidateCachedPointerInfo(StoredVal);
toErase.push_back(L);
NumGVNLoad++;
return true;
}
if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
Value *AvailableVal = DepLI;
// The loads are of a must-aliased pointer, but they may not actually have
// the same type. See if we know how to reuse the previously loaded value
// (depending on its type).
const TargetData *TD = 0;
if (DepLI->getType() != L->getType()) {
if ((TD = getAnalysisIfAvailable<TargetData>())) {
AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), L,*TD);
if (AvailableVal == 0)
return false;
DEBUG(errs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
<< "\n" << *L << "\n\n\n");
}
else
return false;
}
L->replaceAllUsesWith(AvailableVal);
if (isa<PointerType>(DepLI->getType()))
MD->invalidateCachedPointerInfo(DepLI);
toErase.push_back(L);
NumGVNLoad++;
return true;
}
// If this load really doesn't depend on anything, then we must be loading an
// undef value. This can happen when loading for a fresh allocation with no
// intervening stores, for example.
if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
L->replaceAllUsesWith(UndefValue::get(L->getType()));
toErase.push_back(L);
NumGVNLoad++;
return true;
}
// If this load occurs either right after a lifetime begin,
// then the loaded value is undefined.
if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) {
if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
L->replaceAllUsesWith(UndefValue::get(L->getType()));
toErase.push_back(L);
NumGVNLoad++;
return true;
}
}
return false;
}
Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
if (I == localAvail.end())
return 0;
ValueNumberScope *Locals = I->second;
while (Locals) {
DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
if (I != Locals->table.end())
return I->second;
Locals = Locals->parent;
}
return 0;
}
/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets
bool GVN::processInstruction(Instruction *I,
SmallVectorImpl<Instruction*> &toErase) {
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
bool Changed = processLoad(LI, toErase);
if (!Changed) {
unsigned Num = VN.lookup_or_add(LI);
localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
}
return Changed;
}
uint32_t NextNum = VN.getNextUnusedValueNumber();
unsigned Num = VN.lookup_or_add(I);
if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
return false;
Value *BranchCond = BI->getCondition();
uint32_t CondVN = VN.lookup_or_add(BranchCond);
BasicBlock *TrueSucc = BI->getSuccessor(0);
BasicBlock *FalseSucc = BI->getSuccessor(1);
if (TrueSucc->getSinglePredecessor())
localAvail[TrueSucc]->table[CondVN] =
ConstantInt::getTrue(TrueSucc->getContext());
if (FalseSucc->getSinglePredecessor())
localAvail[FalseSucc]->table[CondVN] =
ConstantInt::getFalse(TrueSucc->getContext());
return false;
// Allocations are always uniquely numbered, so we can save time and memory
// by fast failing them.
} else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
return false;
}
if (PHINode* p = dyn_cast<PHINode>(I)) {
Value *constVal = CollapsePhi(p);
if (constVal) {
p->replaceAllUsesWith(constVal);
if (MD && isa<PointerType>(constVal->getType()))
MD->invalidateCachedPointerInfo(constVal);
VN.erase(p);
toErase.push_back(p);
} else {
localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
}
// If the number we were assigned was a brand new VN, then we don't
// need to do a lookup to see if the number already exists
// somewhere in the domtree: it can't!
} else if (Num == NextNum) {
localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
// Perform fast-path value-number based elimination of values inherited from
// dominators.
} else if (Value *repl = lookupNumber(I->getParent(), Num)) {
// Remove it!
I->replaceAllUsesWith(repl);
if (MD && isa<PointerType>(repl->getType()))
MD->invalidateCachedPointerInfo(repl);
toErase.push_back(I);
return true;
} else {
localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
}
return false;
}
/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function &F) {
if (!NoLoads)
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTree>();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
VN.setMemDep(MD);
VN.setDomTree(DT);
bool Changed = false;
bool ShouldContinue = true;
// Merge unconditional branches, allowing PRE to catch more
// optimization opportunities.
for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
BasicBlock *BB = FI;
++FI;
bool removedBlock = MergeBlockIntoPredecessor(BB, this);
if (removedBlock) NumGVNBlocks++;
Changed |= removedBlock;
}
DEBUG(errs() << "GVN iteration: " << Iteration << "\n");
ShouldContinue = iterateOnFunction(F);
Changed |= ShouldContinue;
Owen Anderson
committed
if (EnablePRE) {
bool PREChanged = true;
while (PREChanged) {
PREChanged = performPRE(F);
Changed |= PREChanged;
}
}
// FIXME: Should perform GVN again after PRE does something. PRE can move
// computations into blocks where they become fully redundant. Note that
// we can't do this until PRE's critical edge splitting updates memdep.
// Actually, when this happens, we should just fully integrate PRE into GVN.
cleanupGlobalSets();
return Changed;
}
bool GVN::processBlock(BasicBlock *BB) {
// FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
// incrementing BI before processing an instruction).
SmallVector<Instruction*, 8> toErase;
bool ChangedFunction = false;
for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
BI != BE;) {
ChangedFunction |= processInstruction(BI, toErase);
if (toErase.empty()) {
++BI;
continue;
}
// If we need some instructions deleted, do it now.
NumGVNInstr += toErase.size();
// Avoid iterator invalidation.
bool AtStart = BI == BB->begin();
if (!AtStart)
--BI;
for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(),
E = toErase.end(); I != E; ++I) {
DEBUG(errs() << "GVN removed: " << **I << '\n');
if (MD) MD->removeInstruction(*I);
(*I)->eraseFromParent();
DEBUG(verifyRemoved(*I));
}
toErase.clear();
if (AtStart)
BI = BB->begin();
else
++BI;
}
return ChangedFunction;
}
/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
bool Changed = false;
SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
DenseMap<BasicBlock*, Value*> predMap;
for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
BasicBlock *CurrentBlock = *DI;
// Nothing to PRE in the entry block.
if (CurrentBlock == &F.getEntryBlock()) continue;
for (BasicBlock::iterator BI = CurrentBlock->begin(),
BE = CurrentBlock->end(); BI != BE; ) {
Instruction *CurInst = BI++;
if (isa<AllocaInst>(CurInst) ||
isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
isa<DbgInfoIntrinsic>(CurInst))
continue;
uint32_t ValNo = VN.lookup(CurInst);
// Look for the predecessors for PRE opportunities. We're
// only trying to solve the basic diamond case, where
// a value is computed in the successor and one predecessor,
// but not the other. We also explicitly disallow cases
// where the successor is its own predecessor, because they're
// more complicated to get right.
unsigned NumWith = 0;
unsigned NumWithout = 0;
BasicBlock *PREPred = 0;
predMap.clear();
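// Diamond shape we are looking for (illustrative CFG):
//        [P1]              [P2]
//      %v = ...         (missing)
//          \              /
//          [CurrentBlock]
//           %v' = ...        <- available from P1, insertable in P2
// PRE clones the computation into the one predecessor that lacks it and
// then joins the copies with a phi in CurrentBlock.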
for (pred_iterator PI = pred_begin(CurrentBlock),
PE = pred_end(CurrentBlock); PI != PE; ++PI) {
// We're not interested in PRE where the block is its
// own predecessor, or in blocks with predecessors
// that are not reachable.
if (*PI == CurrentBlock) {
NumWithout = 2;
break;
} else if (!localAvail.count(*PI)) {
NumWithout = 2;
break;
}
DenseMap<uint32_t, Value*>::iterator predV =
localAvail[*PI]->table.find(ValNo);
if (predV == localAvail[*PI]->table.end()) {
PREPred = *PI;
NumWithout++;
} else if (predV->second == CurInst) {
NumWithout = 2;
} else {
predMap[*PI] = predV->second;
NumWith++;
}
}
// Don't do PRE when it might increase code size, i.e. when
// we would need to insert instructions in more than one pred.
if (NumWithout != 1 || NumWith == 0)
continue;
// Don't do PRE across indirect branch.
if (isa<IndirectBrInst>(PREPred->getTerminator()))
continue;
// We can't do PRE safely on a critical edge, so instead we schedule
// the edge to be split and perform the PRE the next time we iterate
// on the function.
unsigned SuccNum = 0;
for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
i != e; ++i)
if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
SuccNum = i;
break;
}
if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
continue;
}
// Instantiate the expression in the predecessor that lacked it.
// Because we are going top-down through the block, all value numbers
// will be available in the predecessor by the time we need them. Any
// that weren't originally present will have been instantiated earlier
// in this loop.
Instruction *PREInstr = CurInst->clone();
bool success = true;
for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
Value *Op = PREInstr->getOperand(i);
if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
continue;
if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
PREInstr->setOperand(i, V);
} else {
success = false;
break;
}
}
// Fail out if we encounter an operand that is not available in
// the PRE predecessor. This is typically because of loads which
// are not value numbered precisely.
if (!success) {
delete PREInstr;
DEBUG(verifyRemoved(PREInstr));
continue;
}
PREInstr->insertBefore(PREPred->getTerminator());
PREInstr->setName(CurInst->getName() + ".pre");
predMap[PREPred] = PREInstr;
VN.add(PREInstr, ValNo);
NumGVNPRE++;
// Update the availability map to include the new instruction.
localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));
// Create a PHI to make the value available in this block.
PHINode* Phi = PHINode::Create(CurInst->getType(),
CurInst->getName() + ".pre-phi",
CurrentBlock->begin());
for (pred_iterator PI = pred_begin(CurrentBlock),
PE = pred_end(CurrentBlock); PI != PE; ++PI)
Phi->addIncoming(predMap[*PI], *PI);
VN.add(Phi, ValNo);
localAvail[CurrentBlock]->table[ValNo] = Phi;
CurInst->replaceAllUsesWith(Phi);
if (MD && isa<PointerType>(Phi->getType()))
MD->invalidateCachedPointerInfo(Phi);
VN.erase(CurInst);
DEBUG(errs() << "GVN PRE removed: " << *CurInst << '\n');
if (MD) MD->removeInstruction(CurInst);
CurInst->eraseFromParent();
Changed = true;