/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with. This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0, etc. If the value cannot be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V) {
// All byte-wide stores are splatable, even of arbitrary variables.
if (V->getType() == Type::Int8Ty) return V;
// Constant float and double values can be handled as integer values if the
// corresponding integer value is "byteable". An important case is 0.0.
if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
if (CFP->getType() == Type::FloatTy)
V = ConstantExpr::getBitCast(CFP, Type::Int32Ty);
if (CFP->getType() == Type::DoubleTy)
V = ConstantExpr::getBitCast(CFP, Type::Int64Ty);
// Don't handle long double formats, which have strange constraints.
}
// We can handle constant integers that are a power of two in size and a
// multiple of 8 bits.
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
unsigned Width = CI->getBitWidth();
if (isPowerOf2_32(Width) && Width > 8) {
// We can handle this value if the recursive binary decomposition is the
// same at all levels.
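// For example (illustrative values, not from a test case): i32 0xA0A0A0A0
// splits into 0xA0A0 / 0xA0A0 (equal), which split into 0xA0 / 0xA0 (equal),
// so we return the i8 value 0xA0. In contrast, i16 0x1234 splits into
// 0x12 / 0x34, which differ, so we return null.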
APInt Val = CI->getValue();
APInt Val2;
while (Val.getBitWidth() != 8) {
unsigned NextWidth = Val.getBitWidth()/2;
Val2 = Val.lshr(NextWidth);
Val2.trunc(Val.getBitWidth()/2);
Val.trunc(Val.getBitWidth()/2);
// If the top/bottom halves aren't the same, reject it.
if (Val != Val2)
return 0;
}
return ConstantInt::get(Val);
}
}
// Conceptually, we could handle things like:
// %a = zext i8 %X to i16
// %b = shl i16 %a, 8
// %c = or i16 %a, %b
// but until there is an example that actually needs this, it doesn't seem
// worth worrying about.
return 0;
}
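/// GetOffsetFromIndex - Compute the constant byte offset implied by GEP
/// operands [Idx, end). If any of those operands is not a ConstantInt,
/// VariableIdxFound is set and the returned offset is meaningless.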
static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
bool &VariableIdxFound, TargetData &TD) {
// Skip over the first indices.
gep_type_iterator GTI = gep_type_begin(GEP);
for (unsigned i = 1; i != Idx; ++i, ++GTI)
/*skip along*/;
// Compute the offset implied by the rest of the indices.
int64_t Offset = 0;
for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
if (OpC == 0)
return VariableIdxFound = true;
if (OpC->isZero()) continue; // No offset.
// Handle struct indices, which add their field offset to the pointer.
if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
continue;
}
// Otherwise, we have a sequential type like an array or vector. Multiply
// the index by the ElementSize.
uint64_t Size = TD.getABITypeSize(GTI.getIndexedType());
Offset += Size*OpC->getSExtValue();
}
return Offset;
}
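// For example (illustrative, assuming a typical struct layout): indexing a
// { i32, [8 x i16] } with trailing constant indices 1, 3 contributes the
// field offset of element 1 (4 bytes) plus 3 * sizeof(i16) = 6 bytes, for a
// total offset of 10.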
/// IsPointerOffset - Return true if Ptr1 is provably equal to Ptr2 plus a
/// constant offset, and return that constant offset. For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40]. In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
TargetData &TD) {
// Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
// base. After that base, they may have some number of common (and
// potentially variable) indices. After that they handle some constant
// offset, which determines their offset from each other. At this point, we
// handle no other case.
GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
return false;
// Skip any common indices and track the GEP types.
unsigned Idx = 1;
for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
break;
bool VariableIdxFound = false;
int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
if (VariableIdxFound) return false;
Offset = Offset2-Offset1;
return true;
}
/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
/// store 0 -> P+1
/// store 0 -> P+0
/// store 0 -> P+3
/// store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [3, 4). The fourth store joins the
/// two ranges into [0, 4), which is memset'able.
namespace {
struct MemsetRange {
// Start/End - A half-open range that describes the span that this range covers.
// The range is closed at the start and open at the end: [Start, End).
int64_t Start, End;
/// StartPtr - The getelementptr instruction that points to the start of the
/// range.
Value *StartPtr;
/// Alignment - The known alignment of the first store.
unsigned Alignment;
/// TheStores - The actual stores that make up this range.
SmallVector<StoreInst*, 16> TheStores;
};
class MemsetRanges {
/// Ranges - A sorted list of the memset ranges. We use std::list here
/// because each element is relatively large and expensive to copy.
std::list<MemsetRange> Ranges;
typedef std::list<MemsetRange>::iterator range_iterator;
TargetData &TD;
public:
MemsetRanges(TargetData &td) : TD(td) {}
typedef std::list<MemsetRange>::const_iterator const_iterator;
const_iterator begin() const { return Ranges.begin(); }
const_iterator end() const { return Ranges.end(); }
void addStore(int64_t OffsetFromFirst, StoreInst *SI);
};
}
/// addStore - Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());
// Do a linear search of the ranges to see if this can be joined and/or to
// find the insertion point in the list. We keep the ranges sorted for
// simplicity here. This is a linear search of a linked list, which is ugly;
// however, the number of ranges is limited, so this won't get crazy slow.
range_iterator I = Ranges.begin(), E = Ranges.end();
while (I != E && Start > I->End)
++I;
// We now know that I == E, in which case we didn't find anything to merge
// with, or that Start <= I->End. If End < I->Start or I == E, then we need
// to insert a new range. Handle this now.
if (I == E || End < I->Start) {
MemsetRange &R = *Ranges.insert(I, MemsetRange());
R.Start = Start;
R.End = End;
R.StartPtr = SI->getPointerOperand();
R.Alignment = SI->getAlignment();
R.TheStores.push_back(SI);
return;
}
// This store overlaps with I, add it.
I->TheStores.push_back(SI);
// At this point, we may have an interval that completely contains our store.
// If so, just add it to the interval and return.
if (I->Start <= Start && I->End >= End)
return;
// Now we know that Start <= I->End and End >= I->Start so the range overlaps
// but is not entirely contained within the range.
// See if this store extends the start of the range. In this case, it couldn't
// possibly cause it to join the prior range, because otherwise we would have
// stopped on *it*.
if (Start < I->Start)
I->Start = Start;
// Now we know that Start <= I->End and Start >= I->Start (so the startpoint
// is in or right at the end of I), and that End >= I->Start. Extend I out to
// End.
if (End > I->End) {
I->End = End;
range_iterator NextI = I;
while (++NextI != E && End >= NextI->Start) {
// Merge the range in.
I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
if (NextI->End > I->End)
I->End = NextI->End;
Ranges.erase(NextI);
NextI = I;
}
}
}
/// processStore - When GVN is scanning forward over instructions, we look for
/// some other patterns to fold away. In particular, this looks for stores to
// neighboring locations of memory. If it sees enough consecutive ones
/// (currently 4) it attempts to merge them together into a memcpy/memset.
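/// For example (illustrative IR, not from a real test case):
/// store i8 0, i8* %p
/// store i8 0, i8* %p1 ; %p1 = getelementptr i8* %p, i64 1
/// store i8 0, i8* %p2 ; %p2 = getelementptr i8* %p, i64 2
/// store i8 0, i8* %p3 ; %p3 = getelementptr i8* %p, i64 3
/// would be collapsed into a single llvm.memset of %p with length 4.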
bool GVN::processStore(StoreInst *SI, SmallVectorImpl<Instruction*> &toErase) {
if (!FormMemSet) return false;
if (SI->isVolatile()) return false;
// There are two cases that are interesting for this code to handle: memcpy
// and memset. Right now we only handle memset.
// Ensure that the value being stored is something that can be memset'd a byte
// at a time, like "0" or "-1" of any width, as well as things like
// 0xA0A0A0A0 and 0.0.
Value *ByteVal = isBytewiseValue(SI->getOperand(0));
if (!ByteVal)
return false;
TargetData &TD = getAnalysis<TargetData>();
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
// Okay, so we now have a single store with a splattable value. Scan to find
// all subsequent stores of the same value at offsets from the same pointer.
// Join these together into ranges, so we can decide whether contiguous blocks
// are stored.
MemsetRanges Ranges(TD);
// Add our first pointer.
Ranges.addStore(0, SI);
Value *StartPtr = SI->getPointerOperand();
BasicBlock::iterator BI = SI;
for (++BI; !isa<TerminatorInst>(BI); ++BI) {
if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
// If the call is readnone, ignore it, otherwise bail out. We don't even
// allow readonly here because we don't want something like:
// A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
if (AA.getModRefBehavior(CallSite::get(BI)) ==
AliasAnalysis::DoesNotAccessMemory)
continue;
// TODO: If this is a memset, try to join it in.
break;
} else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
break;
// If this is a non-store instruction it is fine, ignore it.
StoreInst *NextStore = dyn_cast<StoreInst>(BI);
if (NextStore == 0) continue;
// If this is a store, see if we can merge it in.
if (NextStore->isVolatile()) break;
// Check to see if this stored value splats to the same byte value.
if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
break;
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, TD))
break;
Ranges.addStore(Offset, NextStore);
}
Function *MemSetF = 0;
// Now that we have full information about ranges, loop over the ranges and
// emit memset's for anything big enough to be worthwhile.
bool MadeChange = false;
for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
I != E; ++I) {
const MemsetRange &Range = *I;
// If we found fewer than 4 stores to merge, ignore the subrange: it isn't
// worth losing type information in LLVM IR to do the transformation.
if (Range.TheStores.size() < 4)
continue;
// Otherwise, we do want to transform this! Create a new memset. We put
// the memset right after the first store that we found in this block. This
// ensures that the caller will increment the iterator to the memset before
// it deletes all the stores.
BasicBlock::iterator InsertPt = SI; ++InsertPt;
if (MemSetF == 0)
MemSetF = Intrinsic::getDeclaration(SI->getParent()->getParent()
->getParent(), Intrinsic::memset_i64);
// Range.StartPtr may not dominate the insertion point. Instead of using it,
// base the destination pointer off the pointer operand of the first store in
// the block.
StartPtr = SI->getPointerOperand();
// Cast the start ptr to be i8* as memset requires.
const Type *i8Ptr = PointerType::getUnqual(Type::Int8Ty);
if (StartPtr->getType() != i8Ptr)
StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getNameStart(),
InsertPt);
// Offset the pointer if needed.
if (Range.Start)
StartPtr = new GetElementPtrInst(StartPtr, ConstantInt::get(Type::Int64Ty,
Range.Start),
"ptroffset", InsertPt);
Value *Ops[] = {
StartPtr, ByteVal, // Start, value
ConstantInt::get(Type::Int64Ty, Range.End-Range.Start), // size
ConstantInt::get(Type::Int32Ty, Range.Alignment) // align
};
new CallInst(MemSetF, Ops, Ops+4, "", InsertPt);
// Zap all the stores.
toErase.append(Range.TheStores.begin(), Range.TheStores.end());
++NumMemSetInfer;
MadeChange = true;
}
return MadeChange;
}
/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool GVN::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C,
SmallVectorImpl<Instruction*> &toErase) {
// The general transformation to keep in mind is
//
// call @func(..., src, ...)
// memcpy(dest, src, ...)
//
// ->
//
// memcpy(dest, src, ...)
// call @func(..., dest, ...)
//
// Since moving the memcpy is technically awkward, we additionally check that
// src only holds uninitialized values at the moment of the call, meaning that
// the memcpy can be discarded rather than moved.
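// A typical case (illustrative): a call that fills a local alloca %tmp
// through an out/sret pointer, followed by memcpy(%dest, %tmp, n). After the
// transform the call is handed %dest directly, and the temporary copy (and
// eventually %tmp itself) becomes dead.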
// Deliberately get the source and destination with bitcasts stripped away,
// because we'll need to do type comparisons based on the underlying type.
Value* cpyDest = cpy->getDest();
Value* cpySrc = cpy->getSource();
CallSite CS = CallSite::get(C);
// We need to be able to reason about the size of the memcpy, so we require
// that it be a constant.
ConstantInt* cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
if (!cpyLength)
return false;
// Require that src be an alloca. This simplifies the reasoning considerably.
AllocaInst* srcAlloca = dyn_cast<AllocaInst>(cpySrc);
if (!srcAlloca)
return false;
// Check that all of src is copied to dest.
TargetData& TD = getAnalysis<TargetData>();
ConstantInt* srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
if (!srcArraySize)
return false;
uint64_t srcSize = TD.getABITypeSize(srcAlloca->getAllocatedType()) *
srcArraySize->getZExtValue();
if (cpyLength->getZExtValue() < srcSize)
return false;
// Check that accessing the first srcSize bytes of dest will not cause a
// trap. Otherwise the transform is invalid since it might cause a trap
// to occur earlier than it otherwise would.
if (AllocaInst* A = dyn_cast<AllocaInst>(cpyDest)) {
// The destination is an alloca. Check that it is no smaller than srcSize.
ConstantInt* destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
if (!destArraySize)
return false;
uint64_t destSize = TD.getABITypeSize(A->getAllocatedType()) *
destArraySize->getZExtValue();
if (destSize < srcSize)
return false;
} else if (Argument* A = dyn_cast<Argument>(cpyDest)) {
// If the destination is an sret parameter then only accesses that are
// outside of the returned struct type can trap.
if (!A->hasStructRetAttr())
return false;
const Type* StructTy = cast<PointerType>(A->getType())->getElementType();
uint64_t destSize = TD.getABITypeSize(StructTy);
if (destSize < srcSize)
return false;
} else {
return false;
}
// Check that src is not accessed except via the call and the memcpy. This
// guarantees that it holds only undefined values when passed in (so the final
// memcpy can be dropped), that it is not read or written between the call and
// the memcpy, and that writing beyond the end of it is undefined.
SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
srcAlloca->use_end());
while (!srcUseList.empty()) {
User* UI = srcUseList.back();
srcUseList.pop_back();
if (isa<GetElementPtrInst>(UI) || isa<BitCastInst>(UI)) {
for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
I != E; ++I)
srcUseList.push_back(*I);
} else if (UI != C && UI != cpy) {
return false;
}
}
// Since we're changing the parameter to the callsite, we need to make sure
// that what would be the new parameter dominates the callsite.
DominatorTree& DT = getAnalysis<DominatorTree>();
if (Instruction* cpyDestInst = dyn_cast<Instruction>(cpyDest))
if (!DT.dominates(cpyDestInst, C))
return false;
// In addition to knowing that the call does not access src in some
// unexpected manner, for example via a global, which we deduce from
// the use analysis, we also need to know that it does not sneakily
// access dest. We rely on AA to figure this out for us.
AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
if (AA.getModRefInfo(C, cpy->getRawDest(), srcSize) !=
AliasAnalysis::NoModRef)
return false;
// All the checks have passed, so do the transformation.
for (unsigned i = 0; i < CS.arg_size(); ++i)
if (CS.getArgument(i) == cpySrc) {
if (cpySrc->getType() != cpyDest->getType())
cpyDest = CastInst::createPointerCast(cpyDest, cpySrc->getType(),
cpyDest->getName(), C);
CS.setArgument(i, cpyDest);
}
// Drop any cached information about the call, because we may have changed
// its dependence information by changing its parameter.
MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
MD.dropInstruction(C);
// Remove the memcpy
MD.removeInstruction(cpy);
toErase.push_back(cpy);
return true;
}
/// processMemCpy - perform simplification of memcpys. If we have memcpy A which
/// copies X to Y, and memcpy B which copies Y to Z, then we can rewrite B to be
/// a memcpy from X to Z (or potentially a memmove, depending on circumstances).
/// This allows later passes to remove the first memcpy altogether.
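/// For example (illustrative):
/// memcpy(Y, X, N) ; MDep
/// memcpy(Z, Y, N) ; M
/// becomes
/// memcpy(Y, X, N)
/// memcpy(Z, X, N)
/// after which the first copy can be removed by DSE if Y is otherwise unused.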
bool GVN::processMemCpy(MemCpyInst* M, MemCpyInst* MDep,
SmallVectorImpl<Instruction*> &toErase) {
// We can only transform memcpys where the dest of one is the source of the
// other.
if (M->getSource() != MDep->getDest())
return false;
// Second, the lengths of the memcpys must be the same, or the preceding one
// must be larger than the following one.
ConstantInt* C1 = dyn_cast<ConstantInt>(MDep->getLength());
ConstantInt* C2 = dyn_cast<ConstantInt>(M->getLength());
if (!C1 || !C2)
return false;
uint64_t DepSize = C1->getValue().getZExtValue();
uint64_t CpySize = C2->getValue().getZExtValue();
if (DepSize < CpySize)
return false;
// Finally, we have to make sure that the dest of the second does not
// alias the source of the first
AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
AliasAnalysis::NoAlias)
return false;
else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
AliasAnalysis::NoAlias)
return false;
else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
!= AliasAnalysis::NoAlias)
return false;
// If all checks passed, then we can transform these memcpy's
Function* MemCpyFun = Intrinsic::getDeclaration(
M->getParent()->getParent()->getParent(),
M->getIntrinsicID());
std::vector<Value*> args;
args.push_back(M->getRawDest());
args.push_back(MDep->getRawSource());
args.push_back(M->getLength());
args.push_back(M->getAlignment());
CallInst* C = new CallInst(MemCpyFun, args.begin(), args.end(), "", M);
MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
if (MD.getDependency(C) == MDep) {
MD.dropInstruction(M);
toErase.push_back(M);
return true;
}
MD.removeInstruction(C);
toErase.push_back(C);
return false;
}
/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets
bool GVN::processInstruction(Instruction *I, ValueNumberedSet &currAvail,
DenseMap<Value*, LoadInst*> &lastSeenLoad,
SmallVectorImpl<Instruction*> &toErase) {
if (LoadInst* L = dyn_cast<LoadInst>(I))
return processLoad(L, lastSeenLoad, toErase);
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return processStore(SI, toErase);
if (MemCpyInst* M = dyn_cast<MemCpyInst>(I)) {
MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
// There are two possible optimizations we can do for memcpy:
// a) memcpy-memcpy xform which exposes redundancy for DSE
// b) call-memcpy xform for return slot optimization
Instruction* dep = MD.getDependency(M);
if (dep == MemoryDependenceAnalysis::None ||
dep == MemoryDependenceAnalysis::NonLocal)
return false;
if (MemCpyInst *MemCpy = dyn_cast<MemCpyInst>(dep))
return processMemCpy(M, MemCpy, toErase);
if (CallInst* C = dyn_cast<CallInst>(dep))
return performCallSlotOptzn(M, C, toErase);
}
unsigned num = VN.lookup_or_add(I);
if (PHINode* p = dyn_cast<PHINode>(I)) {
Value* constVal = CollapsePhi(p);
if (constVal) {
for (PhiMapType::iterator PI = phiMap.begin(), PE = phiMap.end();
PI != PE; ++PI)
if (PI->second.count(p))
PI->second.erase(p);
p->replaceAllUsesWith(constVal);
toErase.push_back(p);
}
// Perform value-number based elimination
} else if (currAvail.test(num)) {
Value* repl = find_leader(currAvail, num);
if (CallInst* CI = dyn_cast<CallInst>(I)) {
AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
if (!AA.doesNotAccessMemory(CI)) {
MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
if (cast<Instruction>(repl)->getParent() != CI->getParent() ||
MD.getDependency(CI) != MD.getDependency(cast<CallInst>(repl))) {
// There must be an intervening may-alias store, so nothing from
// this point on will be able to be replaced with the preceding call
currAvail.erase(repl);
currAvail.insert(I);
return false;
}
}
}
// Remove it!
MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
MD.removeInstruction(I);
I->replaceAllUsesWith(repl);
toErase.push_back(I);
return true;
} else if (!I->isTerminator()) {
currAvail.set(num);
currAvail.insert(I);
}
return false;
}
// GVN::runOnFunction - This is the main transformation entry point for a
// function.
//
bool GVN::runOnFunction(Function &F) {
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
bool changed = false;
bool shouldContinue = true;
while (shouldContinue) {
shouldContinue = iterateOnFunction(F);
changed |= shouldContinue;
}
return changed;
}
// GVN::iterateOnFunction - Executes one iteration of GVN
bool GVN::iterateOnFunction(Function &F) {
// Clean out global sets from any previous functions
VN.clear();
availableOut.clear();
bool changed_function = false;
DominatorTree &DT = getAnalysis<DominatorTree>();
SmallVector<Instruction*, 4> toErase;
DenseMap<Value*, LoadInst*> lastSeenLoad;
// Top-down walk of the dominator tree
for (df_iterator<DomTreeNode*> DI = df_begin(DT.getRootNode()),
E = df_end(DT.getRootNode()); DI != E; ++DI) {
// Get the set to update for this block
ValueNumberedSet& currAvail = availableOut[DI->getBlock()];
lastSeenLoad.clear();
BasicBlock* BB = DI->getBlock();
// A block inherits AVAIL_OUT from its dominator
if (DI->getIDom() != 0)
currAvail = availableOut[DI->getIDom()->getBlock()];
for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
changed_function |= processInstruction(BI, currAvail,
lastSeenLoad, toErase);
NumGVNInstr += toErase.size();
// Avoid iterator invalidation
++BI;
for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(),
E = toErase.end(); I != E; ++I)
(*I)->eraseFromParent();
toErase.clear();
}
}
return changed_function;
}