/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
if (!BO->isAssignmentOp())
return;
Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
checkAccess(LHSExp, AK_Written);
checkDereference(LHSExp, AK_Written);
}
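// Illustrative only: a minimal sketch of the user code this visitor checks.
// 'Mutex' and the GUARDED_BY macro are assumed user-side names expanding to
// the corresponding thread-safety attributes; they are not defined here.
//
//   Mutex mu;
//   int counter GUARDED_BY(mu);
//
//   void increment() {
//     counter = counter + 1;  // assignment visited here; without 'mu' held,
//   }                         // checkAccess(..., AK_Written) reports it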
/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitCastExpr(CastExpr *CE) {
if (CE->getCastKind() != CK_LValueToRValue)
return;
Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts();
checkAccess(SubExp, AK_Read);
checkDereference(SubExp, AK_Read);
}
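// Illustrative only: reads are caught via the lvalue-to-rvalue cast the
// compiler inserts. Assuming the same hypothetical 'mu'/'counter' pair as
// above, a read needs at least a shared lock on 'mu':
//
//   int peek() {
//     return counter;  // CK_LValueToRValue of 'counter'; checkAccess(...,
//   }                  // AK_Read) fires if no lock on 'mu' is held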
/// \brief This function, parameterized by an attribute type, is used to add a
/// set of locks specified as attribute arguments to the lockset.
template <typename AttrType>
void BuildLockset::addLocksToSet(LockKind LK, Attr *Attr,
CXXMemberCallExpr *Exp) {
typedef typename AttrType::args_iterator iterator_type;
SourceLocation ExpLocation = Exp->getExprLoc();
Expr *Parent = Exp->getImplicitObjectArgument();
AttrType *SpecificAttr = cast<AttrType>(Attr);
if (SpecificAttr->args_size() == 0) {
// The mutex held is the "this" object.
addLock(ExpLocation, Parent, LK);
return;
}
for (iterator_type I = SpecificAttr->args_begin(),
E = SpecificAttr->args_end(); I != E; ++I)
addLock(ExpLocation, *I, LK);
}
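// Illustrative only: the two shapes of lock-function annotations the helper
// above handles, written with hypothetical user macros that expand to the
// corresponding attributes.
//
//   class LOCKABLE Mutex {
//   public:
//     void Lock()       EXCLUSIVE_LOCK_FUNCTION();  // no args: lock is 'this'
//     void ReaderLock() SHARED_LOCK_FUNCTION();     // no args: lock is 'this'
//     void Unlock()     UNLOCK_FUNCTION();
//   };
//
//   class Cache {
//     Mutex mu1, mu2;
//   public:
//     void lockBoth() EXCLUSIVE_LOCK_FUNCTION(mu1, mu2);  // args: mu1 and mu2
//   };                                                    // are added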
/// \brief When visiting CXXMemberCallExprs we need to examine the attributes on
/// the method that is being called and add, remove or check locks in the
/// lockset accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
/// FIXME: We need to also visit CallExprs to catch/check global functions.
void BuildLockset::VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp) {
NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
SourceLocation ExpLocation = Exp->getExprLoc();
Expr *Parent = Exp->getImplicitObjectArgument();
if (!D || !D->hasAttrs())
return;
AttrVec &ArgAttrs = D->getAttrs();
for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
Attr *Attr = ArgAttrs[i];
switch (Attr->getKind()) {
// When we encounter an exclusive lock function, we need to add the lock
// to our lockset with kind exclusive.
case attr::ExclusiveLockFunction:
addLocksToSet<ExclusiveLockFunctionAttr>(LK_Exclusive, Attr, Exp);
break;
// When we encounter a shared lock function, we need to add the lock
// to our lockset with kind shared.
case attr::SharedLockFunction:
addLocksToSet<SharedLockFunctionAttr>(LK_Shared, Attr, Exp);
break;
// When we encounter an unlock function, we need to remove unlocked
// mutexes from the lockset, and flag a warning if they are not there.
case attr::UnlockFunction: {
UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr);
if (UFAttr->args_size() == 0) { // The lock held is the "this" object.
removeLock(ExpLocation, Parent);
break;
}
for (UnlockFunctionAttr::args_iterator I = UFAttr->args_begin(),
E = UFAttr->args_end(); I != E; ++I)
removeLock(ExpLocation, *I);
break;
}
case attr::ExclusiveLocksRequired: {
// FIXME: Also use this attribute to add required locks to the initial
// lockset when processing a CFG for a function annotated with this
// attribute.
ExclusiveLocksRequiredAttr *ELRAttr =
cast<ExclusiveLocksRequiredAttr>(Attr);
for (ExclusiveLocksRequiredAttr::args_iterator
I = ELRAttr->args_begin(), E = ELRAttr->args_end(); I != E; ++I) {
MutexID Mutex(*I);
warnIfMutexNotHeld(D, Exp, AK_Written, Mutex,
diag::warn_fun_requires_lock);
}
break;
}
case attr::SharedLocksRequired: {
// FIXME: Also use this attribute to add required locks to the initial
// lockset when processing a CFG for a function annotated with this
// attribute.
SharedLocksRequiredAttr *SLRAttr = cast<SharedLocksRequiredAttr>(Attr);
for (SharedLocksRequiredAttr::args_iterator I = SLRAttr->args_begin(),
E = SLRAttr->args_end(); I != E; ++I) {
MutexID Mutex(*I);
warnIfMutexNotHeld(D, Exp, AK_Read, Mutex,
diag::warn_fun_requires_lock);
}
break;
}
case attr::LocksExcluded: {
LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr);
for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(),
E = LEAttr->args_end(); I != E; ++I) {
MutexID Mutex(*I);
if (locksetContains(Mutex))
S.Diag(ExpLocation, diag::warn_fun_excludes_mutex)
<< D->getName() << Mutex.getName();
}
break;
}
case attr::LockReturned:
// FIXME: Deal with this attribute.
break;
// Ignore other (non thread-safety) attributes
default:
break;
}
}
}
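// Illustrative only: method annotations dispatched on in the switch above,
// again using hypothetical macros for the underlying attributes.
//
//   class Counter {
//     Mutex mu;
//     int n GUARDED_BY(mu);
//   public:
//     void bump() EXCLUSIVE_LOCKS_REQUIRED(mu);  // calling without 'mu' held
//                                                //   -> warn_fun_requires_lock
//     int  get()  SHARED_LOCKS_REQUIRED(mu);     // needs at least a shared lock
//     void sync() LOCKS_EXCLUDED(mu);            // calling while 'mu' is held
//                                                //   -> warn_fun_excludes_mutex
//   };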
typedef std::pair<SourceLocation, PartialDiagnostic> DelayedDiag;
typedef llvm::SmallVector<DelayedDiag, 4> DiagList;
struct SortDiagBySourceLocation {
Sema &S;
SortDiagBySourceLocation(Sema &S) : S(S) {}
bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
// Although this call will be slow, this is only called when outputting
// multiple warnings.
return S.getSourceManager().isBeforeInTranslationUnit(left.first,
right.first);
}
};
} // end anonymous namespace
/// \brief Emit all buffered diagnostics in order of source location.
/// We need to output diagnostics produced while iterating through
/// the lockset in deterministic order, so this function orders diagnostics
/// and outputs them.
static void EmitDiagnostics(Sema &S, DiagList &D) {
SortDiagBySourceLocation SortDiagBySL(S);
std::sort(D.begin(), D.end(), SortDiagBySL);
for (DiagList::iterator I = D.begin(), E = D.end(); I != E; ++I)
S.Diag(I->first, I->second);
}
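// Illustrative usage with hypothetical locations: diagnostics are buffered
// while walking the lockset and then emitted in source order, independent of
// the order in which they were produced.
//
//   DiagList Warnings;
//   Warnings.push_back(DelayedDiag(LaterLoc, SomePD));
//   Warnings.push_back(DelayedDiag(EarlierLoc, OtherPD));
//   EmitDiagnostics(S, Warnings);  // OtherPD is emitted before SomePD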
static Lockset warnIfNotInFirstSetOrNotSameKind(Sema &S, const Lockset LSet1,
const Lockset LSet2,
DiagList &Warnings,
Lockset Intersection,
Lockset::Factory &Fact) {
for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) {
const MutexID &LSet2Mutex = I.getKey();
const LockData &LSet2LockData = I.getData();
if (const LockData *LD = LSet1.lookup(LSet2Mutex)) {
if (LD->LKind != LSet2LockData.LKind) {
PartialDiagnostic Warning =
S.PDiag(diag::warn_lock_exclusive_and_shared) << LSet2Mutex.getName();
PartialDiagnostic Note =
S.PDiag(diag::note_lock_exclusive_and_shared) << LSet2Mutex.getName();
Warnings.push_back(DelayedDiag(LSet2LockData.AcquireLoc, Warning));
Warnings.push_back(DelayedDiag(LD->AcquireLoc, Note));
if (LD->LKind != LK_Exclusive)
Intersection = Fact.add(Intersection, LSet2Mutex, LSet2LockData);
}
} else {
PartialDiagnostic Warning =
S.PDiag(diag::warn_lock_at_end_of_scope) << LSet2Mutex.getName();
Warnings.push_back(DelayedDiag(LSet2LockData.AcquireLoc, Warning));
}
}
return Intersection;
}
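// Illustrative only: the exclusive/shared mismatch handled above, with
// hypothetical annotations.
//
//   if (cond)
//     mu.Lock();        // exclusive
//   else
//     mu.ReaderLock();  // shared
//   // merge point: same mutex, different kinds -> warn_lock_exclusive_and_shared
//   // at one acquire site plus note_lock_exclusive_and_shared at the other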
/// \brief Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged. For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the locksets after B and C
/// are the same. In the event of a difference, we use the intersection of these
/// two locksets at the start of D.
static Lockset intersectAndWarn(Sema &S, const Lockset LSet1,
const Lockset LSet2,
Lockset::Factory &Fact) {
Lockset Intersection = LSet1;
DiagList Warnings;
Intersection = warnIfNotInFirstSetOrNotSameKind(S, LSet1, LSet2, Warnings,
Intersection, Fact);
for (Lockset::iterator I = LSet1.begin(), E = LSet1.end(); I != E; ++I) {
if (!LSet2.contains(I.getKey())) {
const MutexID &Mutex = I.getKey();
const LockData &MissingLock = I.getData();
PartialDiagnostic Warning =
S.PDiag(diag::warn_lock_at_end_of_scope) << Mutex.getName();
Warnings.push_back(DelayedDiag(MissingLock.AcquireLoc, Warning));
Intersection = Fact.remove(Intersection, Mutex);
}
}
EmitDiagnostics(S, Warnings);
return Intersection;
}
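// Illustrative only: the merge case described above. Only one branch releases
// the lock, so the branches reach the join point with different locksets
// (hypothetical annotations).
//
//   mu.Lock();
//   if (cond)
//     mu.Unlock();
//   doMore();  // join point: 'mu' is in the symmetric difference ->
//              // warn_lock_at_end_of_scope, and the intersection (without
//              // 'mu') becomes the entry lockset here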
/// \brief Returns the location of the first Stmt in a Block.
static SourceLocation getFirstStmtLocation(CFGBlock *Block) {
SourceLocation Loc;
for (CFGBlock::const_iterator BI = Block->begin(), BE = Block->end();
BI != BE; ++BI) {
if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&(*BI))) {
Loc = CfgStmt->getStmt()->getLocStart();
if (Loc.isValid()) return Loc;
}
}
if (Stmt *S = Block->getTerminator().getStmt()) {
Loc = S->getLocStart();
if (Loc.isValid()) return Loc;
}
return Loc;
}
/// \brief Warn about different locksets along backedges of loops.
/// This function is called when we encounter a back edge. At that point,
/// we need to verify that the lockset before taking the backedge is the
/// same as the lockset before entering the loop.
///
/// \param LoopEntrySet Locks before starting the loop
/// \param LoopReentrySet Locks in the last CFG block of the loop
static void warnBackEdgeUnequalLocksets(Sema &S, const Lockset LoopReentrySet,
const Lockset LoopEntrySet,
SourceLocation FirstLocInLoop,
Lockset::Factory &Fact) {
assert(FirstLocInLoop.isValid());
DiagList Warnings;
// Warn for locks held at the start of the loop, but not the end.
for (Lockset::iterator I = LoopEntrySet.begin(), E = LoopEntrySet.end();
I != E; ++I) {
if (!LoopReentrySet.contains(I.getKey())) {
const MutexID &Mutex = I.getKey();
// We report this error at the location of the first statement in a loop
PartialDiagnostic Warning =
S.PDiag(diag::warn_expecting_lock_held_on_loop) << Mutex.getName();
Warnings.push_back(DelayedDiag(FirstLocInLoop, Warning));
}
}
// Warn for locks held at the end of the loop, but not at the start.
warnIfNotInFirstSetOrNotSameKind(S, LoopEntrySet, LoopReentrySet, Warnings,
LoopReentrySet, Fact);
EmitDiagnostics(S, Warnings);
}
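// Illustrative only: a loop whose body shrinks the lockset, so the lockset on
// the back edge differs from the one on loop entry (hypothetical annotations).
//
//   mu.Lock();
//   while (cond) {
//     doWork();
//     mu.Unlock();  // back edge now carries fewer locks than loop entry ->
//   }               // warn_expecting_lock_held_on_loop, reported at the first
//                   // statement of the loop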
/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
static void checkThreadSafety(Sema &S, AnalysisContext &AC) {
CFG *CFGraph = AC.getCFG();
if (!CFGraph) return;
const Decl *D = AC.getDecl();
if (D && D->getAttr<NoThreadSafetyAnalysisAttr>()) return;
Lockset::Factory LocksetFactory;
// FIXME: Switch to SmallVector? Otherwise improve performance impact?
std::vector<Lockset> EntryLocksets(CFGraph->getNumBlockIDs(),
LocksetFactory.getEmptyMap());
std::vector<Lockset> ExitLocksets(CFGraph->getNumBlockIDs(),
LocksetFactory.getEmptyMap());
// We need to explore the CFG via a "topological" ordering.
// That way, we will be guaranteed to have information about required
// predecessor locksets when exploring a new block.
TopologicallySortedCFG SortedGraph(CFGraph);
CFGBlockSet VisitedBlocks(CFGraph);
for (TopologicallySortedCFG::iterator I = SortedGraph.begin(),
E = SortedGraph.end(); I!= E; ++I) {
const CFGBlock *CurrBlock = *I;
int CurrBlockID = CurrBlock->getBlockID();
VisitedBlocks.insert(CurrBlock);
// Use the default initial lockset in case there are no predecessors.
Lockset &Entryset = EntryLocksets[CurrBlockID];
Lockset &Exitset = ExitLocksets[CurrBlockID];
// Iterate through the predecessor blocks and warn if the lockset for all
// predecessors is not the same. We take the entry lockset of the current
// block to be the intersection of all previous locksets.
// FIXME: By keeping the intersection, we may output more errors in future
// for a lock which is not in the intersection, but was in the union. We
// may want to also keep the union in future. As an example, let's say
// the intersection contains Mutex L, and the union contains L and M.
// Later we unlock M. At this point, we would output an error because we
// never locked M; although the real error is probably that we forgot to
// lock M on all code paths. Conversely, let's say that later we lock M.
// In this case, we should compare against the intersection instead of the
// union because the real error is probably that we forgot to unlock M on
// all code paths.
bool LocksetInitialized = false;
for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
PE = CurrBlock->pred_end(); PI != PE; ++PI) {
// if *PI -> CurrBlock is a back edge
if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
continue;
int PrevBlockID = (*PI)->getBlockID();
if (!LocksetInitialized) {
Entryset = ExitLocksets[PrevBlockID];
LocksetInitialized = true;
} else {
Entryset = intersectAndWarn(S, Entryset, ExitLocksets[PrevBlockID],
LocksetFactory);
}
}
BuildLockset LocksetBuilder(S, Entryset, LocksetFactory);
for (CFGBlock::const_iterator BI = CurrBlock->begin(),
BE = CurrBlock->end(); BI != BE; ++BI) {
if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&*BI))
LocksetBuilder.Visit(const_cast<Stmt*>(CfgStmt->getStmt()));
}
Exitset = LocksetBuilder.getLockset();
// For every back edge from CurrBlock (the end of the loop) to another block
// (FirstLoopBlock), we need to check that the lockset held at the end of
// CurrBlock is equal to the one held at the beginning of FirstLoopBlock. We can
// look up the lockset held at the beginning of FirstLoopBlock in the
// EntryLocksets map.
for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
SE = CurrBlock->succ_end(); SI != SE; ++SI) {
// if CurrBlock -> *SI is *not* a back edge
if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
continue;
CFGBlock *FirstLoopBlock = *SI;
SourceLocation FirstLoopLocation = getFirstStmtLocation(FirstLoopBlock);
assert(FirstLoopLocation.isValid());
// Fail gracefully in release code.
if (!FirstLoopLocation.isValid())
continue;
Lockset PreLoop = EntryLocksets[FirstLoopBlock->getBlockID()];
Lockset LoopEnd = ExitLocksets[CurrBlockID];
warnBackEdgeUnequalLocksets(S, LoopEnd, PreLoop, FirstLoopLocation,
LocksetFactory);
}
}
Lockset FinalLockset = ExitLocksets[CFGraph->getExit().getBlockID()];
if (!FinalLockset.isEmpty()) {
DiagList Warnings;
for (Lockset::iterator I=FinalLockset.begin(), E=FinalLockset.end();
I != E; ++I) {
const MutexID &Mutex = I.getKey();
const LockData &MissingLock = I.getData();
std::string FunName = "<unknown>";
if (const NamedDecl *ContextDecl = dyn_cast<NamedDecl>(AC.getDecl())) {
FunName = ContextDecl->getDeclName().getAsString();
}
PartialDiagnostic Warning =
S.PDiag(diag::warn_no_unlock)
<< Mutex.getName() << FunName;
Warnings.push_back(DelayedDiag(MissingLock.AcquireLoc, Warning));
}
EmitDiagnostics(S, Warnings);
}
}
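// Illustrative only: the exit-block check above flags locks still held when
// the function returns (hypothetical annotations).
//
//   void forgetToUnlock() {
//     mu.Lock();
//     // ... returns without calling mu.Unlock() ...
//   }  // -> warn_no_unlock, naming 'mu' and 'forgetToUnlock'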
//===----------------------------------------------------------------------===//
// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
// warnings on a function, method, or block.
//===----------------------------------------------------------------------===//
clang::sema::AnalysisBasedWarnings::Policy::Policy() {
enableCheckFallThrough = 1;
enableCheckUnreachable = 0;
enableThreadSafetyAnalysis = 0;
}
clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
: S(s),
NumFunctionsAnalyzed(0),
NumFunctionsWithBadCFGs(0),
NumCFGBlocks(0),
MaxCFGBlocksPerFunction(0),
NumUninitAnalysisFunctions(0),
NumUninitAnalysisVariables(0),
MaxUninitAnalysisVariablesPerFunction(0),
NumUninitAnalysisBlockVisits(0),
MaxUninitAnalysisBlockVisitsPerFunction(0) {
Diagnostic &D = S.getDiagnostics();
DefaultPolicy.enableCheckUnreachable = (unsigned)
(D.getDiagnosticLevel(diag::warn_unreachable, SourceLocation()) !=
Diagnostic::Ignored);
DefaultPolicy.enableThreadSafetyAnalysis = (unsigned)
(D.getDiagnosticLevel(diag::warn_double_lock, SourceLocation()) !=
Diagnostic::Ignored);
}
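// Illustrative only: with the defaults above, unreachable-code and
// thread-safety checking stay off until their diagnostics are enabled on the
// command line, e.g. (assuming the usual warning-group spellings):
//
//   clang -fsyntax-only -Wthread-safety file.cpp     // warn_double_lock not
//                                                    //   ignored -> analysis on
//   clang -fsyntax-only -Wunreachable-code file.cpp  // warn_unreachable not
//                                                    //   ignored -> check on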
static void flushDiagnostics(Sema &S, sema::FunctionScopeInfo *fscope) {
for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
i = fscope->PossiblyUnreachableDiags.begin(),
e = fscope->PossiblyUnreachableDiags.end();
i != e; ++i) {
const sema::PossiblyUnreachableDiag &D = *i;
S.Diag(D.Loc, D.PD);
}
}
void clang::sema::
AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
sema::FunctionScopeInfo *fscope,
const Decl *D, const BlockExpr *blkExpr) {
// We avoid doing analysis-based warnings when there are errors for
// two reasons:
// (1) The CFGs often can't be constructed (if the body is invalid), so
// don't bother trying.
// (2) The code already has problems; running the analysis just takes more
// time.
Diagnostic &Diags = S.getDiagnostics();
// Do not do any analysis for declarations in system headers if we are
// going to just ignore them.
if (Diags.getSuppressSystemWarnings() &&
S.SourceMgr.isInSystemHeader(D->getLocation()))
return;
// For code in dependent contexts, we'll do this at instantiation time.
if (cast<DeclContext>(D)->isDependentContext())
return;
if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred()) {
// Flush out any possibly unreachable diagnostics.
flushDiagnostics(S, fscope);
return;
}
const Stmt *Body = D->getBody();
assert(Body);
AnalysisContext AC(D, 0);
// Don't generate EH edges for CallExprs as we'd like to avoid the n^2
// explosion for destructors that can result and the compile time hit.
AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
AC.getCFGBuildOptions().AddEHEdges = false;
AC.getCFGBuildOptions().AddInitializers = true;
AC.getCFGBuildOptions().AddImplicitDtors = true;
// Force that certain expressions appear as CFGElements in the CFG. This
// is used to speed up various analyses.
// FIXME: This isn't the right factoring. This is here for initial
// prototyping, but we need a way for analyses to say what expressions they
// expect to always be CFGElements and then fill in the BuildOptions
// appropriately. This is essentially a layering violation.
if (P.enableCheckUnreachable) {
// Unreachable code analysis requires a linearized CFG.
AC.getCFGBuildOptions().setAllAlwaysAdd();
}
else {
AC.getCFGBuildOptions()
.setAlwaysAdd(Stmt::BinaryOperatorClass)
.setAlwaysAdd(Stmt::BlockExprClass)
.setAlwaysAdd(Stmt::CStyleCastExprClass)
.setAlwaysAdd(Stmt::DeclRefExprClass)
.setAlwaysAdd(Stmt::ImplicitCastExprClass)
.setAlwaysAdd(Stmt::UnaryOperatorClass);
}
// Construct the analysis context with the specified CFG build options.
// Emit delayed diagnostics.
if (!fscope->PossiblyUnreachableDiags.empty()) {
bool analyzed = false;
// Register the expressions with the CFGBuilder.
for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
i = fscope->PossiblyUnreachableDiags.begin(),
e = fscope->PossiblyUnreachableDiags.end();
i != e; ++i) {
if (const Stmt *stmt = i->stmt)
AC.registerForcedBlockExpression(stmt);
}
if (AC.getCFG()) {
analyzed = true;
for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
i = fscope->PossiblyUnreachableDiags.begin(),
e = fscope->PossiblyUnreachableDiags.end();
i != e; ++i)
{
const sema::PossiblyUnreachableDiag &D = *i;
bool processed = false;
if (const Stmt *stmt = i->stmt) {
const CFGBlock *block = AC.getBlockForRegisteredExpression(stmt);
assert(block);
if (CFGReverseBlockReachabilityAnalysis *cra = AC.getCFGReachablityAnalysis()) {
// Can this block be reached from the entrance?
if (cra->isReachable(&AC.getCFG()->getEntry(), block))
S.Diag(D.Loc, D.PD);
processed = true;
}
}
if (!processed) {
// Emit the warning anyway if we cannot map to a basic block.
S.Diag(D.Loc, D.PD);
}
}
}
if (!analyzed)
flushDiagnostics(S, fscope);
}
// Warning: check missing 'return'
if (P.enableCheckFallThrough) {
const CheckFallThroughDiagnostics &CD =
(isa<BlockDecl>(D) ? CheckFallThroughDiagnostics::MakeForBlock()
: CheckFallThroughDiagnostics::MakeForFunction(D));
CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC);
}
// Warning: check for unreachable code
if (P.enableCheckUnreachable)
CheckUnreachable(S, AC);
// Check for thread safety violations
if (P.enableThreadSafetyAnalysis)
checkThreadSafety(S, AC);
if (Diags.getDiagnosticLevel(diag::warn_uninit_var, D->getLocStart())
!= Diagnostic::Ignored ||
Diags.getDiagnosticLevel(diag::warn_maybe_uninit_var, D->getLocStart())
!= Diagnostic::Ignored) {
if (CFG *cfg = AC.getCFG()) {
UninitValsDiagReporter reporter(S);
UninitVariablesAnalysisStats stats;
std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
reporter, stats);
if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
++NumUninitAnalysisFunctions;
NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
MaxUninitAnalysisVariablesPerFunction =
std::max(MaxUninitAnalysisVariablesPerFunction,
stats.NumVariablesAnalyzed);
MaxUninitAnalysisBlockVisitsPerFunction =
std::max(MaxUninitAnalysisBlockVisitsPerFunction,
stats.NumBlockVisits);
}
}
}
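// Illustrative only: the kind of code the uninitialized-variables analysis
// above diagnoses.
//
//   int f(bool b) {
//     int x;
//     if (b) x = 1;
//     return x;  // uninitialized when 'b' is false -> warn_maybe_uninit_var
//   }            // (warn_uninit_var covers the definitely-uninitialized case)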
// Collect statistics about the CFG if it was built.
if (S.CollectStats && AC.isCFGBuilt()) {
++NumFunctionsAnalyzed;
if (CFG *cfg = AC.getCFG()) {
// If we successfully built a CFG for this context, record some more
// detailed information about it.
NumCFGBlocks += cfg->getNumBlockIDs();
MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
cfg->getNumBlockIDs());
} else {
++NumFunctionsWithBadCFGs;
}
}
}
void clang::sema::AnalysisBasedWarnings::PrintStats() const {
llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";
unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs;
unsigned AvgCFGBlocksPerFunction =
!NumCFGsBuilt ? 0 : NumCFGBlocks/NumCFGsBuilt;
llvm::errs() << NumFunctionsAnalyzed << " functions analyzed ("
<< NumFunctionsWithBadCFGs << " w/o CFGs).\n"
<< " " << NumCFGBlocks << " CFG blocks built.\n"
<< " " << AvgCFGBlocksPerFunction
<< " average CFG blocks per function.\n"
<< " " << MaxCFGBlocksPerFunction
<< " max CFG blocks per function.\n";
unsigned AvgUninitVariablesPerFunction = !NumUninitAnalysisFunctions ? 0
: NumUninitAnalysisVariables/NumUninitAnalysisFunctions;
unsigned AvgUninitBlockVisitsPerFunction = !NumUninitAnalysisFunctions ? 0
: NumUninitAnalysisBlockVisits/NumUninitAnalysisFunctions;
llvm::errs() << NumUninitAnalysisFunctions
<< " functions analyzed for uninitialiazed variables\n"
<< " " << NumUninitAnalysisVariables << " variables analyzed.\n"
<< " " << AvgUninitVariablesPerFunction
<< " average variables per function.\n"
<< " " << MaxUninitAnalysisVariablesPerFunction
<< " max variables per function.\n"
<< " " << NumUninitAnalysisBlockVisits << " block visits.\n"
<< " " << AvgUninitBlockVisitsPerFunction
<< " average block visits per function.\n"
<< " " << MaxUninitAnalysisBlockVisitsPerFunction
<< " max block visits per function.\n";
}