
  /// \brief Returns true if the lockset contains a lock, regardless of
  /// whether the lock is held exclusively or shared.
  bool locksetContains(MutexID Lock) const {
    return LSet.lookup(Lock);
  }

  /// \brief Returns true if the lockset contains a lock with the passed in
  /// locktype.
  bool locksetContains(MutexID Lock, LockKind KindRequested) const {
    const LockData *LockHeld = LSet.lookup(Lock);
    return (LockHeld && KindRequested == LockHeld->LKind);
  }
  /// \brief Returns true if the lockset contains a lock with at least the
  /// passed in locktype. So for example, if we pass in LK_Shared, this function
  /// returns true if the lock is held LK_Shared or LK_Exclusive. If we pass in
  /// LK_Exclusive, this function returns true if the lock is held LK_Exclusive.
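  /// As an illustrative sketch of the intended semantics (not a compiled
  /// example):
  /// \code
  ///   // Mu held LK_Exclusive:
  ///   locksetContainsAtLeast(Mu, LK_Shared);    // true
  ///   locksetContainsAtLeast(Mu, LK_Exclusive); // true
  ///   // Mu held LK_Shared:
  ///   locksetContainsAtLeast(Mu, LK_Exclusive); // false
  /// \endcode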
  bool locksetContainsAtLeast(MutexID Lock, LockKind KindRequested) const {
    switch (KindRequested) {
      case LK_Shared:
        return locksetContains(Lock);
      case LK_Exclusive:
        return locksetContains(Lock, KindRequested);
    }
  }

public:
  BuildLockset(ThreadSafetyHandler &Handler, Lockset LS, Lockset::Factory &F)
    : StmtVisitor<BuildLockset>(), Handler(Handler), LSet(LS),
      LocksetFactory(F) {}

  Lockset getLockset() {
    return LSet;
  }

  void VisitUnaryOperator(UnaryOperator *UO);
  void VisitBinaryOperator(BinaryOperator *BO);
  void VisitCastExpr(CastExpr *CE);
  void VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp);
};

/// \brief Add a new lock to the lockset, warning if the lock is already there.
/// \param LockLoc The source location of the acquire
/// \param LockExp The lock expression corresponding to the lock to be added
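/// For example, re-acquiring a mutex that is already in the lockset is
/// flagged (illustrative source, assuming Lock() is annotated as an
/// exclusive lock function):
/// \code
///   mu.Lock();
///   mu.Lock();   // double-lock warning via handleDoubleLock()
/// \endcode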
void BuildLockset::addLock(SourceLocation LockLoc, Expr *LockExp,
                           LockKind LK) {
  // FIXME: deal with acquired before/after annotations
  MutexID Mutex(LockExp);
  LockData NewLock(LockLoc, LK);
  // FIXME: Don't always warn when we have support for reentrant locks.
  if (locksetContains(Mutex))
    Handler.handleDoubleLock(Mutex.getName(), LockLoc);
  LSet = LocksetFactory.add(LSet, Mutex, NewLock);
}

/// \brief Remove a lock from the lockset, warning if the lock is not there.
/// \param LockExp The lock expression corresponding to the lock to be removed
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void BuildLockset::removeLock(SourceLocation UnlockLoc, Expr *LockExp) {
  MutexID Mutex(LockExp);
  Lockset NewLSet = LocksetFactory.remove(LSet, Mutex);
  // If the lock was not held, removing it is a no-op; warn instead.
  if (NewLSet == LSet)
    Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc);
  LSet = NewLSet;
}

/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs
const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) {
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return 0;
}

/// \brief Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessType.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
                                      AccessKind AK, Expr *MutexExp,
                                      ProtectedOperationKind POK) {
  LockKind LK = getLockKindFromAccessKind(AK);
  MutexID Mutex(MutexExp);
  if (!locksetContainsAtLeast(Mutex, LK))
    Handler.handleMutexNotHeld(D, POK, Mutex.getName(), LK, Exp->getExprLoc());
}

/// \brief This method identifies variable dereferences and checks pt_guarded_by
/// and pt_guarded_var annotations. Note that we only check these annotations
/// at the time a pointer is dereferenced.
/// FIXME: We need to check for other types of pointer dereferences
/// (e.g. [], ->) and deal with them here.
/// \param Exp An expression that has been read or written.
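/// For example, with an illustrative PT_GUARDED_BY macro expanding to
/// __attribute__((pt_guarded_by(...))):
/// \code
///   Mutex mu;
///   int *p PT_GUARDED_BY(mu);
///   void f() { *p = 1; }   // warns: 'mu' is not held for the write to '*p'
/// \endcode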
void BuildLockset::checkDereference(Expr *Exp, AccessKind AK) {
  UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp);
  if (!UO || UO->getOpcode() != clang::UO_Deref)
    return;
  Exp = UO->getSubExpr()->IgnoreParenCasts();

  const ValueDecl *D = getValueDecl(Exp);
  if(!D || !D->hasAttrs())
    return;

  if (D->getAttr<PtGuardedVarAttr>() && LSet.isEmpty())
    Handler.handleNoMutexHeld(D, POK_VarDereference, AK, Exp->getExprLoc());

  const AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (PtGuardedByAttr *PGBAttr = dyn_cast<PtGuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, PGBAttr->getArg(), POK_VarDereference);
}

/// \brief Checks guarded_by and guarded_var attributes.
/// Whenever we identify an access (read or write) of a DeclRefExpr or
/// MemberExpr, we need to check whether there are any guarded_by or
/// guarded_var attributes, and make sure we hold the appropriate mutexes.
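/// For example, with an illustrative GUARDED_BY macro expanding to
/// __attribute__((guarded_by(...))):
/// \code
///   Mutex mu;
///   int a GUARDED_BY(mu);
///   void f() { a = 1; }    // warns: 'a' is written without holding 'mu'
/// \endcode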
void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
  const ValueDecl *D = getValueDecl(Exp);
  if(!D || !D->hasAttrs())
    return;

  if (D->getAttr<GuardedVarAttr>() && LSet.isEmpty())
    Handler.handleNoMutexHeld(D, POK_VarAccess, AK, Exp->getExprLoc());

  const AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (GuardedByAttr *GBAttr = dyn_cast<GuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess);
}

/// \brief For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes. Reads are checked in
/// VisitCastExpr.
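/// For example, given the guarded variable 'a' from the sketch above, the
/// increment is treated as a write to 'a' (illustrative):
/// \code
///   void f() { a++; }      // warns if 'mu' is not held
/// \endcode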
void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
  switch (UO->getOpcode()) {
    case clang::UO_PostDec:
    case clang::UO_PostInc:
    case clang::UO_PreDec:
    case clang::UO_PreInc: {
      Expr *SubExp = UO->getSubExpr()->IgnoreParenCasts();
      checkAccess(SubExp, AK_Written);
      checkDereference(SubExp, AK_Written);
      break;
    }
    default:
      break;
  }
}

/// For binary operations which assign to a variable (writes), we need to
/// check whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;
  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
  checkAccess(LHSExp, AK_Written);
  checkDereference(LHSExp, AK_Written);
}

/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
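/// For example, in the following (illustrative) snippet the read of the
/// guarded variable 'a' reaches the CFG as an implicit lvalue-to-rvalue cast
/// of the DeclRefExpr 'a', which is checked here as an AK_Read:
/// \code
///   int x = a;             // warns if the mutex guarding 'a' is not held
/// \endcode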
void BuildLockset::VisitCastExpr(CastExpr *CE) {
  if (CE->getCastKind() != CK_LValueToRValue)
    return;
  Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts();
  checkAccess(SubExp, AK_Read);
  checkDereference(SubExp, AK_Read);
}

/// \brief This function, parameterized by an attribute type, is used to add a
/// set of locks specified as attribute arguments to the lockset.
template <typename AttrType>
void BuildLockset::addLocksToSet(LockKind LK, Attr *Attr,
                                 CXXMemberCallExpr *Exp) {
  typedef typename AttrType::args_iterator iterator_type;
  SourceLocation ExpLocation = Exp->getExprLoc();
  Expr *Parent = Exp->getImplicitObjectArgument();
  AttrType *SpecificAttr = cast<AttrType>(Attr);

  if (SpecificAttr->args_size() == 0) {
    addLock(ExpLocation, Parent, LK);
    return;
  }

  for (iterator_type I = SpecificAttr->args_begin(),
       E = SpecificAttr->args_end(); I != E; ++I)
    addLock(ExpLocation, *I, LK);
}

/// \brief When visiting CXXMemberCallExprs we need to examine the attributes on
/// the method that is being called and add, remove or check locks in the
/// lockset accordingly.
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
/// FIXME: We need to also visit CallExprs to catch/check global functions.
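/// For example, a lockable class is typically annotated along these lines
/// (attribute spellings shown for illustration):
/// \code
///   struct __attribute__((lockable)) Mutex {
///     void Lock()   __attribute__((exclusive_lock_function));
///     void Unlock() __attribute__((unlock_function));
///   };
/// \endcode
/// A call to Lock() adds the mutex to the lockset, a call to Unlock() removes
/// it (warning if it was not held), and calls to functions annotated with
/// exclusive_locks_required/shared_locks_required are checked against the
/// current lockset.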
void BuildLockset::VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp) {
  NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());

  SourceLocation ExpLocation = Exp->getExprLoc();
  Expr *Parent = Exp->getImplicitObjectArgument();

  if(!D || !D->hasAttrs())
    return;

  AttrVec &ArgAttrs = D->getAttrs();
  for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
    Attr *Attr = ArgAttrs[i];
    switch (Attr->getKind()) {
      // When we encounter an exclusive lock function, we need to add the lock
      case attr::ExclusiveLockFunction:
        addLocksToSet<ExclusiveLockFunctionAttr>(LK_Exclusive, Attr, Exp);
        break;
      // When we encounter a shared lock function, we need to add the lock
      case attr::SharedLockFunction:
        addLocksToSet<SharedLockFunctionAttr>(LK_Shared, Attr, Exp);
        break;

      // When we encounter an unlock function, we need to remove unlocked
      // mutexes from the lockset, and flag a warning if they are not there.
      case attr::UnlockFunction: {
        UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr);

        if (UFAttr->args_size() == 0) { // The lock held is the "this" object.
          removeLock(ExpLocation, Parent);
          break;
        }

        for (UnlockFunctionAttr::args_iterator I = UFAttr->args_begin(),
             E = UFAttr->args_end(); I != E; ++I)
          removeLock(ExpLocation, *I);
        break;
      }

      case attr::ExclusiveLocksRequired: {
        // FIXME: Also use this attribute to add required locks to the initial
        // lockset when processing a CFG for a function annotated with this
        // attribute.
        ExclusiveLocksRequiredAttr *ELRAttr =
            cast<ExclusiveLocksRequiredAttr>(Attr);

        for (ExclusiveLocksRequiredAttr::args_iterator
             I = ELRAttr->args_begin(), E = ELRAttr->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
        break;
      }

      case attr::SharedLocksRequired: {
        // FIXME: Also use this attribute to add required locks to the initial
        // lockset when processing a CFG for a function annotated with this
        // attribute.
        SharedLocksRequiredAttr *SLRAttr = cast<SharedLocksRequiredAttr>(Attr);

        for (SharedLocksRequiredAttr::args_iterator I = SLRAttr->args_begin(),
             E = SLRAttr->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
        break;
      }

      case attr::LocksExcluded: {
        LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr);
        for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(),
            E = LEAttr->args_end(); I != E; ++I) {
          MutexID Mutex(*I);
          if (locksetContains(Mutex))
            Handler.handleFunExcludesLock(D->getName(), Mutex.getName(),
                                          ExpLocation);
        }
        break;
      }

      case attr::LockReturned:
        // FIXME: Deal with this attribute.
        break;

      // Ignore other (non thread-safety) attributes
      default:
        break;
    }
  }
}

} // end anonymous namespace

/// \brief Flags a warning for each lock that is in LSet2 but not LSet1, or
/// else mutexes that are held shared in one lockset and exclusive in the other.
static Lockset warnIfNotInFirstSetOrNotSameKind(ThreadSafetyHandler &Handler,
                                                const Lockset LSet1,
                                                const Lockset LSet2,
                                                Lockset Intersection,
                                                Lockset::Factory &Fact) {
  for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) {
    const MutexID &LSet2Mutex = I.getKey();
    const LockData &LSet2LockData = I.getData();
    if (const LockData *LD = LSet1.lookup(LSet2Mutex)) {
      if (LD->LKind != LSet2LockData.LKind) {
        Handler.handleExclusiveAndShared(LSet2Mutex.getName(),
                                         LSet2LockData.AcquireLoc,
                                         LD->AcquireLoc);
        if (LD->LKind != LK_Exclusive) {
          // Take the exclusive lock, which is the one in LSet2.
          Intersection = Fact.add(Intersection, LSet2Mutex, LSet2LockData);
        }
      }
    } else {
      Handler.handleMutexHeldEndOfScope(LSet2Mutex.getName(),
                                        LSet2LockData.AcquireLoc);
    }
  }
  return Intersection;
}

/// \brief Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged. For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the lockset after B and C
/// are the same. In the event of a difference, we use the intersection of these
/// two locksets at the start of D.
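/// A minimal sketch of such a merge point (illustrative):
/// \code
///   if (b)
///     mu.Lock();           // lockset after 'then': {mu}
///   x = 1;                 // lockset after 'else': {}; a warning is issued
///                          // and the intersection {} is used from here on
/// \endcode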
static Lockset intersectAndWarn(ThreadSafetyHandler &Handler,
                                const Lockset LSet1, const Lockset LSet2,
                                Lockset::Factory &Fact) {
  Lockset Intersection = LSet1;
  Intersection = warnIfNotInFirstSetOrNotSameKind(Handler, LSet1, LSet2,
                                                  Intersection, Fact);

  for (Lockset::iterator I = LSet1.begin(), E = LSet1.end(); I != E; ++I) {
    if (!LSet2.contains(I.getKey())) {
      const MutexID &Mutex = I.getKey();
      const LockData &MissingLock = I.getData();
      Handler.handleMutexHeldEndOfScope(Mutex.getName(),
                                        MissingLock.AcquireLoc);
      Intersection = Fact.remove(Intersection, Mutex);
    }
  }
  return Intersection;
}

/// \brief Returns the location of the first Stmt in a Block.
static SourceLocation getFirstStmtLocation(CFGBlock *Block) {
  SourceLocation Loc;
  for (CFGBlock::const_iterator BI = Block->begin(), BE = Block->end();
       BI != BE; ++BI) {
    if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&(*BI))) {
      Loc = CfgStmt->getStmt()->getLocStart();
      if (Loc.isValid()) return Loc;
    }
  }
  if (Stmt *S = Block->getTerminator().getStmt()) {
    Loc = S->getLocStart();
    if (Loc.isValid()) return Loc;
  }
  return Loc;
}

/// \brief Warn about different locksets along backedges of loops.
/// This function is called when we encounter a back edge. At that point,
/// we need to verify that the lockset before taking the backedge is the
/// same as the lockset before entering the loop.
///
/// \param LoopEntrySet Locks before starting the loop
/// \param LoopReentrySet Locks in the last CFG block of the loop
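/// A minimal sketch of the situation this catches (illustrative):
/// \code
///   while (cond) {         // lockset at loop entry: {}
///     mu.Lock();           // lockset at the back edge: {mu} -- warning
///   }
/// \endcode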
static void warnBackEdgeUnequalLocksets(ThreadSafetyHandler &Handler,
                                        const Lockset LoopReentrySet,
                                        const Lockset LoopEntrySet,
                                        SourceLocation FirstLocInLoop,
                                        Lockset::Factory &Fact) {
  assert(FirstLocInLoop.isValid());
  // Warn for locks held at the start of the loop, but not the end.
  for (Lockset::iterator I = LoopEntrySet.begin(), E = LoopEntrySet.end();
       I != E; ++I) {
    if (!LoopReentrySet.contains(I.getKey())) {
      // We report this error at the location of the first statement in a loop
      Handler.handleNoLockLoopEntry(I.getKey().getName(), FirstLocInLoop);
    }
  }

  // Warn for locks held at the end of the loop, but not at the start.
  warnIfNotInFirstSetOrNotSameKind(Handler, LoopEntrySet, LoopReentrySet,
                                   LoopReentrySet, Fact);
}

namespace clang { namespace thread_safety {
/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void runThreadSafetyAnalysis(AnalysisContext &AC,
                             ThreadSafetyHandler &Handler) {
  CFG *CFGraph = AC.getCFG();
  if (!CFGraph) return;
  const Decl *D = AC.getDecl();
  if (D && D->getAttr<NoThreadSafetyAnalysisAttr>()) return;

  Lockset::Factory LocksetFactory;

  // FIXME: Switch to SmallVector? Otherwise improve performance impact?
  std::vector<Lockset> EntryLocksets(CFGraph->getNumBlockIDs(),
                                     LocksetFactory.getEmptyMap());
  std::vector<Lockset> ExitLocksets(CFGraph->getNumBlockIDs(),
                                    LocksetFactory.getEmptyMap());

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  TopologicallySortedCFG SortedGraph(CFGraph);
  CFGBlockSet VisitedBlocks(CFGraph);

  for (TopologicallySortedCFG::iterator I = SortedGraph.begin(),
       E = SortedGraph.end(); I!= E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();

    VisitedBlocks.insert(CurrBlock);

    // Use the default initial lockset in case there are no predecessors.
    Lockset &Entryset = EntryLocksets[CurrBlockID];
    Lockset &Exitset = ExitLocksets[CurrBlockID];

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
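    // A sketch of the scenario described above (illustrative):
    //   if (b) { L.Lock(); M.Lock(); }
    //   else   { L.Lock(); }
    //   // intersection = {L}, union = {L, M}
    //   M.Unlock();   // reported as unlocking a mutex that is not held,
    //                 // though the likelier bug is the missing M.Lock().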
    bool LocksetInitialized = false;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE  = CurrBlock->pred_end(); PI != PE; ++PI) {

      // if *PI -> CurrBlock is a back edge
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
        continue;

      int PrevBlockID = (*PI)->getBlockID();
      if (!LocksetInitialized) {
        Entryset = ExitLocksets[PrevBlockID];
        LocksetInitialized = true;
      } else {
        Entryset = intersectAndWarn(Handler, Entryset,
                                    ExitLocksets[PrevBlockID], LocksetFactory);
      }
    }

    BuildLockset LocksetBuilder(Handler, Entryset, LocksetFactory);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&*BI))
        LocksetBuilder.Visit(const_cast<Stmt*>(CfgStmt->getStmt()));
    }
    Exitset = LocksetBuilder.getLockset();

    // For every back edge from CurrBlock (the end of the loop) to another block
    // (FirstLoopBlock) we need to check that the Lockset of CurrBlock is equal
    // to the one held at the beginning of FirstLoopBlock. We can look up the
    // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE  = CurrBlock->succ_end(); SI != SE; ++SI) {

      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      SourceLocation FirstLoopLocation = getFirstStmtLocation(FirstLoopBlock);

      assert(FirstLoopLocation.isValid());
      // Fail gracefully in release code.
      if (!FirstLoopLocation.isValid())
        continue;

      Lockset PreLoop = EntryLocksets[FirstLoopBlock->getBlockID()];
      Lockset LoopEnd = ExitLocksets[CurrBlockID];
      warnBackEdgeUnequalLocksets(Handler, LoopEnd, PreLoop, FirstLoopLocation,
                                  LocksetFactory);
    }
  }

  Lockset FinalLockset = ExitLocksets[CFGraph->getExit().getBlockID()];
  if (!FinalLockset.isEmpty()) {
    for (Lockset::iterator I=FinalLockset.begin(), E=FinalLockset.end();
         I != E; ++I) {
      const MutexID &Mutex = I.getKey();
      const LockData &MissingLock = I.getData();

      std::string FunName = "<unknown>";
      if (const NamedDecl *ContextDecl = dyn_cast<NamedDecl>(AC.getDecl())) {
        FunName = ContextDecl->getDeclName().getAsString();
      }

      Handler.handleNoUnlock(Mutex.getName(), FunName, MissingLock.AcquireLoc);
    }
  }
}

}} // end namespace clang::thread_safety

//===----------------------------------------------------------------------===//
// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
//  warnings on a function, method, or block.
//===----------------------------------------------------------------------===//

clang::sema::AnalysisBasedWarnings::Policy::Policy() {
  enableCheckFallThrough = 1;
  enableCheckUnreachable = 0;
  enableThreadSafetyAnalysis = 0;
}

clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
  : S(s),
    NumFunctionsAnalyzed(0),
    NumFunctionsWithBadCFGs(0),
    NumCFGBlocks(0),
    MaxCFGBlocksPerFunction(0),
    NumUninitAnalysisFunctions(0),
    NumUninitAnalysisVariables(0),
    MaxUninitAnalysisVariablesPerFunction(0),
    NumUninitAnalysisBlockVisits(0),
    MaxUninitAnalysisBlockVisitsPerFunction(0) {
  Diagnostic &D = S.getDiagnostics();
  DefaultPolicy.enableCheckUnreachable = (unsigned)
    (D.getDiagnosticLevel(diag::warn_unreachable, SourceLocation()) !=
        Diagnostic::Ignored);
  DefaultPolicy.enableThreadSafetyAnalysis = (unsigned)
    (D.getDiagnosticLevel(diag::warn_double_lock, SourceLocation()) !=
     Diagnostic::Ignored);
}

static void flushDiagnostics(Sema &S, sema::FunctionScopeInfo *fscope) {
  for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
       i = fscope->PossiblyUnreachableDiags.begin(),
       e = fscope->PossiblyUnreachableDiags.end();
       i != e; ++i) {
    const sema::PossiblyUnreachableDiag &D = *i;
    S.Diag(D.Loc, D.PD);
  }
}

void clang::sema::
AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
                                     sema::FunctionScopeInfo *fscope,
                                     const Decl *D, const BlockExpr *blkExpr) {
  // We avoid doing analysis-based warnings when there are errors for
  // two reasons:
  // (1) The CFGs often can't be constructed (if the body is invalid), so
  //     don't bother trying.
  // (2) The code already has problems; running the analysis just takes more
  //     time.
  Diagnostic &Diags = S.getDiagnostics();

  // Do not do any analysis for declarations in system headers if we are
  // going to just ignore them.
  if (Diags.getSuppressSystemWarnings() &&
      S.SourceMgr.isInSystemHeader(D->getLocation()))
    return;

  // For code in dependent contexts, we'll do this at instantiation time.
  if (cast<DeclContext>(D)->isDependentContext())
    return;
  if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred()) {
    // Flush out any possibly unreachable diagnostics.
    flushDiagnostics(S, fscope);
    return;
  }

  const Stmt *Body = D->getBody();
  assert(Body);

  AnalysisContext AC(D, 0);

  // Don't generate EH edges for CallExprs as we'd like to avoid the n^2
  // explosion for destructors that can result and the compile time hit.
  AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
  AC.getCFGBuildOptions().AddEHEdges = false;
  AC.getCFGBuildOptions().AddInitializers = true;
  AC.getCFGBuildOptions().AddImplicitDtors = true;
  
  // Force that certain expressions appear as CFGElements in the CFG.  This
  // is used to speed up various analyses.
  // FIXME: This isn't the right factoring.  This is here for initial
  // prototyping, but we need a way for analyses to say what expressions they
  // expect to always be CFGElements and then fill in the BuildOptions
  // appropriately.  This is essentially a layering violation.
  if (P.enableCheckUnreachable) {
    // Unreachable code analysis requires a linearized CFG.
    AC.getCFGBuildOptions().setAllAlwaysAdd();
  }
  else {
    AC.getCFGBuildOptions()
      .setAlwaysAdd(Stmt::BinaryOperatorClass)
      .setAlwaysAdd(Stmt::BlockExprClass)
      .setAlwaysAdd(Stmt::CStyleCastExprClass)
      .setAlwaysAdd(Stmt::DeclRefExprClass)
      .setAlwaysAdd(Stmt::ImplicitCastExprClass)
      .setAlwaysAdd(Stmt::UnaryOperatorClass);
  }

  // The analysis context has now been configured with the CFG build options.
  
  // Emit delayed diagnostics.
  if (!fscope->PossiblyUnreachableDiags.empty()) {
    bool analyzed = false;

    // Register the expressions with the CFGBuilder.
    for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
         i = fscope->PossiblyUnreachableDiags.begin(),
         e = fscope->PossiblyUnreachableDiags.end();
         i != e; ++i) {
      if (const Stmt *stmt = i->stmt)
        AC.registerForcedBlockExpression(stmt);
    }

    if (AC.getCFG()) {
      analyzed = true;
      for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
            i = fscope->PossiblyUnreachableDiags.begin(),
            e = fscope->PossiblyUnreachableDiags.end();
            i != e; ++i)
      {
        const sema::PossiblyUnreachableDiag &D = *i;
        bool processed = false;
        if (const Stmt *stmt = i->stmt) {
          const CFGBlock *block = AC.getBlockForRegisteredExpression(stmt);
          assert(block);
          if (CFGReverseBlockReachabilityAnalysis *cra = AC.getCFGReachablityAnalysis()) {
            // Can this block be reached from the entrance?
            if (cra->isReachable(&AC.getCFG()->getEntry(), block))
              S.Diag(D.Loc, D.PD);
            processed = true;
          }
        }
        if (!processed) {
          // Emit the warning anyway if we cannot map to a basic block.
          S.Diag(D.Loc, D.PD);
        }
      }
    }

    if (!analyzed)
      flushDiagnostics(S, fscope);
  }

  if (P.enableCheckFallThrough) {
    const CheckFallThroughDiagnostics &CD =
      (isa<BlockDecl>(D) ? CheckFallThroughDiagnostics::MakeForBlock()
                         : CheckFallThroughDiagnostics::MakeForFunction(D));
    CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC);
  }

  // Check for thread safety violations
  if (P.enableThreadSafetyAnalysis) {
    thread_safety::ThreadSafetyReporter Reporter(S);
    thread_safety::runThreadSafetyAnalysis(AC, Reporter);
    Reporter.emitDiagnostics();
  }
  if (Diags.getDiagnosticLevel(diag::warn_uninit_var, D->getLocStart())
      != Diagnostic::Ignored ||
      Diags.getDiagnosticLevel(diag::warn_maybe_uninit_var, D->getLocStart())
      != Diagnostic::Ignored) {
    if (CFG *cfg = AC.getCFG()) {
      UninitValsDiagReporter reporter(S);
      UninitVariablesAnalysisStats stats;
      std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
      runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
                                        reporter, stats);

      if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
        ++NumUninitAnalysisFunctions;
        NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
        NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
        MaxUninitAnalysisVariablesPerFunction =
            std::max(MaxUninitAnalysisVariablesPerFunction,
                     stats.NumVariablesAnalyzed);
        MaxUninitAnalysisBlockVisitsPerFunction =
            std::max(MaxUninitAnalysisBlockVisitsPerFunction,
                     stats.NumBlockVisits);
      }
    }
  }

  // Collect statistics about the CFG if it was built.
  if (S.CollectStats && AC.isCFGBuilt()) {
    ++NumFunctionsAnalyzed;
    if (CFG *cfg = AC.getCFG()) {
      // If we successfully built a CFG for this context, record some more
      // detail information about it.
      NumCFGBlocks += cfg->getNumBlockIDs();
      MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
                                         cfg->getNumBlockIDs());
    } else {
      ++NumFunctionsWithBadCFGs;
    }
  }
}

void clang::sema::AnalysisBasedWarnings::PrintStats() const {
  llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";

  unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs;
  unsigned AvgCFGBlocksPerFunction =
      !NumCFGsBuilt ? 0 : NumCFGBlocks/NumCFGsBuilt;
  llvm::errs() << NumFunctionsAnalyzed << " functions analyzed ("
               << NumFunctionsWithBadCFGs << " w/o CFGs).\n"
               << "  " << NumCFGBlocks << " CFG blocks built.\n"
               << "  " << AvgCFGBlocksPerFunction
               << " average CFG blocks per function.\n"
               << "  " << MaxCFGBlocksPerFunction
               << " max CFG blocks per function.\n";

  unsigned AvgUninitVariablesPerFunction = !NumUninitAnalysisFunctions ? 0
      : NumUninitAnalysisVariables/NumUninitAnalysisFunctions;
  unsigned AvgUninitBlockVisitsPerFunction = !NumUninitAnalysisFunctions ? 0
      : NumUninitAnalysisBlockVisits/NumUninitAnalysisFunctions;
  llvm::errs() << NumUninitAnalysisFunctions
               << " functions analyzed for uninitialiazed variables\n"
               << "  " << NumUninitAnalysisVariables << " variables analyzed.\n"
               << "  " << AvgUninitVariablesPerFunction
               << " average variables per function.\n"
               << "  " << MaxUninitAnalysisVariablesPerFunction
               << " max variables per function.\n"
               << "  " << NumUninitAnalysisBlockVisits << " block visits.\n"
               << "  " << AvgUninitBlockVisitsPerFunction
               << " average block visits per function.\n"
               << "  " << MaxUninitAnalysisBlockVisitsPerFunction
               << " max block visits per function.\n";
}