def doDelay(delta):
    """Delay startup for delta seconds to facilitate debugger attachment."""
    def alarm_handler(*args):
        raise Exception("timeout")

    signal.signal(signal.SIGALRM, alarm_handler)
    signal.alarm(delta)
    sys.stdout.write("pid=%d\n" % os.getpid())
    sys.stdout.write("Enter RET to proceed (or timeout after %d seconds):" %
                     delta)
    sys.stdout.flush()
    try:
        text = sys.stdin.readline()
    except:
        # The alarm handler raises a plain Exception on timeout; swallow it
        # and proceed with empty input.
        text = ""
    signal.alarm(0)
    sys.stdout.write("proceeding...\n")


def visit(prefix, dir, names):
    """Visitor function for os.path.walk(path, visit, arg)."""

    global suite
    global excluded

    if set(dir.split(os.sep)).intersection(excluded):
        #print "Detected an excluded dir component: %s" % dir
        return

    for name in names:
        if os.path.isdir(os.path.join(dir, name)):
            continue

        if '.py' == os.path.splitext(name)[1] and name.startswith(prefix):
            # Try to match the regexp pattern, if specified.
            if regexp:
                import re
                if re.search(regexp, name):
                    #print "Filename: '%s' matches pattern: '%s'" % (name, regexp)
                    pass
                else:
                    #print "Filename: '%s' does not match pattern: '%s'" % (name, regexp)
                    continue

            # We found a match for our test.  Add it to the suite.

            # Update the sys.path first.
            if not sys.path.count(dir):
                sys.path.insert(0, dir)
            base = os.path.splitext(name)[0]

            # Thoroughly check the filterspec against the base module and admit
            # the (base, filterspec) combination only when it makes sense.
            filterspec = None
            for filterspec in filters:
                # Optimistically set the flag to True.
                filtered = True
                module = __import__(base)
                parts = filterspec.split('.')
                obj = module
                for part in parts:
                    try:
                        parent, obj = obj, getattr(obj, part)
                    except AttributeError:
                        # The filterspec has failed.
                        filtered = False
                        break

                # If filtered, we have a good filterspec.  Add it.
                if filtered:
                    #print "adding filter spec %s to module %s" % (filterspec, module)
                    suite.addTests(
                        unittest2.defaultTestLoader.loadTestsFromName(filterspec, module))
                    continue

            # Forgo this module if the (base, filterspec) combo is invalid
            # and no '-g' option is specified
            if filters and fs4all and not filtered:
                continue
            # Add either the filtered test case(s) (which was done above) or the entire test class.
            if not filterspec or not filtered:
                # A simple case of just the module name.  Also the failover case
                # from the filterspec branch when the (base, filterspec) combo
                # doesn't make sense.
                suite.addTests(unittest2.defaultTestLoader.loadTestsFromName(base))
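            # Illustrative (hypothetical) example: with base 'TestFoo' and a
            # filterspec of 'FooTestCase.test_bar', the getattr() walk above
            # admits the combo only if module TestFoo has a FooTestCase class
            # with a test_bar attribute; otherwise the whole module is loaded.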


def lldbLoggings():
    """Check the relevant environment variables and enable lldb logging if requested."""

    # Turn on logging for debugging purposes if ${LLDB_LOG} environment variable is
    # defined.  Use ${LLDB_LOG} to specify the log file.
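    # For example (log file path and channel list are up to the user):
    #   LLDB_LOG=/tmp/lldb.log LLDB_LOG_OPTION="process expr" python dotest.py ...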
    ci = lldb.DBG.GetCommandInterpreter()
    res = lldb.SBCommandReturnObject()
    if ("LLDB_LOG" in os.environ):
        if ("LLDB_LOG_OPTION" in os.environ):
            lldb_log_option = os.environ["LLDB_LOG_OPTION"]
        else:
            lldb_log_option = "event process expr state api"
        ci.HandleCommand(
            "log enable -n -f " + os.environ["LLDB_LOG"] + " lldb " + lldb_log_option,
            res)
        if not res.Succeeded():
            raise Exception('log enable failed (check the LLDB_LOG env variable)')
    # Ditto for gdb-remote logging if ${GDB_REMOTE_LOG} environment variable is defined.
    # Use ${GDB_REMOTE_LOG} to specify the log file.
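    # For example (hypothetical values):
    #   GDB_REMOTE_LOG=/tmp/gdb-remote.log GDB_REMOTE_LOG_OPTION="packets" python dotest.py ...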
    if ("GDB_REMOTE_LOG" in os.environ):
        if ("GDB_REMOTE_LOG_OPTION" in os.environ):
            gdb_remote_log_option = os.environ["GDB_REMOTE_LOG_OPTION"]
        else:
            gdb_remote_log_option = "packets process"
        ci.HandleCommand(
            "log enable -n -f " + os.environ["GDB_REMOTE_LOG"] + " gdb-remote "
            + gdb_remote_log_option,
            res)
        if not res.Succeeded():
            raise Exception('log enable failed (check the GDB_REMOTE_LOG env variable)')

def getMyCommandLine():
    """Return the command line used to invoke the test driver."""
    # 'ps -o command=CMD' prints a header line ("CMD") followed by the command
    # line of the given pid, hence lines[1].
    ps = subprocess.Popen([which('ps'), '-o', "command=CMD", str(os.getpid())], stdout=subprocess.PIPE).communicate()[0]
    lines = ps.split('\n')
    cmd_line = lines[1]
    return cmd_line
# ======================================== #
#                                          #
# Execution of the test driver starts here #
#                                          #
# ======================================== #

def checkDsymForUUIDIsNotOn():
    cmd = ["defaults", "read", "com.apple.DebugSymbols"]
    pipe = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
    cmd_output = pipe.stdout.read()
    if cmd_output and "DBGFileMappedPaths = " in cmd_output:
        print "%s =>" % ' '.join(cmd)
        print "Disable automatic lookup and caching of dSYMs before running the test suite!"
        print "Exiting..."
        sys.exit(0)

# On Mac OS X, check that the com.apple.DebugSymbols defaults domain does not
# exist before proceeding to run the test suite.
if sys.platform.startswith("darwin"):
    checkDsymForUUIDIsNotOn()
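# If the check above fires, one way to clear the setting is (verify the exact
# domain/key on your system first):
#
#   defaults delete com.apple.DebugSymbols DBGFileMappedPaths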

# Start the actions by first parsing the options while setting up the test
# directories, followed by setting up the search paths for lldb utilities;
# then, we walk the directory trees and collect the tests into our test suite.

#
# If '-d' is specified, do a delay of 10 seconds for the debugger to attach.
#
if delay:
    doDelay(10)

#
# If '-l' is specified, do not skip the long running tests.
#
# (the guard flag below is set by the option parsing earlier in this file)
if not skip_long_running_test:
    os.environ["LLDB_SKIP_LONG_RUNNING_TEST"] = "NO"

#
# Walk through the testdirs while collecting tests.
#
for testdir in testdirs:
    os.path.walk(testdir, visit, 'Test')

# Now that we have loaded all the test cases, run the whole test suite.
# For the time being, let's bracket the test runner within the
# lldb.SBDebugger.Initialize()/Terminate() pair.
# Update: the act of importing lldb now executes lldb.SBDebugger.Initialize(),
# there's no need to call it a second time.
#lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
# Create a singleton SBDebugger in the lldb namespace.
lldb.DBG = lldb.SBDebugger.Create()

# Put the blacklist in the lldb namespace, to be used by lldb.TestBase.
# The pre_flight and post_flight come from reading a config file.
lldb.pre_flight = pre_flight
lldb.post_flight = post_flight


def getsource_if_available(obj):
    """
    Return the text of the source code for an object if available.  Otherwise,
    a print representation is returned.
    """
    import inspect
    try:
        return inspect.getsource(obj)
    except:
        return repr(obj)

if not noHeaders:
    print "lldb.pre_flight:", getsource_if_available(lldb.pre_flight)
    print "lldb.post_flight:", getsource_if_available(lldb.post_flight)
# If either pre_flight or post_flight is defined, set lldb.test_remote to True.
if lldb.pre_flight or lldb.post_flight:
    lldb.test_remote = True
else:
    lldb.test_remote = False

# The lldbtest_remote_sandbox and lldbtest_remote_shell_template variables also come from the config file.
lldb.lldbtest_remote_sandbox = lldbtest_remote_sandbox
lldb.lldbtest_remote_sandboxed_executable = None
lldb.lldbtest_remote_shell_template = lldbtest_remote_shell_template

# Put all these test decorators in the lldb namespace.
lldb.dont_do_python_api_test = dont_do_python_api_test
lldb.just_do_python_api_test = just_do_python_api_test
lldb.just_do_benchmarks_test = just_do_benchmarks_test
lldb.dont_do_dsym_test = dont_do_dsym_test
lldb.dont_do_dwarf_test = dont_do_dwarf_test
# Do we need to skip build and cleanup?
lldb.skip_build_and_cleanup = skip_build_and_cleanup

# Put bmExecutable, bmBreakpointSpec, and bmIterationCount into the lldb namespace, too.
lldb.bmExecutable = bmExecutable
lldb.bmBreakpointSpec = bmBreakpointSpec
lldb.bmIterationCount = bmIterationCount
# And don't forget the runHooks!
lldb.runHooks = runHooks

# Turn on lldb loggings if necessary.
lldbLoggings()
# Install the control-c handler.
unittest2.signals.installHandler()

# If sdir_name is not specified through the '-s sdir_name' option, get a
# timestamp string and export it as LLDB_SESSION_DIRNAME environment var.  This will
# be used when/if we want to dump the session info of individual test cases
# later on.
#
# See also TestBase.dumpSessionInfo() in lldbtest.py.
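# For example, a run started at 2012-05-16 13:35:42 would get the session
# directory name '2012-05-16-13_35_42' (unless '-s' supplied one).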
import datetime
# Windows platforms don't like ':' in the pathname.
timestamp_started = datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")
if not sdir_name:
    sdir_name = timestamp_started
os.environ["LLDB_SESSION_DIRNAME"] = os.path.join(os.getcwd(), sdir_name)
if not noHeaders:
    sys.stderr.write("\nSession logs for test failures/errors/unexpected successes"
                     " will go into directory '%s'\n" % sdir_name)
    sys.stderr.write("Command invoked: %s\n" % getMyCommandLine())
if not os.path.isdir(sdir_name):
    os.mkdir(sdir_name)
where_to_save_session = os.getcwd()
fname = os.path.join(sdir_name, "TestStarted")
with open(fname, "w") as f:
    print >> f, "Test started at: %s\n" % timestamp_started
    print >> f, svn_info
    print >> f, "Command invoked: %s\n" % getMyCommandLine()

#
# Invoke the default TextTestRunner to run the test suite, possibly iterating
# over different configurations.
#

iterArchs = False
iterCompilers = False

if isinstance(archs, list) and len(archs) >= 1:
    iterArchs = True

if not compilers and "compilers" in config:
    compilers = config["compilers"]

#
# Sanity check that the requested compilers exist; any that is not an
# executable program is dropped from the list.
#
for i in range(len(compilers)):
    c = compilers[i]
    if which(c):
        continue
    else:
        if sys.platform.startswith("darwin"):
            pipe = subprocess.Popen(['xcrun', '-find', c], stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
            cmd_output = pipe.stdout.read()
            if cmd_output:
                if "not found" in cmd_output:
                    print "dropping %s from the compilers used" % c
                    # list.remove() takes a value, not an index.
                    compilers.remove(c)
                else:
                    compilers[i] = cmd_output.split('\n')[0]
                    print "'xcrun -find %s' returning %s" % (c, compilers[i])

if not parsable:
    print "compilers=%s" % str(compilers)

if not compilers:
    print "No eligible compiler found, exiting."
    sys.exit(1)

if isinstance(compilers, list) and len(compilers) >= 1:
    iterCompilers = True

# Make a shallow copy of sys.path, we need to manipulate the search paths later.
# This is only necessary if we are relocated and with different configurations.
if rdir:
    old_sys_path = sys.path[:]

# If we iterate on archs or compilers, there is a chance we want to split stderr/stdout.
if iterArchs or iterCompilers:
    old_stderr = sys.stderr
    old_stdout = sys.stdout
    new_stderr = None
    new_stdout = None

# Iterate over all possible architecture and compiler combinations.
for ia in range(len(archs) if iterArchs else 1):
    archConfig = ""
    if iterArchs:
        archConfig = "arch=%s" % archs[ia]
    for ic in range(len(compilers) if iterCompilers else 1):
        if iterCompilers:
            configString = "%s compiler=%s" % (archConfig, compilers[ic])
        else:
            configString = archConfig

        if iterArchs or iterCompilers:
            # Translate ' ' to '-' for pathname component.
            from string import maketrans
            tbl = maketrans(' ', '-')
            configPostfix = configString.translate(tbl)
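            # For example, "arch=x86_64 compiler=clang" becomes
            # "arch=x86_64-compiler=clang".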

            # Check whether we need to split stderr/stdout into configuration
            # specific files.
            if old_stderr.name != '<stderr>' and config.get('split_stderr'):
                if new_stderr:
                    new_stderr.close()
                new_stderr = open("%s.%s" % (old_stderr.name, configPostfix), "w")
                sys.stderr = new_stderr
            if old_stdout.name != '<stdout>' and config.get('split_stdout'):
                if new_stdout:
                    new_stdout.close()
                new_stdout = open("%s.%s" % (old_stdout.name, configPostfix), "w")
                sys.stdout = new_stdout
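            # For example, if stdout was redirected to a file 'blah.log'
            # beforehand, this configuration's output goes to
            # 'blah.log.arch=x86_64-compiler=clang' (hypothetical values).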
            # If we specified a relocated directory to run the test suite, do
            # the extra housekeeping to copy the testdirs to a configStringified
            # directory and to update sys.path before invoking the test runner.
            # The purpose is to separate the configuration-specific directories
            # from each other.
            if rdir:
                from shutil import copytree, rmtree, ignore_patterns

                newrdir = "%s.%s" % (rdir, configPostfix)

                # Copy the tree to a new directory with postfix name configPostfix.
                if os.path.exists(newrdir):
                    rmtree(newrdir)
                copytree(rdir, newrdir, ignore=ignore_patterns('*.pyc', '*.o', '*.d'))
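                # For example, a relocated dir '/tmp/lldbtest' is copied to
                # '/tmp/lldbtest.arch=x86_64-compiler=clang' (hypothetical path).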

                # Update the LLDB_TEST environment variable to reflect new top
                # level test directory.
                #
                # See also lldbtest.TestBase.setUpClass(cls).
                if len(testdirs) == 1 and os.path.basename(testdirs[0]) == 'test':
                    os.environ["LLDB_TEST"] = os.path.join(newrdir, 'test')
                else:
                    os.environ["LLDB_TEST"] = newrdir

                # And update the Python search paths for modules.
                sys.path = [x.replace(rdir, newrdir, 1) for x in old_sys_path]

            # Output the configuration.
            if not parsable:
                sys.stderr.write("\nConfiguration: " + configString + "\n")

        #print "sys.stderr name is", sys.stderr.name
        #print "sys.stdout name is", sys.stdout.name

        # First, write out the number of collected test cases.
        if not parsable:
            sys.stderr.write(separator + "\n")
            sys.stderr.write("Collected %d test%s\n\n"
                             % (suite.countTestCases(),
                                suite.countTestCases() != 1 and "s" or ""))

        class LLDBTestResult(unittest2.TextTestResult):
            """
            Enforce a singleton pattern to allow introspection of test progress.

            Overwrite addError(), addFailure(), and addExpectedFailure() methods
            to enable each test instance to track its failure/error status.  It
            is used in the LLDB test framework to emit detailed trace messages
            to a log file for easier human inspection of test failures/errors.
            """
            __singleton__ = None
            __ignore_singleton__ = False

            @staticmethod
            def getTerminalSize():
                import os
                env = os.environ
                def ioctl_GWINSZ(fd):
                    # Ask the terminal driver for the window size; returns a
                    # (rows, cols) tuple, or None if fd is not a tty.
                    try:
                        import fcntl, termios, struct
                        cr = struct.unpack('hh',
                                           fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
                    except:
                        return
                    return cr
                cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
                if not cr:
                    try:
                        fd = os.open(os.ctermid(), os.O_RDONLY)
                        cr = ioctl_GWINSZ(fd)
                        os.close(fd)
                    except:
                        pass
                if not cr:
                    cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
                return int(cr[1]), int(cr[0])

            def __init__(self, *args):
                if not LLDBTestResult.__ignore_singleton__ and LLDBTestResult.__singleton__:
                    raise Exception("LLDBTestResult instantiated more than once")
                super(LLDBTestResult, self).__init__(*args)
                LLDBTestResult.__singleton__ = self
                # Now put this singleton into the lldb module namespace.
                lldb.test_result = self
                # Computes the format string for displaying the counter.
                global suite
                counterWidth = len(str(suite.countTestCases()))
                self.fmt = "%" + str(counterWidth) + "d: "
                self.indentation = ' ' * (counterWidth + 2)
                # This counts from 1 .. suite.countTestCases().
                self.counter = 0
                (width, height) = LLDBTestResult.getTerminalSize()
                global progress_bar
                if width > 10 and not parsable and progress_bar:
                    try:
                        self.progressbar = progress.ProgressWithEvents(stdout=self.stream,start=0,end=suite.countTestCases(),width=width-10)
                    except:
                        self.progressbar = None
            def _config_string(self, test):
                compiler = getattr(test, "getCompiler", None)
                arch = getattr(test, "getArchitecture", None)
                return "%s-%s" % (compiler() if compiler else "", arch() if arch else "")

            def _exc_info_to_string(self, err, test):
                """Overrides superclass TestResult's method in order to append
                our test config info string to the exception info string."""
                if hasattr(test, "getArchitecture") and hasattr(test, "getCompiler"):
                    return '%sConfig=%s-%s' % (super(LLDBTestResult, self)._exc_info_to_string(err, test),
                                               test.getArchitecture(),
                                               test.getCompiler())
                else:
                    return super(LLDBTestResult, self)._exc_info_to_string(err, test)
            def getDescription(self, test):
                doc_first_line = test.shortDescription()
                if self.descriptions and doc_first_line:
                    return '\n'.join((str(test), self.indentation + doc_first_line))
                else:
                    return str(test)

            def getCategoriesForTest(self, test):
                # Import here so that 'inspect' is visible in this method's
                # scope (an import in a sibling method would not be).
                import inspect
                if hasattr(test, "_testMethodName"):
                    test_method = getattr(test, test._testMethodName)
                else:
                    test_method = None
                if test_method is not None and hasattr(test_method, "getCategories"):
                    test_categories = test_method.getCategories(test)
                elif hasattr(test, "getCategories"):
                    test_categories = test.getCategories()
                elif inspect.ismethod(test) and test.__self__ is not None and hasattr(test.__self__, "getCategories"):
                    test_categories = test.__self__.getCategories()
                else:
                    test_categories = []
                if test_categories is None:
                    test_categories = []
                return test_categories

            def shouldSkipBecauseOfCategories(self,test):
                global useCategories
                import inspect
                if useCategories:
                    global categoriesList
                    test_categories = self.getCategoriesForTest(test)
                    if len(test_categories) == 0 or len(categoriesList & set(test_categories)) == 0:
                        return True

                global skipCategories
                for category in skipCategories:
                    if category in self.getCategoriesForTest(test):
                        return True

                return False

            def hardMarkAsSkipped(self,test):
                getattr(test, test._testMethodName).__func__.__unittest_skip__ = True
                getattr(test, test._testMethodName).__func__.__unittest_skip_why__ = "test case does not fall in any category of interest for this run"
                test.__class__.__unittest_skip__ = True
                test.__class__.__unittest_skip_why__ = "test case does not fall in any category of interest for this run"

            def startTest(self, test):
                if self.shouldSkipBecauseOfCategories(test):
                    self.hardMarkAsSkipped(test)
                self.counter += 1
                if self.showAll:
                    self.stream.write(self.fmt % self.counter)
                super(LLDBTestResult, self).startTest(test)
            def addSuccess(self, test):
                global parsable
                super(LLDBTestResult, self).addSuccess(test)
                if parsable:
                    self.stream.write("PASS: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))

            def addError(self, test, err):
                global parsable
                super(LLDBTestResult, self).addError(test, err)
                method = getattr(test, "markError", None)
                if method:
                    method()
                if parsable:
                    self.stream.write("FAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
            def addFailure(self, test, err):
                global parsable
                super(LLDBTestResult, self).addFailure(test, err)
                method = getattr(test, "markFailure", None)
                if method:
                    method()
                if parsable:
                    self.stream.write("FAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
                if useCategories:
                    test_categories = self.getCategoriesForTest(test)
                    for category in test_categories:
                        if category in failuresPerCategory:
                            failuresPerCategory[category] = failuresPerCategory[category] + 1
                        else:
                            failuresPerCategory[category] = 1
            def addExpectedFailure(self, test, err, bugnumber):
                global parsable
                super(LLDBTestResult, self).addExpectedFailure(test, err, bugnumber)
                method = getattr(test, "markExpectedFailure", None)
                if method:
                    method(err, bugnumber)
                if parsable:
                    self.stream.write("XFAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
            def addSkip(self, test, reason):
                global sdir_has_content
                global parsable
                sdir_has_content = True
                super(LLDBTestResult, self).addSkip(test, reason)
                method = getattr(test, "markSkippedTest", None)
                if method:
                    method()
                if parsable:
                    self.stream.write("UNSUPPORTED: LLDB (%s) :: %s (%s) \n" % (self._config_string(test), str(test), reason))
            def addUnexpectedSuccess(self, test, bugnumber):
                global parsable
                super(LLDBTestResult, self).addUnexpectedSuccess(test, bugnumber)
                method = getattr(test, "markUnexpectedSuccess", None)
                if method:
                    method(bugnumber)
                if parsable:
                    self.stream.write("XPASS: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))

        if parsable:
            v = 0
        elif progress_bar:
            v = 1
        else:
            v = verbose
        # Invoke the test runner.  ('count' below is the repeat count parsed
        # from the command line options earlier in this file, not shown in
        # this excerpt.)
        if count == 1:
            result = unittest2.TextTestRunner(stream=sys.stderr,
                                              verbosity=v,
                                              resultclass=LLDBTestResult).run(suite)
        else:
            # We are invoking the same test suite more than once.  In this case,
            # mark __ignore_singleton__ flag as True so the singleton pattern is
            # not enforced.
            LLDBTestResult.__ignore_singleton__ = True
            for i in range(count):
                result = unittest2.TextTestRunner(stream=sys.stderr,
                                                  verbosity=v,
                                                  resultclass=LLDBTestResult).run(suite)

if sdir_has_content and not parsable:
    sys.stderr.write("Session logs for test failures/errors/unexpected successes"
                     " can be found in directory '%s'\n" % sdir_name)
if useCategories and len(failuresPerCategory) > 0:
    sys.stderr.write("Failures per category:\n")
    for category in failuresPerCategory:
        sys.stderr.write("%s - %d\n" % (category, failuresPerCategory[category]))

os.chdir(where_to_save_session)
fname = os.path.join(sdir_name, "TestFinished")
with open(fname, "w") as f:
    print >> f, "Test finished at: %s\n" % datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")

# Terminate the test suite if ${LLDB_TESTSUITE_FORCE_FINISH} is defined.
# This should not be necessary now.
if ("LLDB_TESTSUITE_FORCE_FINISH" in os.environ):
    print "Terminating Test suite..."
    subprocess.Popen(["/bin/sh", "-c", "kill %s; exit 0" % (os.getpid())])
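
# A hypothetical invocation, using options referenced in the comments above
# (the full option parsing lives earlier in this file):
#
#   python dotest.py -l -d -s my_session_dir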