"""
LLDB module which provides the abstract base class of lldb test case.

The concrete subclass can override lldbtest.TestBase in order to inherit the
common behavior for unittest.TestCase.setUp/tearDown implemented in this file.

The subclass should override the attribute mydir in order for the python runtime
to locate the individual test cases when running as part of a large test suite
or when running each test case as a separate python invocation.

./dotest.py provides a test driver which sets up the environment to run the
entire test suite or individual test cases.  Example:

# Exercises the test suite in the types directory....
/Volumes/data/lldb/svn/ToT/test $ ./dotest.py -A x86_64 types
Session logs for test failures/errors/unexpected successes will go into directory '2012-05-16-13_35_42'
Command invoked: python ./dotest.py -A x86_64 types
compilers=['clang']
Configuration: arch=x86_64 compiler=clang
----------------------------------------------------------------------
Collected 72 tests
........................................................................
----------------------------------------------------------------------
"""

import os, sys, traceback
import types
import StringIO
from subprocess import Popen, PIPE, CalledProcessError
import unittest2
import lldb

# See also dotest.parseOptionsAndInitTestdirs(), where the environment variables
# LLDB_COMMAND_TRACE and LLDB_DO_CLEANUP are set from '-t' and '-r dir' options.
if "LLDB_COMMAND_TRACE" in os.environ and os.environ["LLDB_COMMAND_TRACE"]=="YES":
    traceAlways = True
else:
    traceAlways = False

# By default, doCleanup is True.
if "LLDB_DO_CLEANUP" in os.environ and os.environ["LLDB_DO_CLEANUP"]=="NO":
    doCleanup = False
else:
    doCleanup = True

COMMAND_FAILED_AS_EXPECTED = "Command has failed as expected"

CURRENT_EXECUTABLE_SET = "Current executable set successfully"

PROCESS_IS_VALID = "Process is valid"

PROCESS_KILLED = "Process is killed successfully"

PROCESS_EXITED = "Process exited successfully"

PROCESS_STOPPED = "Process status should be stopped"

RUN_SUCCEEDED = "Process is launched successfully"

RUN_COMPLETED = "Process exited successfully"

BACKTRACE_DISPLAYED_CORRECTLY = "Backtrace displayed correctly"

BREAKPOINT_CREATED = "Breakpoint created successfully"

BREAKPOINT_STATE_CORRECT = "Breakpoint state is correct"

BREAKPOINT_PENDING_CREATED = "Pending breakpoint created successfully"

BREAKPOINT_HIT_ONCE = "Breakpoint resolved with hit count = 1"

BREAKPOINT_HIT_TWICE = "Breakpoint resolved with hit count = 2"

BREAKPOINT_HIT_THRICE = "Breakpoint resolved with hit count = 3"

MISSING_EXPECTED_REGISTERS = "At least one expected register is unavailable."

OBJECT_PRINTED_CORRECTLY = "Object printed correctly"

SOURCE_DISPLAYED_CORRECTLY = "Source code displayed correctly"

STEP_OUT_SUCCEEDED = "Thread step-out succeeded"

STOPPED_DUE_TO_EXC_BAD_ACCESS = "Process should be stopped due to bad access exception"

STOPPED_DUE_TO_BREAKPOINT = "Process should be stopped due to breakpoint"
STOPPED_DUE_TO_BREAKPOINT_WITH_STOP_REASON_AS = "%s, %s" % (
    STOPPED_DUE_TO_BREAKPOINT, "instead, the actual stop reason is: '%s'")
STOPPED_DUE_TO_BREAKPOINT_CONDITION = "Stopped due to breakpoint condition"

STOPPED_DUE_TO_BREAKPOINT_IGNORE_COUNT = "Stopped due to breakpoint and ignore count"

STOPPED_DUE_TO_SIGNAL = "Process state is stopped due to signal"

STOPPED_DUE_TO_STEP_IN = "Process state is stopped due to step in"

STOPPED_DUE_TO_WATCHPOINT = "Process should be stopped due to watchpoint"

DATA_TYPES_DISPLAYED_CORRECTLY = "Data type(s) displayed correctly"

VALID_BREAKPOINT = "Got a valid breakpoint"

VALID_BREAKPOINT_LOCATION = "Got a valid breakpoint location"

VALID_COMMAND_INTERPRETER = "Got a valid command interpreter"

VALID_VARIABLE = "Got a valid variable"

VARIABLES_DISPLAYED_CORRECTLY = "Variable(s) displayed correctly"

WATCHPOINT_CREATED = "Watchpoint created successfully"

def CMD_MSG(str):
    '''A generic "Command '%s' returns successfully" message generator.'''
    return "Command '%s' returns successfully" % str

def COMPLETION_MSG(str_before, str_after):
    '''A generic message generator for the completion mechanism.'''
    return "'%s' successfully completes to '%s'" % (str_before, str_after)

def EXP_MSG(str, exe):
    '''A generic "'%s' returns expected result" message generator if exe.
    Otherwise, it generates "'%s' matches expected result" message.'''
    return "'%s' %s expected result" % (str, 'returns' if exe else 'matches')

def SETTING_MSG(setting):
    '''A generic "Value of setting '%s' is correct" message generator.'''
    return "Value of setting '%s' is correct" % setting

def EnvArray():
    """Returns an env variable array from the os.environ map object."""
    return map(lambda k,v: k+"="+v, os.environ.keys(), os.environ.values())

def line_number(filename, string_to_match):
    """Helper function to return the line number of the first matched string."""
    with open(filename, 'r') as f:
        for i, line in enumerate(f):
            if line.find(string_to_match) != -1:
                # Found our match; enumerate() is 0-based, so report 1-based.
                return i + 1
    raise Exception("Unable to find '%s' within file %s" % (string_to_match, filename))
def pointer_size():
    """Return the pointer size of the host system."""
    import ctypes
    a_pointer = ctypes.c_void_p(0xffff)
    return 8 * ctypes.sizeof(a_pointer)
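
# For example, on a 64-bit host pointer_size() returns 64; on a 32-bit host, 32.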

def is_exe(fpath):
    """Returns true if fpath is an executable."""
    return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

def which(program):
    """Returns the full path to a program; None otherwise."""
    fpath, fname = os.path.split(program)
    if fpath:
        if is_exe(program):
            return program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
    return None
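
# For example, which("clang") yields the full path to clang when it is on PATH,
# and None otherwise (the exact path depends on the host).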

class recording(StringIO.StringIO):
    """
    A nice little context manager for recording the debugger interactions into
    our session object.  If trace flag is ON, it also emits the interactions
    into the stderr.
    """
    def __init__(self, test, trace):
        """Create a StringIO instance; record the session obj and trace flag."""
        StringIO.StringIO.__init__(self)
        # The test might not have undergone the 'setUp(self)' phase yet, so that
        # the attribute 'session' might not even exist yet.
        self.session = getattr(test, "session", None) if test else None
        self.trace = trace

    def __enter__(self):
        """
        Context management protocol on entry to the body of the with statement.
        Just return the StringIO object.
        """
        return self

    def __exit__(self, type, value, tb):
        """
        Context management protocol on exit from the body of the with statement.
        If trace is ON, it emits the recordings into stderr.  Always add the
        recordings to our session object.  And close the StringIO object, too.
        """
        if self.trace:
            print >> sys.stderr, self.getvalue()
        if self.session:
            print >> self.session, self.getvalue()
        self.close()
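
# Typical use, mirroring how this file itself records diagnostics (the message
# text is illustrative):
#
#     with recording(self, traceAlways) as sbuf:
#         print >> sbuf, "os command:", args
#
# The recorded text goes into the test's session log and, when tracing is on,
# is echoed to stderr.
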
# From 2.7's subprocess.check_output() convenience function.
# Return a tuple (stdoutdata, stderrdata).
def system(*popenargs, **kwargs):
    r"""Run an os command with arguments and return its output as a byte string.

    If the exit code was non-zero it raises a CalledProcessError.  The
    CalledProcessError object will have the return code in the returncode
    attribute and output in the output attribute.

    The arguments are the same as for the Popen constructor.  Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.

    >>> check_output(["/bin/sh", "-c",
    ...               "ls -l non_existent_file ; exit 0"],
    ...              stderr=STDOUT)
    'ls: non_existent_file: No such file or directory\n'
    """

    # Assign the sender object to variable 'test' and remove it from kwargs.
    test = kwargs.pop('sender', None)

    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    process = Popen(stdout=PIPE, stderr=PIPE, *popenargs, **kwargs)
    output, error = process.communicate()
    retcode = process.poll()

    with recording(test, traceAlways) as sbuf:
        if isinstance(popenargs, types.StringTypes):
            args = [popenargs]
        else:
            args = list(popenargs)
        print >> sbuf
        print >> sbuf, "os command:", args
        print >> sbuf, "stdout:", output
        print >> sbuf, "stderr:", error
        print >> sbuf, "retcode:", retcode
        print >> sbuf

    if retcode:
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise CalledProcessError(retcode, cmd)
    return (output, error)

def getsource_if_available(obj):
    """
    Return the text of the source code for an object if available.  Otherwise,
    a print representation is returned.
    """
    import inspect
    try:
        return inspect.getsource(obj)
    except:
        return repr(obj)

def builder_module():
    return __import__("builder_" + sys.platform)

from functools import wraps
def python_api_test(func):
    """Decorate the item as a Python API only test."""
    if isinstance(func, type) and issubclass(func, unittest2.TestCase):
        raise Exception("@python_api_test can only be used to decorate a test method")
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            if lldb.dont_do_python_api_test:
                self.skipTest("python api tests")
        except AttributeError:
            pass
        return func(self, *args, **kwargs)
    # Mark this function as such to separate them from lldb command line tests.
    wrapper.__python_api_test__ = True
    return wrapper
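
# Typical use (the method name is illustrative):
#
#     @python_api_test
#     def test_with_python_api(self):
#         """Exercise a scenario purely via the lldb Python API."""
#         ...
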
def benchmarks_test(func):
    """Decorate the item as a benchmarks test."""
    if isinstance(func, type) and issubclass(func, unittest2.TestCase):
        raise Exception("@benchmarks_test can only be used to decorate a test method")
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            if not lldb.just_do_benchmarks_test:
                self.skipTest("benchmarks tests")
        except AttributeError:
            pass
        return func(self, *args, **kwargs)
    # Mark this function as such to separate them from the regular tests.
    wrapper.__benchmarks_test__ = True
    return wrapper

def dsym_test(func):
    """Decorate the item as a dsym test."""
    if isinstance(func, type) and issubclass(func, unittest2.TestCase):
        raise Exception("@dsym_test can only be used to decorate a test method")
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            if lldb.dont_do_dsym_test:
                self.skipTest("dsym tests")
        except AttributeError:
            pass
        return func(self, *args, **kwargs)

    # Mark this function as such to separate them from the regular tests.
    wrapper.__dsym_test__ = True
    return wrapper

def dwarf_test(func):
    """Decorate the item as a dwarf test."""
    if isinstance(func, type) and issubclass(func, unittest2.TestCase):
        raise Exception("@dwarf_test can only be used to decorate a test method")
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            if lldb.dont_do_dwarf_test:
                self.skipTest("dwarf tests")
        except AttributeError:
            pass
        return func(self, *args, **kwargs)

    # Mark this function as such to separate them from the regular tests.
    wrapper.__dwarf_test__ = True
    return wrapper

def expectedFailureCompiler(bugnumber=None, compiler=None):
    """Mark the test as an expected failure when running with the given
    compiler (a substring match against self.getCompiler(), e.g. "gcc")."""
    if callable(bugnumber):
        # Used as a bare decorator: bugnumber is actually the test function.
        @wraps(bugnumber)
        def expectedFailureCompiler_easy_wrapper(*args, **kwargs):
            from unittest2 import case
            self = args[0]
            test_compiler = self.getCompiler()
            try:
                bugnumber(*args, **kwargs)
            except Exception:
                if compiler in test_compiler:
                    raise case._ExpectedFailure(sys.exc_info(), None)
                else:
                    raise
            if compiler in test_compiler:
                raise case._UnexpectedSuccess(sys.exc_info(), None)
        return expectedFailureCompiler_easy_wrapper
    else:
        def expectedFailureCompiler_impl(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                from unittest2 import case
                self = args[0]
                test_compiler = self.getCompiler()
                try:
                    func(*args, **kwargs)
                except Exception:
                    if compiler in test_compiler:
                        raise case._ExpectedFailure(sys.exc_info(), bugnumber)
                    else:
                        raise
                if compiler in test_compiler:
                    raise case._UnexpectedSuccess(sys.exc_info(), bugnumber)
            return wrapper
        return expectedFailureCompiler_impl
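
# Typical use (the bug number is illustrative):
#
#     @expectedFailureCompiler(bugnumber="llvm.org/pr12345", compiler="clang")
#     def test_something(self):
#         ...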

def expectedFailureGcc(func):
    """Decorate the item as a GCC only expectedFailure."""
    if isinstance(func, type) and issubclass(func, unittest2.TestCase):
        raise Exception("@expectedFailureClang can only be used to decorate a test method")
    return expectedFailureCompiler(func, "gcc")

def expectedFailureClang(func):
    """Decorate the item as a Clang only expectedFailure."""
    if isinstance(func, type) and issubclass(func, unittest2.TestCase):
        raise Exception("@expectedFailureClang can only be used to decorate a test method")
    return expectedFailureCompiler(func, "clang")

def expectedFailurei386(bugnumber=None):
    if callable(bugnumber):
        @wraps(bugnumber)
        def expectedFailurei386_easy_wrapper(*args, **kwargs):
            from unittest2 import case
            self = args[0]
            arch = self.getArchitecture()
            try:
                bugnumber(*args, **kwargs)
            except Exception:
                if "i386" in arch:
                    raise case._ExpectedFailure(sys.exc_info(), None)
                else:
                    raise
            if "i386" in arch:
                raise case._UnexpectedSuccess(sys.exc_info(), None)
        return expectedFailurei386_easy_wrapper
    else:
        def expectedFailurei386_impl(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                from unittest2 import case
                self = args[0]
                arch = self.getArchitecture()
                try:
                    func(*args, **kwargs)
                except Exception:
                    if "i386" in arch:
                        raise case._ExpectedFailure(sys.exc_info(), bugnumber)
                    else:
                        raise
                if "i386" in arch:
                    raise case._UnexpectedSuccess(sys.exc_info(), bugnumber)
            return wrapper
        return expectedFailurei386_impl

def expectedFailureLinux(bugnumber=None):
    if callable(bugnumber):
        @wraps(bugnumber)
        def expectedFailureLinux_easy_wrapper(*args, **kwargs):
            from unittest2 import case
            self = args[0]
            platform = sys.platform
            try:
                bugnumber(*args, **kwargs)
            except Exception:
                if "linux" in platform:
                    raise case._ExpectedFailure(sys.exc_info(), None)
                else:
                    raise
            if "linux" in platform:
                raise case._UnexpectedSuccess(sys.exc_info(), None)
        return expectedFailureLinux_easy_wrapper
    else:
        def expectedFailureLinux_impl(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                from unittest2 import case
                self = args[0]
                platform = sys.platform
                try:
                    func(*args, **kwargs)
                except Exception:
                    if "linux" in platform:
                        raise case._ExpectedFailure(sys.exc_info(), bugnumber)
                    else:
                        raise
                if "linux" in platform:
                    raise case._UnexpectedSuccess(sys.exc_info(), bugnumber)
            return wrapper
        return expectedFailureLinux_impl

def skipOnLinux(func):
    """Decorate the item to skip tests that should be skipped on Linux."""
    if isinstance(func, type) and issubclass(func, unittest2.TestCase):
        raise Exception("@skipOnLinux can only be used to decorate a test method")
    @wraps(func)
    def wrapper(*args, **kwargs):
        from unittest2 import case
        self = args[0]
        platform = sys.platform
        if "linux" in platform:
            self.skipTest("skip on linux")
        else:
            func(*args, **kwargs)
    return wrapper

def skipIfGcc(func):
    """Decorate the item to skip tests that should be skipped if building with gcc ."""
    if isinstance(func, type) and issubclass(func, unittest2.TestCase):
        raise Exception("@skipOnLinux can only be used to decorate a test method")
    @wraps(func)
    def wrapper(*args, **kwargs):
        from unittest2 import case
        self = args[0]
        compiler = self.getCompiler()
        if "gcc" in compiler:
            self.skipTest("skipping because gcc is the test compiler")
        else:
            func(*args, **kwargs)
    return wrapper


class Base(unittest2.TestCase):
    """
    Abstract base for performing lldb (see TestBase) or other generic tests (see
    BenchBase for one example).  lldbtest.Base works with the test driver to
    accomplish things.
    """

    # The concrete subclass should override this attribute.
    mydir = None

    # Keep track of the old current working directory.
    oldcwd = None

    def TraceOn(self):
        """Returns True if we are in trace mode (tracing detailed test execution)."""
        return traceAlways

        """
        Python unittest framework class setup fixture.
        Do current directory manipulation.
        """

        # Fail fast if 'mydir' attribute is not overridden.
        if not cls.mydir or len(cls.mydir) == 0:
            raise Exception("Subclasses must override the 'mydir' attribute.")

        # Change current working directory if ${LLDB_TEST} is defined.
        # See also dotest.py which sets up ${LLDB_TEST}.
        if ("LLDB_TEST" in os.environ):
                print >> sys.stderr, "Change dir to:", os.path.join(os.environ["LLDB_TEST"], cls.mydir)
            os.chdir(os.path.join(os.environ["LLDB_TEST"], cls.mydir))

    @classmethod
    def tearDownClass(cls):
        """
        Python unittest framework class teardown fixture.
        Do class-wide cleanup.
        """
        if doCleanup and not lldb.skip_build_and_cleanup:
            # First, let's do the platform-specific cleanup.
            module = builder_module()
            if not module.cleanup():
                raise Exception("Don't know how to do cleanup")
            # Subclass might have specific cleanup function defined.
            if getattr(cls, "classCleanup", None):
                if traceAlways:
                    print >> sys.stderr, "Call class-specific cleanup function for class:", cls
                try:
                    cls.classCleanup()
                except:
                    exc_type, exc_value, exc_tb = sys.exc_info()
                    traceback.print_exception(exc_type, exc_value, exc_tb)

        # Restore old working directory.
        if traceAlways:
            print >> sys.stderr, "Restore dir to:", cls.oldcwd
        os.chdir(cls.oldcwd)

    @classmethod
    def skipLongRunningTest(cls):
        """
        By default, we skip long running test case.
        This can be overridden by passing '-l' to the test driver (dotest.py).
        """
        if "LLDB_SKIP_LONG_RUNNING_TEST" in os.environ and "NO" == os.environ["LLDB_SKIP_LONG_RUNNING_TEST"]:
            return False
        else:
            return True

    def setUp(self):
        """Fixture for unittest test case setup.

        It works with the test driver to conditionally skip tests and does other
        initializations."""
        if "LLDB_EXEC" in os.environ:
            self.lldbExec = os.environ["LLDB_EXEC"]
        else:
            self.lldbExec = None
        if "LLDB_HERE" in os.environ:
            self.lldbHere = os.environ["LLDB_HERE"]
        else:
            self.lldbHere = None
        # If we spawn an lldb process for test (via pexpect), do not load the
        # init file unless told otherwise.
        if "NO_LLDBINIT" in os.environ and "NO" == os.environ["NO_LLDBINIT"]:
            self.lldbOption = ""
        else:
            self.lldbOption = "--no-lldbinit"
        # Assign the test method name to self.testMethodName.
        #
        # For an example of the use of this attribute, look at test/types dir.
        # There are a bunch of test cases under test/types and we don't want the
        # module cacheing subsystem to be confused with executable name "a.out"
        # used for all the test cases.
        self.testMethodName = self._testMethodName

        # Python API only test is decorated with @python_api_test,
        # which also sets the "__python_api_test__" attribute of the
        # function object to True.
        try:
            if lldb.just_do_python_api_test:
                testMethod = getattr(self, self._testMethodName)
                if getattr(testMethod, "__python_api_test__", False):
                    pass
                else:
                    self.skipTest("non python api test")
        except AttributeError:
            pass

        # Benchmarks test is decorated with @benchmarks_test,
        # which also sets the "__benchmarks_test__" attribute of the
        # function object to True.
        try:
            if lldb.just_do_benchmarks_test:
                testMethod = getattr(self, self._testMethodName)
                if getattr(testMethod, "__benchmarks_test__", False):
                    pass
                else:
                    self.skipTest("non benchmarks test")
        except AttributeError:
            pass

        # This is for the case of directly spawning 'lldb'/'gdb' and interacting
        # with it using pexpect.
        self.child = None
        self.child_prompt = "(lldb) "
        # If the child is interacting with the embedded script interpreter,
        # there are two exits required during tear down, first to quit the
        # embedded script interpreter and second to quit the lldb command
        # interpreter.
        self.child_in_script_interpreter = False

        # These are for customized teardown cleanup.
        self.dict = None
        self.doTearDownCleanup = False
        # And in rare cases where there are multiple teardown cleanups.
        self.dicts = []
        self.doTearDownCleanups = False

        # List of spawned subprocess.Popen objects.
        self.subprocesses = []

        # Create a string buffer to record the session info, to be dumped into a
        # test case specific file if test failure is encountered.
        self.session = StringIO.StringIO()

        # Optimistically set __errored__, __failed__, __expected__ to False
        # initially.  If the test errored/failed, the session info
        # (self.session) is then dumped into a session specific file for
        # diagnosis.
        self.__errored__    = False
        self.__failed__     = False
        self.__expected__   = False
        # We are also interested in unexpected success.
        self.__unexpected__ = False
        # And skipped tests.
        self.__skipped__ = False

        # See addTearDownHook(self, hook) which allows the client to add a hook
        # function to be run during tearDown() time.
        self.hooks = []

        # See HideStdout(self).
        self.sys_stdout_hidden = False

        # set environment variable names for finding shared libraries
        if sys.platform.startswith("darwin"):
            self.dylibPath = 'DYLD_LIBRARY_PATH'
        elif sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
            self.dylibPath = 'LD_LIBRARY_PATH'

    def runHooks(self, child=None, child_prompt=None, use_cmd_api=False):
        """Perform the run hooks to bring lldb debugger to the desired state.

        By default, expect a pexpect spawned child and child prompt to be
        supplied (use_cmd_api=False).  If use_cmd_api is true, ignore the child
        and child prompt and use self.runCmd() to run the hooks one by one.

        Note that child is a process spawned by pexpect.spawn().  If not, your
        test case is most likely going to fail.

        See also dotest.py where lldb.runHooks are processed/populated.
        """
        if not lldb.runHooks:
            self.skipTest("No runhooks specified for lldb, skip the test")
        if use_cmd_api:
            for hook in lldb.runHooks:
                self.runCmd(hook)
        else:
            if not child or not child_prompt:
                self.fail("Both child and child_prompt need to be defined.")
            for hook in lldb.runHooks:
                child.sendline(hook)
                child.expect_exact(child_prompt)

    def setAsync(self, value):
        """ Sets async mode to True/False and ensures it is reset after the testcase completes."""
        old_async = self.dbg.GetAsync()
        self.dbg.SetAsync(value)
        self.addTearDownHook(lambda: self.dbg.SetAsync(old_async))

    def cleanupSubprocesses(self):
        # Ensure any subprocesses are cleaned up
        for p in self.subprocesses:
            if p.poll() == None:
                p.terminate()
            del p
        del self.subprocesses[:]

    def spawnSubprocess(self, executable, args=[]):
        """ Creates a subprocess.Popen object with the specified executable and arguments,
            saves it in self.subprocesses, and returns the object.
            NOTE: if using this function, ensure you also call:

              self.addTearDownHook(self.cleanupSubprocesses)

            otherwise the test suite will leak processes.
        """

        # Don't display the stdout if not in TraceOn() mode.
        proc = Popen([executable] + args,
                     stdout = open(os.devnull) if not self.TraceOn() else None,
                     stdin = PIPE)
        self.subprocesses.append(proc)
        return proc
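
    # Typical use (the executable name and arguments are illustrative):
    #
    #     proc = self.spawnSubprocess("a.out", ["--wait-for-attach"])
    #     self.addTearDownHook(self.cleanupSubprocesses)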

    def HideStdout(self):
        """Hide output to stdout from the user.

        During test execution, there might be cases where we don't want to show the
        standard output to the user.  For example,

            self.runCmd(r'''sc print "\n\n\tHello!\n"''')

        tests whether command abbreviation for 'script' works or not.  There is no
        need to show the 'Hello' output to the user as long as the 'script' command
        succeeds and we are not in TraceOn() mode (see the '-t' option).

        In this case, the test method calls self.HideStdout(self) to redirect the
        sys.stdout to a null device, and restores the sys.stdout upon teardown.

        Note that you should only call this method at most once during a test case
        execution.  Any subsequent call has no effect at all."""
        if self.sys_stdout_hidden:
            return

        self.sys_stdout_hidden = True
        old_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        def restore_stdout():
            sys.stdout = old_stdout
        self.addTearDownHook(restore_stdout)

    # =======================================================================
    # Methods for customized teardown cleanups as well as execution of hooks.
    # =======================================================================

    def setTearDownCleanup(self, dictionary=None):
        """Register a cleanup action at tearDown() time with a dictinary"""
        self.dict = dictionary
        self.doTearDownCleanup = True

    def addTearDownCleanup(self, dictionary):
        """Add a cleanup action at tearDown() time with a dictinary"""
        self.dicts.append(dictionary)
        self.doTearDownCleanups = True

    def addTearDownHook(self, hook):
        """
        Add a function to be run during tearDown() time.

        Hooks are executed in a first come first serve manner.
        """
        if callable(hook):
            with recording(self, traceAlways) as sbuf:
                print >> sbuf, "Adding tearDown hook:", getsource_if_available(hook)
            self.hooks.append(hook)
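
    # Typical use, as in setAsync() above:
    #
    #     self.addTearDownHook(lambda: self.dbg.SetAsync(old_async))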

    def tearDown(self):
        """Fixture for unittest test case teardown."""
        #import traceback
        #traceback.print_stack()

        # This is for the case of directly spawning 'lldb' and interacting with it
        # using pexpect.
        import pexpect
        if self.child and self.child.isalive():
            with recording(self, traceAlways) as sbuf:
                print >> sbuf, "tearing down the child process...."
            try:
                if self.child_in_script_interpreter:
                    self.child.sendline('quit()')
                    self.child.expect_exact(self.child_prompt)
                self.child.sendline('settings set interpreter.prompt-on-quit false')
                self.child.sendline('quit')
            except (ValueError, pexpect.ExceptionPexpect):
                # The child is already terminated; nothing more to quit.
                pass
            # Give it one final blow to make sure the child is terminated.
            self.child.close()
        # Check and run any hook functions.
        for hook in reversed(self.hooks):
            with recording(self, traceAlways) as sbuf:
                print >> sbuf, "Executing tearDown hook:", getsource_if_available(hook)
            hook()

        del self.hooks

        # Perform registered teardown cleanup.
        if doCleanup and self.doTearDownCleanup:
            self.cleanup(dictionary=self.dict)

        # In rare cases where there are multiple teardown cleanups added.
        if doCleanup and self.doTearDownCleanups:
            if self.dicts:
                for dict in reversed(self.dicts):
                    self.cleanup(dictionary=dict)

        # Decide whether to dump the session info.
        self.dumpSessionInfo()

    # =========================================================
    # Various callbacks to allow introspection of test progress
    # =========================================================

    def markError(self):
        """Callback invoked when an error (unexpected exception) errored."""
        self.__errored__ = True
        with recording(self, False) as sbuf:
            # False because there's no need to write "ERROR" to the stderr twice.
            # Once by the Python unittest framework, and a second time by us.
            print >> sbuf, "ERROR"

    def markFailure(self):
        """Callback invoked when a failure (test assertion failure) occurred."""
        self.__failed__ = True
        with recording(self, False) as sbuf:
            # False because there's no need to write "FAIL" to the stderr twice.
            # Once by the Python unittest framework, and a second time by us.
            print >> sbuf, "FAIL"

    def markExpectedFailure(self,err,bugnumber):
        """Callback invoked when an expected failure/error occurred."""
        self.__expected__ = True
        with recording(self, False) as sbuf:
            # False because there's no need to write "expected failure" to the
            # stderr twice.
            # Once by the Python unittest framework, and a second time by us.
            if bugnumber == None:
                print >> sbuf, "expected failure"
            else:
                print >> sbuf, "expected failure (problem id:" + str(bugnumber) + ")"	
    def markSkippedTest(self):
        """Callback invoked when a test is skipped."""
        self.__skipped__ = True
        with recording(self, False) as sbuf:
            # False because there's no need to write "skipped test" to the
            # stderr twice.
            # Once by the Python unittest framework, and a second time by us.
            print >> sbuf, "skipped test"

    def markUnexpectedSuccess(self, bugnumber):
        """Callback invoked when an unexpected success occurred."""
        self.__unexpected__ = True
        with recording(self, False) as sbuf:
            # False because there's no need to write "unexpected success" to the
            # stderr twice.
            # Once by the Python unittest framework, and a second time by us.
            if bugnumber == None:
                print >> sbuf, "unexpected success"
            else:
                print >> sbuf, "unexpected success (problem id:" + str(bugnumber) + ")"	

    def dumpSessionInfo(self):
        """
        Dump the debugger interactions leading to a test error/failure.  This
        allows for more convenient postmortem analysis.

        See also LLDBTestResult (dotest.py) which is a singleton class derived
        from TextTestResult and overwrites addError, addFailure, and
        addExpectedFailure methods to allow us to mark the test instance as
        such.
        """

        # We are here because self.tearDown() detected that this test instance
        # either errored or failed.  The lldb.test_result singleton contains
        # two lists (errors and failures) which get populated by the unittest
        # framework.  Look over there for stack trace information.
        #
        # The lists contain 2-tuples of TestCase instances and strings holding
        # formatted tracebacks.
        #
        # See http://docs.python.org/library/unittest.html#unittest.TestResult.
        if self.__errored__:
            pairs = lldb.test_result.errors
            prefix = 'Error'
        elif self.__failed__:
            pairs = lldb.test_result.failures
            prefix = 'Failure'
        elif self.__expected__:
            pairs = lldb.test_result.expectedFailures
            prefix = 'ExpectedFailure'
        elif self.__unexpected__:
            prefix = "UnexpectedSuccess"
        elif self.__skipped__:
            prefix = "SkippedTest"
        else:
            # Simply return, there's no session info to dump!
            return

        if not self.__unexpected__ and not self.__skipped__:
            for test, traceback in pairs:
                if test is self:
                    print >> self.session, traceback

        testMethod = getattr(self, self._testMethodName)
        if getattr(testMethod, "__benchmarks_test__", False):
            benchmarks = True
        else:
            benchmarks = False

        # This records the compiler version used for the test.
        system([self.getCompiler(), "-v"], sender=self)

        dname = os.path.join(os.environ["LLDB_TEST"],
                             os.environ["LLDB_SESSION_DIRNAME"])
        if not os.path.isdir(dname):
            os.mkdir(dname)
        fname = os.path.join(dname, "%s-%s-%s-%s.log" % (prefix, self.getArchitecture(), "_".join(self.getCompiler().split('/')), self.id()))
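        # The resulting name looks like, e.g.,
        # "Failure-x86_64-clang-TestFoo.FooTestCase.test_bar.log" (illustrative).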
        with open(fname, "w") as f:
            import datetime
            print >> f, "Session info generated @", datetime.datetime.now().ctime()
            print >> f, self.session.getvalue()
            print >> f, "To rerun this test, issue the following command from the 'test' directory:\n"
            print >> f, "./dotest.py %s -v %s -f %s.%s" % (self.getRunOptions(),
                                                           ('+b' if benchmarks else '-t'),
                                                           self.__class__.__name__,
                                                           self._testMethodName)

    # ====================================================
    # Config. methods supported through a plugin interface
    # (enables reading of the current test configuration)
    # ====================================================

    def getArchitecture(self):
        """Returns the architecture in effect the test suite is running with."""
        module = builder_module()
        return module.getArchitecture()

    def getCompiler(self):
        """Returns the compiler in effect the test suite is running with."""
        module = builder_module()
        return module.getCompiler()

    def getRunOptions(self):
        """Command line option for -A and -C to run this test again, called from
        self.dumpSessionInfo()."""
        arch = self.getArchitecture()
        comp = self.getCompiler()
        if arch:
            option_str = "-A " + arch