[tor-commits] [ooni-probe/master] Implement a working ooniclient based off of trial

isis at torproject.org isis at torproject.org
Thu Oct 4 14:41:15 UTC 2012


commit 603d5cdc27a13aef4d9d25ffbd9108fb3bdbdcac
Author: Arturo Filastò <arturo at filasto.net>
Date:   Tue Sep 11 15:35:21 2012 +0000

    Implement a working ooniclient based off of trial
---
 ooni/nettest.py      |   97 +++++++++++++++++++++++++++
 ooni/oonicli.py      |   73 ++++-----------------
 ooni/plugoo/tests.py |    1 +
 ooni/reporter.py     |   16 ++---
 ooni/runner.py       |  177 +++++++++++++++++++++++++++++++++++++++++++++++++-
 5 files changed, 292 insertions(+), 72 deletions(-)

diff --git a/ooni/nettest.py b/ooni/nettest.py
new file mode 100644
index 0000000..fe8c05c
--- /dev/null
+++ b/ooni/nettest.py
@@ -0,0 +1,97 @@
+import warnings
+from twisted.python import log, failure
+from twisted.trial import unittest, itrial
+
+pyunit = __import__('unittest')
+
+def _iterateTests(testSuiteOrCase):
+    """
+    Iterate through all of the test cases in C{testSuiteOrCase}.
+    """
+    try:
+        suite = iter(testSuiteOrCase)
+    except TypeError:
+        if not getattr(testSuiteOrCase, 'inputs', None):
+            yield testSuiteOrCase
+        else:
+            inputs = iter(testSuiteOrCase.inputs)
+            print "Detected Sub shit! %s" % inputs
+            for input in inputs:
+                yield testSuiteOrCase, input
+    else:
+        for test in suite:
+            for subtest in _iterateTests(test):
+                yield subtest
+
+
+class TestCase(unittest.TestCase):
+    """
+    A test case represents the minimum unit of testing; it is run once for each input.
+    """
+    def run(self, result, input):
+        """
+        Run the test case, storing the results in C{result}.
+
+        First runs C{setUp} on self, then runs the test method (defined in the
+        constructor), then runs C{tearDown}.  As with the standard library
+        L{unittest.TestCase}, the return value of these methods is disregarded.
+        In particular, returning a L{Deferred} has no special additional
+        consequences.
+
+        @param result: A L{TestResult} object.
+        """
+        log.msg("--> %s <--" % (self.id()))
+        new_result = itrial.IReporter(result, None)
+        if new_result is None:
+            result = unittest.PyUnitResultAdapter(result)
+        else:
+            result = new_result
+        result.startTest(self)
+        if self.getSkip(): # don't run test methods that are marked as .skip
+            result.addSkip(self, self.getSkip())
+            result.stopTest(self)
+            return
+
+        self._passed = False
+        self._warnings = []
+
+        self._installObserver()
+        # All the code inside _runFixturesAndTest will be run such that warnings
+        # emitted by it will be collected and retrievable by flushWarnings.
+        unittest._collectWarnings(self._warnings.append, self._runFixturesAndTest, result)
+
+        # Any collected warnings which the test method didn't flush get
+        # re-emitted so they'll be logged or show up on stdout or whatever.
+        for w in self.flushWarnings():
+            try:
+                warnings.warn_explicit(**w)
+            except:
+                result.addError(self, failure.Failure())
+
+        result.stopTest(self)
+
+
+class TestSuite(pyunit.TestSuite):
+    """
+    Extend the standard library's C{TestSuite} with support for the visitor
+    pattern and a consistently overrideable C{run} method.
+    """
+
+    def __call__(self, result, input):
+        return self.run(result, input)
+
+
+    def run(self, result, input):
+        """
+        Call C{run} on every member of the suite.
+        """
+        # we implement this because Python 2.3 unittest defines this code
+        # in __call__, whereas 2.4 defines the code in run.
+        for test in self._tests:
+            if result.shouldStop:
+                break
+            print test
+            print "----------------"
+            test(result, input)
+        return result
+
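
To make the new nettest API above concrete, a minimal test might look like
the sketch below. The class name, inputs and URLs are made up for
illustration; the point is that a case declaring an inputs attribute is
yielded by _iterateTests() once per input as a (test, input) pair.

    from ooni import nettest

    class HTTPHostReachable(nettest.TestCase):
        # Hypothetical test: any iterable of inputs works here.
        inputs = ['http://example.com/', 'http://example.org/']

        def test_fetch(self):
            pass

    suite = nettest.TestSuite([HTTPHostReachable('test_fetch')])
    for item in nettest._iterateTests(suite):
        # Because inputs is set, each item is a (test_case, input) pair.
        print item
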
diff --git a/ooni/oonicli.py b/ooni/oonicli.py
index 8ace160..199b4d4 100644
--- a/ooni/oonicli.py
+++ b/ooni/oonicli.py
@@ -22,19 +22,8 @@ from twisted.python.filepath import FilePath
 from twisted import plugin
 from twisted.python.util import spewer
 from twisted.python.compat import set
-from twisted.trial import runner, itrial, reporter
-
-
-# Yea, this is stupid.  Leave it for for command-line compatibility for a
-# while, though.
-TBFORMAT_MAP = {
-    'plain': 'default',
-    'default': 'default',
-    'emacs': 'brief',
-    'brief': 'brief',
-    'cgitb': 'verbose',
-    'verbose': 'verbose'
-    }
+from twisted.trial import itrial
+from ooni import runner, reporter
 
 
 def _parseLocalVariables(line):
@@ -98,26 +87,6 @@ def isTestFile(filename):
             and os.path.splitext(basename)[1] == ('.py'))
 
 
-def _reporterAction():
-    return usage.CompleteList([p.longOpt for p in
-                               plugin.getPlugins(itrial.IReporter)])
-
-class Options(usage.Options):
-
-    optParameters = [
-        ['parallelism', 'n', 10, "Specify the number of parallel tests to run"],
-        ['output', 'o', 'report.log', "Specify output report file"],
-        ['log', 'l', 'oonicli.log', "Specify output log file"]
-    ]
-
-    def opt_version(self):
-        """
-        Display OONI version and exit.
-        """
-        print "OONI version:", __version__
-        sys.exit(0)
-
-
 class Options(usage.Options, app.ReactorSelectionMixin):
     synopsis = """%s [options] [[file|package|module|TestCase|testmethod]...]
     """ % (os.path.basename(sys.argv[0]),)
@@ -136,12 +105,11 @@ class Options(usage.Options, app.ReactorSelectionMixin):
                 ["nopm", None, "don't automatically jump into debugger for "
                  "postmorteming of exceptions"],
                 ["dry-run", 'n', "do everything but run the tests"],
-                ["force-gc", None, "Have Trial run gc.collect() before and "
+                ["force-gc", None, "Have OONI run gc.collect() before and "
                  "after each test case."],
                 ["profile", None, "Run tests under the Python profiler"],
                 ["unclean-warnings", None,
                  "Turn dirty reactor errors into warnings"],
-                ["until-failure", "u", "Repeat test until it fails"],
                 ["no-recurse", "N", "Don't recurse into packages"],
                 ['help-reporters', None,
                  "Help on available output plugins (reporters)"]
@@ -153,13 +121,12 @@ class Options(usage.Options, app.ReactorSelectionMixin):
          "Run tests in random order using the specified seed"],
         ['temp-directory', None, '_trial_temp',
          'Path to use as working directory for tests.'],
-        ['reporter', None, 'verbose',
+        ['reporter', None, 'default',
          'The reporter to use for this test run.  See --help-reporters for '
          'more info.']]
 
     compData = usage.Completions(
         optActions={"tbformat": usage.CompleteList(["plain", "emacs", "cgitb"]),
-                    "reporter": _reporterAction,
                     "logfile": usage.CompleteFiles(descr="log file name"),
                     "random": usage.Completer(descr="random seed")},
         extraActions=[usage.CompleteFiles(
@@ -167,7 +134,7 @@ class Options(usage.Options, app.ReactorSelectionMixin):
                 repeat=True)],
         )
 
-    fallbackReporter = reporter.TreeReporter
+    fallbackReporter = reporter.OONIReporter
     tracer = None
 
     def __init__(self):
@@ -208,7 +175,7 @@ class Options(usage.Options, app.ReactorSelectionMixin):
         # value to the test suite as a module.
         #
         # This parameter allows automated processes (like Buildbot) to pass
-        # a list of files to Trial with the general expectation of "these files,
+        # a list of files to OONI with the general expectation of "these files,
         # whatever they are, will get tested"
         if not os.path.isfile(filename):
             sys.stderr.write("File %r doesn't exist\n" % (filename,))
@@ -300,21 +267,11 @@ class Options(usage.Options, app.ReactorSelectionMixin):
         self['tests'].update(args)
 
 
-    def _loadReporterByName(self, name):
-        for p in plugin.getPlugins(itrial.IReporter):
-            qual = "%s.%s" % (p.module, p.klass)
-            if p.longOpt == name:
-                return reflect.namedAny(qual)
-        raise usage.UsageError("Only pass names of Reporter plugins to "
-                               "--reporter. See --help-reporters for "
-                               "more info.")
-
-
     def postOptions(self):
         # Only load reporters now, as opposed to any earlier, to avoid letting
         # application-defined plugins muck up reactor selecting by importing
         # t.i.reactor and causing the default to be installed.
-        self['reporter'] = self._loadReporterByName(self['reporter'])
+        self['reporter'] = reporter.OONIReporter
 
         if 'tbformat' not in self:
             self['tbformat'] = 'default'
@@ -338,10 +295,10 @@ def _initialDebugSetup(config):
 def _getSuite(config):
     loader = _getLoader(config)
     recurse = not config['no-recurse']
+    print "loadByNames %s" % config['tests']
     return loader.loadByNames(config['tests'], recurse)
 
 
-
 def _getLoader(config):
     loader = runner.TestLoader()
     if config['random']:
@@ -349,8 +306,6 @@ def _getLoader(config):
         randomer.seed(config['random'])
         loader.sorter = lambda x : randomer.random()
         print 'Running tests shuffled with seed %d\n' % config['random']
-    if not config['until-failure']:
-        loader.suiteFactory = runner.DestructiveTestSuite
     return loader
 
 
@@ -358,10 +313,11 @@ def _getLoader(config):
 def _makeRunner(config):
     mode = None
     if config['debug']:
-        mode = runner.TrialRunner.DEBUG
+        mode = runner.OONIRunner.DEBUG
     if config['dry-run']:
-        mode = runner.TrialRunner.DRY_RUN
-    return runner.TrialRunner(config['reporter'],
+        mode = runner.OONIRunner.DRY_RUN
+    print "using %s" % config['reporter']
+    return runner.OONIRunner(config['reporter'],
                               mode=mode,
                               profile=config['profile'],
                               logfile=config['logfile'],
@@ -384,10 +340,7 @@ def run():
     _initialDebugSetup(config)
     trialRunner = _makeRunner(config)
     suite = _getSuite(config)
-    if config['until-failure']:
-        test_result = trialRunner.runUntilFailure(suite)
-    else:
-        test_result = trialRunner.run(suite)
+    test_result = trialRunner.run(suite)
     if config.tracer:
         sys.settrace(None)
         results = config.tracer.results()
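
Taken together, the oonicli changes above mean run() now drives the new
OONI runner roughly as in the sketch below. This only illustrates the
programmatic flow through the private helpers defined in this file; the
test name passed in is hypothetical.

    from ooni import oonicli

    config = oonicli.Options()
    config.parseOptions(['ooni.plugins.example'])  # hypothetical test name

    oonicli._initialDebugSetup(config)
    trialRunner = oonicli._makeRunner(config)  # an OONIRunner using OONIReporter
    suite = oonicli._getSuite(config)          # TestLoader().loadByNames(...)
    test_result = trialRunner.run(suite)
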
diff --git a/ooni/plugoo/tests.py b/ooni/plugoo/tests.py
index 5fad85e..2b1e87c 100644
--- a/ooni/plugoo/tests.py
+++ b/ooni/plugoo/tests.py
@@ -22,6 +22,7 @@ class OONITest(object):
     developer to benefit from OONIs reporting system and command line argument
     parsing system.
     """
+    name = "oonitest"
     # By default we set this to False, meaning that we don't block
     blocking = False
     reactor = None
diff --git a/ooni/reporter.py b/ooni/reporter.py
index 0ecf2ea..d20160f 100644
--- a/ooni/reporter.py
+++ b/ooni/reporter.py
@@ -1,13 +1,11 @@
 from twisted.trial import reporter
 
-class TestResult(reporter.TestResult):
-    """
-    Accumulates the results of several ooni.nettest.TestCases.
-
-    The output format of a TestResult is YAML and it will contain all the basic
-    information that a test result should contain.
-    """
-    def __init__(self):
-        super(TestResult, self).__init__()
+class OONIReporter(reporter.Reporter):
+
+    def startTest(self, test, input=None):
+        print "Running %s" % test
+        print "Input %s" % input
+        self._input = input
+        super(OONIReporter, self).startTest(test)
 
 
diff --git a/ooni/runner.py b/ooni/runner.py
index d7caa9d..c6ad90b 100644
--- a/ooni/runner.py
+++ b/ooni/runner.py
@@ -1,7 +1,178 @@
-from twisted.trial import runner
+import time
+import inspect
+import warnings

+from twisted.internet import defer
+from twisted.python import reflect
+from twisted.trial import unittest
+from twisted.trial.runner import TrialRunner, TestLoader, isPackage, isTestCase, TestHolder, NOT_IN_TEST
 
-class TestLoader(runner.TestLoader):
-    pass
+from ooni import nettest
+from ooni.plugoo import tests as oonitests
+
+def isLegacyTest(obj):
+    """
+    Returns True if the test in question is written using the OONITest legacy
+    class.
+    We do this for backward compatibility of the OONIProbe API.
+    """
+    try:
+        return issubclass(obj, oonitests.OONITest)
+    except TypeError:
+        return False
+
+def adaptLegacyTest(obj):
+    """
+    We take a legacy OONITest class and convert it into a nettest.TestCase.
+    This preserves backward compatibility with old OONI tests.
+
+    XXX perhaps we could implement another layer that makes even older test
+    cases compatible with the new OONI.
+    """
+    class LegacyOONITest(nettest.TestCase):
+        pass
+
+
+class LoggedSuite(nettest.TestSuite):
+    """
+    Any errors logged in this suite will be reported to the L{TestResult}
+    object.
+    """
+
+    def run(self, result, input):
+        """
+        Run the suite, storing all errors in C{result}. If an error is logged
+        while no tests are running, then it will be added as an error to
+        C{result}.
+
+        @param result: A L{TestResult} object.
+        """
+        observer = unittest._logObserver
+        observer._add()
+        super(LoggedSuite, self).run(result, input)
+        observer._remove()
+        for error in observer.getErrors():
+            result.addError(TestHolder(NOT_IN_TEST), error)
+        observer.flushErrors()
+
+
+class OONISuite(nettest.TestSuite):
+    """
+    Suite to wrap around every single test in a C{trial} run. Used internally
+    by OONI to set up things necessary for OONI tests to work, regardless of
+    what context they are run in.
+    """
+
+    def __init__(self, tests=()):
+        suite = LoggedSuite(tests)
+        super(OONISuite, self).__init__([suite])
+
+    def _bail(self):
+        from twisted.internet import reactor
+        d = defer.Deferred()
+        reactor.addSystemEventTrigger('after', 'shutdown',
+                                      lambda: d.callback(None))
+        reactor.fireSystemEvent('shutdown') # radix's suggestion
+        # As long as TestCase does crap stuff with the reactor we need to
+        # manually shutdown the reactor here, and that requires util.wait
+        # :(
+        # so that the shutdown event completes
+        nettest.TestCase('mktemp')._wait(d)
+
+    def run(self, result, input):
+        try:
+            nettest.TestSuite.run(self, result, input)
+        finally:
+            self._bail()
+
+
+class OONIRunner(TrialRunner):
+    def run(self, test):
+        return TrialRunner.run(self, test)
+
+    def _runWithoutDecoration(self, test):
+        """
+        Private helper that runs the given test but doesn't decorate it.
+        """
+        result = self._makeResult()
+        # decorate the suite with reactor cleanup and log starting
+        # This should move out of the runner and be presumed to be
+        # present
+        suite = OONISuite([test])
+        print "HERE IS THE TEST:"
+        print test
+        print "-------------"
+        try:
+            inputs = test.inputs
+        except AttributeError:
+            inputs = [None]
+
+        startTime = time.time()
+        if self.mode == self.DRY_RUN:
+            for single in nettest._iterateTests(suite):
+                input = None
+                if type(single) == type(tuple()):
+                    single, input = single
+                result.startTest(single, input)
+                result.addSuccess(single)
+                result.stopTest(single)
+        else:
+            if self.mode == self.DEBUG:
+                # open question - should this be self.debug() instead.
+                debugger = self._getDebugger()
+                run = lambda x: debugger.runcall(suite.run, result, x)
+            else:
+                run = lambda x: suite.run(result, x)
+
+            oldDir = self._setUpTestdir()
+            try:
+                self._setUpLogFile()
+                # XXX work on this better
+                for input in inputs:
+                    run(input)
+            finally:
+                self._tearDownLogFile()
+                self._tearDownTestdir(oldDir)
+
+        endTime = time.time()
+        done = getattr(result, 'done', None)
+        if done is None:
+            warnings.warn(
+                "%s should implement done() but doesn't. Falling back to "
+                "printErrors() and friends." % reflect.qual(result.__class__),
+                category=DeprecationWarning, stacklevel=3)
+            result.printErrors()
+            result.writeln(result.separator)
+            result.writeln('Ran %d tests in %.3fs', result.testsRun,
+                           endTime - startTime)
+            result.write('\n')
+            result.printSummary()
+        else:
+            result.done()
+        return result
+
+
+class TestLoader(TestLoader):
+    """
+    Responsible for finding the modules that can work as tests and loading them.
+    If we detect that a certain test is written using the legacy OONI API we
+    wrap it in a next-gen class so that it works here too.
+    """
+    def __init__(self):
+        super(TestLoader, self).__init__()
+        self.suiteFactory = nettest.TestSuite
+
+    def findTestClasses(self, module):
+        classes = []
+        for name, val in inspect.getmembers(module):
+            if isTestCase(val):
+                classes.append(val)
+            # This is here to allow backward compatibility with legacy OONI
+            # tests.
+            elif isLegacyTest(val):
+                #val = adaptLegacyTest(val)
+                classes.append(val)
+        return self.sort(classes)
+        #return runner.TestLoader.findTestClasses(self, module)
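
As an illustration of what the new loader distinguishes, the sketch below
shows the two kinds of test class that findTestClasses() collects. Both
example classes and their attributes are made up: legacy OONITest
subclasses are picked up via isLegacyTest(), new-style nettest.TestCase
subclasses via isTestCase().

    from ooni import nettest
    from ooni.plugoo import tests as oonitests

    class LegacyExample(oonitests.OONITest):
        # Old-style test, collected through isLegacyTest().
        name = "legacy_example"

    class ModernExample(nettest.TestCase):
        # New-style test, collected through isTestCase(); the runner will
        # call it once for every entry in inputs.
        inputs = ['first input', 'second input']

        def test_something(self):
            pass

Calling TestLoader().findTestClasses() on a module containing both would
return the two classes.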
 
 
More information about the tor-commits mailing list