commit 287f7d64c617bdfa4646184782ed22421d5cab3b
Author: Damian Johnson <atagar@torproject.org>
Date:   Mon Mar 9 09:10:21 2015 -0700
Move run_tests.py argument handling to its own file
This is a pattern I like that we used for our interpreter. Our run_tests.py was getting a bit long, and this is functionality that really does belong in its own file.
---
 run_tests.py                  | 136 ++-----------------------------------
 stem/interpreter/arguments.py |  10 +--
 test/arguments.py             | 149 +++++++++++++++++++++++++++++++++++++++++
 test/util.py                  |  34 +++-------
 4 files changed, 165 insertions(+), 164 deletions(-)
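In short, run_tests.py now defers both argument parsing and usage output to the new test.arguments module. Below is a minimal sketch of the resulting call pattern, condensed from the diff that follows rather than the verbatim file contents:

  import sys

  import test.arguments

  from test.output import println

  try:
    args = test.arguments.parse(sys.argv[1:])
  except ValueError as exc:
    # parse() wraps getopt problems and bad values in a ValueError
    println(str(exc))
    sys.exit(1)

  if args.print_help:
    println(test.arguments.get_help())
    sys.exit()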
diff --git a/run_tests.py b/run_tests.py
index 876b296..ce732f7 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -6,8 +6,6 @@
 Runs unit and integration tests. For usage information run this with '--help'.
 """
 
-import collections
-import getopt
 import os
 import sys
 import threading
@@ -26,43 +24,15 @@
 import stem.util.log
 import stem.util.system
 import stem.util.test_tools
 
+import test.arguments
 import test.output
 import test.runner
 import test.util
 
 from test.output import STATUS, SUCCESS, ERROR, NO_NL, STDERR, println
-from test.util import STEM_BASE, Target, Task
-
-# Our default arguments. The _get_args() function provides a named tuple of
-# this merged with our argv.
-#
-# Integration targets fall into two categories:
-#
-#   * Run Targets (like RUN_COOKIE and RUN_PTRACE) which customize our torrc.
-#     We do an integration test run for each run target we get.
-#
-#   * Attribute Target (like CHROOT and ONLINE) which indicates
-#     non-configuration changes to your test runs. These are applied to all
-#     integration runs that we perform.
-
-ARGS = {
-  'run_unit': False,
-  'run_integ': False,
-  'specific_test': None,
-  'logging_runlevel': None,
-  'tor_path': 'tor',
-  'run_targets': [Target.RUN_OPEN],
-  'attribute_targets': [],
-  'quiet': False,
-  'verbose': False,
-  'print_help': False,
-}
-
-OPT = 'auit:l:qvh'
-OPT_EXPANDED = ['all', 'unit', 'integ', 'targets=', 'test=', 'log=', 'tor=', 'quiet', 'verbose', 'help']
+from test.util import STEM_BASE, Task
 
 CONFIG = stem.util.conf.config_dict('test', {
-  'target.torrc': {},
   'integ.test_directory': './test/data',
 })
@@ -74,11 +44,6 @@
 SRC_PATHS = [os.path.join(STEM_BASE, path) for path in (
   os.path.join('docs', 'roles.py'),
 )]
 
-LOG_TYPE_ERROR = """\
-'%s' isn't a logging runlevel, use one of the following instead:
-  TRACE, DEBUG, INFO, NOTICE, WARN, ERROR
-"""
-
 MOCK_UNAVAILABLE_MSG = """\
 To run stem's tests you'll need mock...
@@ -122,10 +87,7 @@ def main():
   test_config.load(os.path.join(STEM_BASE, 'test', 'settings.cfg'))
 
   try:
-    args = _get_args(sys.argv[1:])
-  except getopt.GetoptError as exc:
-    println('%s (for usage provide --help)' % exc)
-    sys.exit(1)
+    args = test.arguments.parse(sys.argv[1:])
   except ValueError as exc:
     println(str(exc))
     sys.exit(1)
@@ -134,7 +96,7 @@ def main():
     test.output.SUPPRESS_STDOUT = True
 
   if args.print_help:
-    println(test.util.get_help_message())
+    println(test.arguments.get_help())
     sys.exit()
   elif not args.run_unit and not args.run_integ:
     println('Nothing to run (for usage provide --help)\n')
@@ -328,94 +290,6 @@ def main():
   sys.exit(1 if error_tracker.has_errors_occured() else 0)
 
 
-def _get_args(argv):
-  """
-  Parses our arguments, providing a named tuple with their values.
-
-  :param list argv: input arguments to be parsed
-
-  :returns: a **named tuple** with our parsed arguments
-
-  :raises: **ValueError** if we got an invalid argument
-  :raises: **getopt.GetoptError** if the arguments don't conform with what we
-    accept
-  """
-
-  args = dict(ARGS)
-
-  try:
-    recognized_args, unrecognized_args = getopt.getopt(argv, OPT, OPT_EXPANDED)
-
-    if unrecognized_args:
-      error_msg = "aren't recognized arguments" if len(unrecognized_args) > 1 else "isn't a recognized argument"
-      raise getopt.GetoptError("'%s' %s" % ("', '".join(unrecognized_args), error_msg))
-  except getopt.GetoptError as exc:
-    raise ValueError('%s (for usage provide --help)' % exc)
-
-  for opt, arg in recognized_args:
-    if opt in ('-a', '--all'):
-      args['run_unit'] = True
-      args['run_integ'] = True
-    elif opt in ('-u', '--unit'):
-      args['run_unit'] = True
-    elif opt in ('-i', '--integ'):
-      args['run_integ'] = True
-    elif opt in ('-t', '--targets'):
-      run_targets, attribute_targets = [], []
-
-      integ_targets = arg.split(',')
-      all_run_targets = [t for t in Target if CONFIG['target.torrc'].get(t) is not None]
-
-      # validates the targets and split them into run and attribute targets
-
-      if not integ_targets:
-        raise ValueError('No targets provided')
-
-      for target in integ_targets:
-        if target not in Target:
-          raise ValueError('Invalid integration target: %s' % target)
-        elif target in all_run_targets:
-          run_targets.append(target)
-        else:
-          attribute_targets.append(target)
-
-      # check if we were told to use all run targets
-
-      if Target.RUN_ALL in attribute_targets:
-        attribute_targets.remove(Target.RUN_ALL)
-        run_targets = all_run_targets
-
-      # if no RUN_* targets are provided then keep the default (otherwise we
-      # won't have any tests to run)
-
-      if run_targets:
-        args['run_targets'] = run_targets
-
-      args['attribute_targets'] = attribute_targets
-    elif opt == '--test':
-      args['specific_test'] = arg
-    elif opt in ('-l', '--log'):
-      arg = arg.upper()
-
-      if arg not in stem.util.log.LOG_VALUES:
-        raise ValueError(LOG_TYPE_ERROR % arg)
-
-      args['logging_runlevel'] = arg
-    elif opt in ('--tor'):
-      args['tor_path'] = arg
-    elif opt in ('-q', '--quiet'):
-      args['quiet'] = True
-    elif opt in ('-v', '--verbose'):
-      args['verbose'] = True
-    elif opt in ('-h', '--help'):
-      args['print_help'] = True
-
-  # translates our args dict into a named tuple
-
-  Args = collections.namedtuple('Args', args.keys())
-  return Args(**args)
-
-
 def _print_static_issues(static_check_issues):
   if static_check_issues:
     println('STATIC CHECKS', STATUS)
@@ -465,7 +339,7 @@ def _run_test(args, test_class, output_filters, logging_buffer):
 
   try:
     suite = unittest.TestLoader().loadTestsFromName(test_class)
-  except AttributeError, e:
+  except AttributeError:
     # should only come up if user provided '--test' for something that doesn't exist
     println(" no such test", ERROR)
     return None
diff --git a/stem/interpreter/arguments.py b/stem/interpreter/arguments.py
index d177b83..eadd043 100644
--- a/stem/interpreter/arguments.py
+++ b/stem/interpreter/arguments.py
@@ -22,13 +22,7 @@ DEFAULT_ARGS = {
 }
 
 OPT = 'i:s:h'
-
-OPT_EXPANDED = [
-  'interface=',
-  'socket=',
-  'no-color',
-  'help',
-]
+OPT_EXPANDED = ['interface=', 'socket=', 'no-color', 'help']
 
 
 def parse(argv):
@@ -50,7 +44,7 @@
     if unrecognized_args:
       error_msg = "aren't recognized arguments" if len(unrecognized_args) > 1 else "isn't a recognized argument"
       raise getopt.GetoptError("'%s' %s" % ("', '".join(unrecognized_args), error_msg))
-  except getopt.GetoptError as exc:
+  except Exception as exc:
     raise ValueError('%s (for usage provide --help)' % exc)
 
   for opt, arg in recognized_args:
diff --git a/test/arguments.py b/test/arguments.py
new file mode 100644
index 0000000..48cd383
--- /dev/null
+++ b/test/arguments.py
@@ -0,0 +1,149 @@
+# Copyright 2015, Damian Johnson and The Tor Project
+# See LICENSE for licensing information
+
+"""
+Commandline argument parsing for our test runner.
+"""
+
+import collections
+import getopt
+
+import stem.util.conf
+import stem.util.log
+
+from test.util import Target
+
+LOG_TYPE_ERROR = """\
+'%s' isn't a logging runlevel, use one of the following instead:
+  TRACE, DEBUG, INFO, NOTICE, WARN, ERROR
+"""
+
+CONFIG = stem.util.conf.config_dict('test', {
+  'msg.help': '',
+  'target.description': {},
+  'target.torrc': {},
+})
+
+DEFAULT_ARGS = {
+  'run_unit': False,
+  'run_integ': False,
+  'specific_test': None,
+  'logging_runlevel': None,
+  'tor_path': 'tor',
+  'run_targets': [Target.RUN_OPEN],
+  'attribute_targets': [],
+  'quiet': False,
+  'verbose': False,
+  'print_help': False,
+}
+
+OPT = 'auit:l:qvh'
+OPT_EXPANDED = ['all', 'unit', 'integ', 'targets=', 'test=', 'log=', 'tor=', 'quiet', 'verbose', 'help']
+
+
+def parse(argv):
+  """
+  Parses our arguments, providing a named tuple with their values.
+
+  :param list argv: input arguments to be parsed
+
+  :returns: a **named tuple** with our parsed arguments
+
+  :raises: **ValueError** if we got an invalid argument
+  """
+
+  args = dict(DEFAULT_ARGS)
+
+  try:
+    recognized_args, unrecognized_args = getopt.getopt(argv, OPT, OPT_EXPANDED)
+
+    if unrecognized_args:
+      error_msg = "aren't recognized arguments" if len(unrecognized_args) > 1 else "isn't a recognized argument"
+      raise getopt.GetoptError("'%s' %s" % ("', '".join(unrecognized_args), error_msg))
+  except Exception as exc:
+    raise ValueError('%s (for usage provide --help)' % exc)
+
+  for opt, arg in recognized_args:
+    if opt in ('-a', '--all'):
+      args['run_unit'] = True
+      args['run_integ'] = True
+    elif opt in ('-u', '--unit'):
+      args['run_unit'] = True
+    elif opt in ('-i', '--integ'):
+      args['run_integ'] = True
+    elif opt in ('-t', '--targets'):
+      run_targets, attribute_targets = [], []
+
+      integ_targets = arg.split(',')
+      all_run_targets = [t for t in Target if CONFIG['target.torrc'].get(t) is not None]
+
+      # validates the targets and split them into run and attribute targets
+
+      if not integ_targets:
+        raise ValueError('No targets provided')
+
+      for target in integ_targets:
+        if target not in Target:
+          raise ValueError('Invalid integration target: %s' % target)
+        elif target in all_run_targets:
+          run_targets.append(target)
+        else:
+          attribute_targets.append(target)
+
+      # check if we were told to use all run targets
+
+      if Target.RUN_ALL in attribute_targets:
+        attribute_targets.remove(Target.RUN_ALL)
+        run_targets = all_run_targets
+
+      # if no RUN_* targets are provided then keep the default (otherwise we
+      # won't have any tests to run)
+
+      if run_targets:
+        args['run_targets'] = run_targets
+
+      args['attribute_targets'] = attribute_targets
+    elif opt == '--test':
+      args['specific_test'] = arg
+    elif opt in ('-l', '--log'):
+      arg = arg.upper()
+
+      if arg not in stem.util.log.LOG_VALUES:
+        raise ValueError(LOG_TYPE_ERROR % arg)
+
+      args['logging_runlevel'] = arg
+    elif opt in ('--tor'):
+      args['tor_path'] = arg
+    elif opt in ('-q', '--quiet'):
+      args['quiet'] = True
+    elif opt in ('-v', '--verbose'):
+      args['verbose'] = True
+    elif opt in ('-h', '--help'):
+      args['print_help'] = True
+
+  # translates our args dict into a named tuple
+
+  Args = collections.namedtuple('Args', args.keys())
+  return Args(**args)
+
+
+def get_help():
+  """
+  Provides usage information, as provided by the '--help' argument. This
+  includes a listing of the valid integration targets.
+
+  :returns: **str** with our usage information
+  """
+
+  help_msg = CONFIG['msg.help']
+
+  # gets the longest target length so we can show the entries in columns
+  target_name_length = max(map(len, Target))
+  description_format = '\n %%-%is - %%s' % target_name_length
+
+  for target in Target:
+    help_msg += description_format % (target, CONFIG['target.description'].get(target, ''))
+
+  help_msg += '\n'
+
+  return help_msg
diff --git a/test/util.py b/test/util.py
index 55243a3..5614e4f 100644
--- a/test/util.py
+++ b/test/util.py
@@ -11,7 +11,6 @@ Helper functions for our test framework.
   get_prereq - provides the tor version required to run the given target
   get_torrc_entries - provides the torrc entries for a given target
-  get_help_message - provides usage information for running our tests
 
 Sets of :class:`~test.util.Task` instances can be ran with
 :func:`~test.util.run_tasks`. Functions that are intended for easy use with
@@ -45,8 +44,6 @@ import test.output
 from test.output import STATUS, ERROR, NO_NL, println
 
 CONFIG = stem.util.conf.config_dict('test', {
-  'msg.help': '',
-  'target.description': {},
   'target.prereq': {},
   'target.torrc': {},
   'integ.test_directory': './test/data',
@@ -54,6 +51,15 @@ CONFIG = stem.util.conf.config_dict('test', {
   'test.integ_tests': '',
 })
+# Integration targets fall into two categories:
+#
+#   * Run Targets (like RUN_COOKIE and RUN_PTRACE) which customize our torrc.
+#     We do an integration test run for each run target we get.
+#
+#   * Attribute Target (like CHROOT and ONLINE) which indicates
+#     non-configuration changes to your test runs. These are applied to all
+#     integration runs that we perform.
+
 Target = stem.util.enum.UppercaseEnum(
   'ONLINE',
   'RELATIVE',
@@ -121,28 +127,6 @@ def _get_tests(modules, module_prefix):
   yield '%s.%s' % (import_name, test)
 
 
-def get_help_message():
-  """
-  Provides usage information, as provided by the '--help' argument. This
-  includes a listing of the valid integration targets.
-
-  :returns: **str** with our usage information
-  """
-
-  help_msg = CONFIG['msg.help']
-
-  # gets the longest target length so we can show the entries in columns
-  target_name_length = max(map(len, Target))
-  description_format = '\n %%-%is - %%s' % target_name_length
-
-  for target in Target:
-    help_msg += description_format % (target, CONFIG['target.description'].get(target, ''))
-
-  help_msg += '\n'
-
-  return help_msg
-
-
 def get_prereq(target):
   """
   Provides the tor version required to run the given target. If the target
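For reference, here is a rough sketch of exercising the new module directly. It is based on the parse() and get_help() functions added above; the flag spellings come from OPT and OPT_EXPANDED, and get_help() only has real text to show once test/settings.cfg is loaded (its 'msg.help' and 'target.description' entries default to empty):

  import test.arguments

  # parse() merges the flags into DEFAULT_ARGS and hands back a namedtuple.
  args = test.arguments.parse(['--unit', '--log', 'INFO'])
  print(args.run_unit)           # True
  print(args.logging_runlevel)   # 'INFO'

  # Unrecognized flags and invalid values surface as a ValueError with a usage hint.
  try:
    test.arguments.parse(['--bogus'])
  except ValueError as exc:
    print(exc)

  # Usage text, including the column of integration targets.
  print(test.arguments.get_help())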