tor-commits
Threads by month
- ----- 2025 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
April 2013
- 19 participants
- 1497 discussions
14 Apr '13
commit 6076f4cf8375565acdde8dfbfffe2be4a2327480
Author: Damian Johnson <atagar(a)torproject.org>
Date: Fri Apr 12 09:13:30 2013 -0700
Moving clean_orphaned_pyc() to test utils
Moving the functional bit of clean_orphaned_pyc() to the test utilities (the
module specifically will *not* be used for generating output).
---
run_tests.py | 42 +++++++++++-------------------------------
test/util.py | 46 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 57 insertions(+), 31 deletions(-)
diff --git a/run_tests.py b/run_tests.py
index 05f409c..cbb1941 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -46,36 +46,6 @@ base = os.path.sep.join(__file__.split(os.path.sep)[:-1]).lstrip("./")
SOURCE_BASE_PATHS = [os.path.join(base, path) for path in ('stem', 'test', 'run_tests.py')]
-def _clean_orphaned_pyc():
- test.output.print_noline(" checking for orphaned .pyc files... ", *test.runner.STATUS_ATTR)
-
- orphaned_pyc = []
-
- for base_dir in SOURCE_BASE_PATHS:
- for pyc_path in test.static_checks._get_files_with_suffix(base_dir, ".pyc"):
- # If we're running python 3 then the *.pyc files are no longer bundled
- # with the *.py. Rather, they're in a __pycache__ directory.
- #
- # At the moment there's no point in checking for orphaned bytecode with
- # python 3 because it's an exported copy of the python 2 codebase, so
- # skipping.
-
- if "__pycache__" in pyc_path:
- continue
-
- if not os.path.exists(pyc_path[:-1]):
- orphaned_pyc.append(pyc_path)
-
- if not orphaned_pyc:
- # no orphaned files, nothing to do
- test.output.print_line("done", *test.runner.STATUS_ATTR)
- else:
- print
- for pyc_file in orphaned_pyc:
- test.output.print_error(" removing %s" % pyc_file)
- os.remove(pyc_file)
-
-
def _python3_setup(python3_destination, clean):
# Python 2.7.3 added some nice capabilities to 2to3, like '--output-dir'...
#
@@ -321,7 +291,17 @@ if __name__ == '__main__':
test.output.print_divider("INITIALISING", True)
test.output.print_line("Performing startup activities...", *test.runner.STATUS_ATTR)
- _clean_orphaned_pyc()
+ test.output.print_noline(" checking for orphaned .pyc files... ", *test.runner.STATUS_ATTR)
+
+ orphaned_pyc = test.util.clean_orphaned_pyc(SOURCE_BASE_PATHS)
+
+ if not orphaned_pyc:
+ # no orphaned files, nothing to do
+ test.output.print_line("done", *test.runner.STATUS_ATTR)
+ else:
+ print
+ for pyc_file in orphaned_pyc:
+ test.output.print_error(" removed %s" % pyc_file)
print
diff --git a/test/util.py b/test/util.py
index 8ce61bf..bd5dd37 100644
--- a/test/util.py
+++ b/test/util.py
@@ -5,10 +5,16 @@ Helper functions for our test framework.
get_unit_tests - provides our unit tests
get_integ_tests - provides our integration tests
+
+ clean_orphaned_pyc - removes any *.pyc without a corresponding *.py
"""
+import os
+
import stem.util.conf
+import test.static_checks
+
CONFIG = stem.util.conf.config_dict("test", {
"test.unit_tests": "",
"test.integ_tests": "",
@@ -60,3 +66,43 @@ def _get_tests(modules, prefix):
module = getattr(module, subcomponent)
yield module
+
+
+def clean_orphaned_pyc(paths):
+ """
+ Deletes any file with a *.pyc extension without a corresponding *.py. This
+ helps to address a common gotcha when deleting python files...
+
+ * You delete module 'foo.py' and run the tests to ensure that you haven't
+ broken anything. They pass, however there *are* still some 'import foo'
+ statements that still work because the bytecode (foo.pyc) is still around.
+
+ * You push your change.
+
+ * Another developer clones our repository and is confused because we have a
+ bunch of ImportErrors.
+
+ :param list paths: paths to search for orphaned pyc files
+
+ :returns: list of files that we deleted
+ """
+
+ orphaned_pyc = []
+
+ for base_dir in paths:
+ for pyc_path in test.static_checks._get_files_with_suffix(base_dir, ".pyc"):
+ # If we're running python 3 then the *.pyc files are no longer bundled
+ # with the *.py. Rather, they're in a __pycache__ directory.
+ #
+ # At the moment there's no point in checking for orphaned bytecode with
+ # python 3 because it's an exported copy of the python 2 codebase, so
+ # skipping.
+
+ if "__pycache__" in pyc_path:
+ continue
+
+ if not os.path.exists(pyc_path[:-1]):
+ orphaned_pyc.append(pyc_path)
+ os.remove(pyc_path)
+
+ return orphaned_pyc
1
0
commit b1f3504ac7fac93b5a26fd02525cc962228aaea2
Author: Damian Johnson <atagar(a)torproject.org>
Date: Sat Apr 13 10:05:57 2013 -0700
Dropping stem.prereq.is_python_26()
The is_python_26() check was used to support python 2.5 hacks. Dropping this
and the hacks that it was supporting.
---
stem/descriptor/reader.py | 6 +-----
stem/prereq.py | 23 ++++-------------------
stem/process.py | 15 +++------------
test/integ/process.py | 19 ++-----------------
test/runner.py | 7 +------
5 files changed, 11 insertions(+), 59 deletions(-)
diff --git a/stem/descriptor/reader.py b/stem/descriptor/reader.py
index 023777a..d764ef6 100644
--- a/stem/descriptor/reader.py
+++ b/stem/descriptor/reader.py
@@ -418,11 +418,7 @@ class DescriptorReader(object):
continue
if os.path.isdir(target):
- if stem.prereq.is_python_26():
- walker = os.walk(target, followlinks = self._follow_links)
- else:
- walker = os.walk(target)
-
+ walker = os.walk(target, followlinks = self._follow_links)
self._handle_walker(walker, new_processed_files)
else:
self._handle_file(target, new_processed_files)
diff --git a/stem/prereq.py b/stem/prereq.py
index 41ba4b9..6017402 100644
--- a/stem/prereq.py
+++ b/stem/prereq.py
@@ -2,14 +2,10 @@
# See LICENSE for licensing information
"""
-Checks for stem dependencies. We require python 2.5 or greater (in the 2.x
-series). Other requirements for complete functionality are...
+Checks for stem dependencies. We require python 2.6 or greater (including the
+3.x series). Other requirements for complete functionality are...
-* Python 2.6
-
- * os.walk's followlinks argument
-
-* rsa module
+* pycrypto module
* validating descriptor signature integrity
@@ -17,11 +13,10 @@ series). Other requirements for complete functionality are...
check_requirements - checks for minimum requirements for running stem
- is_python_26 - checks if python 2.6 or later is available
is_python_27 - checks if python 2.7 or later is available
is_python_3 - checks if python 3.0 or later is available
- is_rsa_available - checks if the rsa module is available
+ is_crypto_available - checks if the pycrypto module is available
"""
import sys
@@ -43,16 +38,6 @@ def check_requirements():
raise ImportError("stem requires python version 2.6 or greater")
-def is_python_26():
- """
- Checks if we're running python 2.6 or above.
-
- :returns: bool that is True if we meet this requirement and False otherwise
- """
-
- return _check_version(6)
-
-
def is_python_27():
"""
Checks if we're running python 2.7 or above.
diff --git a/stem/process.py b/stem/process.py
index 45a064a..36307fc 100644
--- a/stem/process.py
+++ b/stem/process.py
@@ -113,14 +113,7 @@ def launch_tor(tor_cmd = "tor", args = None, torrc_path = None, completion_perce
except:
pass
- # We can't kill the subprocess on python 2.5 running Windows without the
- # win32process module...
- # http://stackoverflow.com/questions/552423/use-python-2-6-subprocess-module-…
-
- if stem.prereq.is_python_26():
- tor_process.kill()
- elif not stem.util.system.is_windows():
- os.kill(tor_process.pid, signal.SIGTERM)
+ tor_process.kill()
raise OSError("reached a %i second timeout without success" % timeout)
@@ -147,10 +140,8 @@ def launch_tor(tor_cmd = "tor", args = None, torrc_path = None, completion_perce
signal.alarm(0) # stop alarm
# ... but best make sure
- if stem.prereq.is_python_26():
- tor_process.kill()
- elif not stem.util.system.is_windows():
- os.kill(tor_process.pid, signal.SIGTERM)
+
+ tor_process.kill()
raise OSError("Process terminated: %s" % last_problem)
diff --git a/test/integ/process.py b/test/integ/process.py
index 59663f3..414d233 100644
--- a/test/integ/process.py
+++ b/test/integ/process.py
@@ -21,11 +21,7 @@ from test import mocking
def _kill_process(process):
- if stem.prereq.is_python_26():
- process.kill()
- elif not stem.util.system.is_windows():
- os.kill(process.pid, signal.SIGTERM)
-
+ process.kill()
process.communicate() # block until its definitely gone
@@ -42,10 +38,6 @@ class TestProcess(unittest.TestCase):
Exercises launch_tor_with_config.
"""
- if not stem.prereq.is_python_26() and stem.util.system.is_windows():
- test.runner.skip(self, "(unable to kill subprocesses)")
- return
-
if test.runner.only_run_once(self, "test_launch_tor_with_config"):
return
@@ -83,10 +75,6 @@ class TestProcess(unittest.TestCase):
Runs launch_tor where it times out before completing.
"""
- if not stem.prereq.is_python_26() and stem.util.system.is_windows():
- test.runner.skip(self, "(unable to kill subprocesses)")
- return
-
if test.runner.only_run_once(self, "test_launch_tor_with_timeout"):
return
@@ -105,10 +93,7 @@ class TestProcess(unittest.TestCase):
test this we spawn a process and trick tor into thinking that it is us.
"""
- if not stem.prereq.is_python_26() and stem.util.system.is_windows():
- test.runner.skip(self, "(unable to kill subprocesses)")
- return
- elif not stem.util.system.is_available("sleep"):
+ if not stem.util.system.is_available("sleep"):
test.runner.skip(self, "('sleep' command is unavailable)")
return
elif test.runner.only_run_once(self, "test_take_ownership_via_pid"):
diff --git a/test/runner.py b/test/runner.py
index aa4e4a9..bcff7a0 100644
--- a/test/runner.py
+++ b/test/runner.py
@@ -364,12 +364,7 @@ class Runner(object):
# an OSError ([Errno 3] No such process)
try:
- if stem.prereq.is_python_26():
- self._tor_process.kill()
- elif not stem.util.system.is_windows():
- os.kill(self._tor_process.pid, signal.SIGTERM)
- else:
- test.output.print_error("failed (unable to call kill() in python 2.5)")
+ self._tor_process.kill()
except OSError:
pass
1
0
14 Apr '13
commit 3d7bcee1aeba0f2ec2b8d808a1b57ba45ef731ae
Author: Damian Johnson <atagar(a)torproject.org>
Date: Fri Apr 12 23:14:06 2013 -0700
Merging static_checks.py with the test utils
Revising the static check functions and merging them with the test util module.
---
run_tests.py | 27 ++++--
test/__init__.py | 2 -
test/settings.cfg | 34 ++++++++
test/static_checks.py | 225 -------------------------------------------------
test/util.py | 160 ++++++++++++++++++++++++++++++++++-
5 files changed, 207 insertions(+), 241 deletions(-)
diff --git a/run_tests.py b/run_tests.py
index cbb1941..5e80441 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -23,7 +23,6 @@ from stem.util import log, system, term
import test.output
import test.runner
-import test.static_checks
import test.util
from test.runner import Target
@@ -47,6 +46,14 @@ SOURCE_BASE_PATHS = [os.path.join(base, path) for path in ('stem', 'test', 'run_
def _python3_setup(python3_destination, clean):
+ """
+ Exports the python3 counterpart of our codebase using 2to3.
+
+ :param str python3_destination: location to export our codebase to
+ :param bool clean: deletes our previously exported codebase if **True**,
+ otherwise this is a no-op
+ """
+
# Python 2.7.3 added some nice capabilities to 2to3, like '--output-dir'...
#
# http://docs.python.org/2/library/2to3.html
@@ -94,8 +101,8 @@ def _python3_setup(python3_destination, clean):
return True
-def _print_style_issues(run_unit, run_integ, run_style):
- style_issues = test.static_checks.get_issues(SOURCE_BASE_PATHS)
+def _print_static_issues(run_unit, run_integ, run_style):
+ static_check_issues = {}
# If we're doing some sort of testing (unit or integ) and pyflakes is
# available then use it. Its static checks are pretty quick so there's not
@@ -103,23 +110,23 @@ def _print_style_issues(run_unit, run_integ, run_style):
if run_unit or run_integ:
if system.is_available("pyflakes"):
- style_issues.update(test.static_checks.pyflakes_issues(SOURCE_BASE_PATHS))
+ static_check_issues.update(test.util.get_pyflakes_issues(SOURCE_BASE_PATHS))
else:
test.output.print_error("Static error checking requires pyflakes. Please install it from ...\n http://pypi.python.org/pypi/pyflakes\n")
if run_style:
if system.is_available("pep8"):
- style_issues.update(test.static_checks.pep8_issues(SOURCE_BASE_PATHS))
+ static_check_issues = test.util.get_stylistic_issues(SOURCE_BASE_PATHS)
else:
test.output.print_error("Style checks require pep8. Please install it from...\n http://pypi.python.org/pypi/pep8\n")
- if style_issues:
- test.output.print_line("STYLE ISSUES", term.Color.BLUE, term.Attr.BOLD)
+ if static_check_issues:
+ test.output.print_line("STATIC CHECKS", term.Color.BLUE, term.Attr.BOLD)
- for file_path in style_issues:
+ for file_path in static_check_issues:
test.output.print_line("* %s" % file_path, term.Color.BLUE, term.Attr.BOLD)
- for line_number, msg in style_issues[file_path]:
+ for line_number, msg in static_check_issues[file_path]:
line_count = "%-4s" % line_number
test.output.print_line(" line %s - %s" % (line_count, msg))
@@ -411,7 +418,7 @@ if __name__ == '__main__':
# TODO: note unused config options afterward?
if not stem.prereq.is_python_3():
- _print_style_issues(run_unit, run_integ, run_style)
+ _print_static_issues(run_unit, run_integ, run_style)
runtime = time.time() - start_time
diff --git a/test/__init__.py b/test/__init__.py
index bc7c694..ff55c3a 100644
--- a/test/__init__.py
+++ b/test/__init__.py
@@ -12,7 +12,5 @@ __all__ = [
"output",
"prompt",
"runner",
- "static_checks",
- "tutorial",
"utils",
]
diff --git a/test/settings.cfg b/test/settings.cfg
index 625a482..9d28f4d 100644
--- a/test/settings.cfg
+++ b/test/settings.cfg
@@ -92,6 +92,40 @@ target.torrc RUN_SOCKET => SOCKET
target.torrc RUN_SCOOKIE => SOCKET, COOKIE
target.torrc RUN_PTRACE => PORT, PTRACE
+# PEP8 compliance issues that we're ignoring...
+#
+# * E251 no spaces around keyword / parameter equals
+#
+# This one I dislike a great deal. It makes keyword arguments different
+# from assignments which looks... awful. I'm not sure what PEP8's author
+# was on when he wrote this one but it's stupid.
+#
+# Someone else can change this if they really care.
+#
+# * E501 line is over 79 characters
+#
+# We're no longer on TTY terminals. Overly constraining line length makes
+# things far less readable, encouraging bad practices like abbreviated
+# variable names.
+#
+# If the code fits on my tiny netbook screen then it's narrow enough.
+#
+# * E111 and E121 four space indentations
+#
+# Ahhh, indentation. The holy war that'll never die. Sticking with two
+# space indentations since it leads to shorter lines.
+#
+# * E127 continuation line over-indented for visual indent
+#
+# Pep8 only works with this one if we have four space indents (its
+# detection is based on multiples of four).
+
+pep8.ignore E111
+pep8.ignore E121
+pep8.ignore E501
+pep8.ignore E251
+pep8.ignore E127
+
# False positives from pyflakes. These are mappings between the path and the
# issue.
diff --git a/test/static_checks.py b/test/static_checks.py
deleted file mode 100644
index 307a858..0000000
--- a/test/static_checks.py
+++ /dev/null
@@ -1,225 +0,0 @@
-# Copyright 2012-2013, Damian Johnson
-# See LICENSE for licensing information
-
-"""
-Performs a check that our python source code follows its whitespace conventions
-which are...
-
-* two space indentations
-* tabs are the root of all evil and should be shot on sight
-* standard newlines (\\n), not windows (\\r\\n) nor classic mac (\\r)
-"""
-
-import re
-import os
-
-from stem.util import conf, system
-
-# mapping of files to the issues that should be ignored
-PYFLAKES_IGNORE = None
-
-CONFIG = conf.config_dict("test", {
- "pyflakes.ignore": [],
- "integ.test_directory": "./test/data",
-})
-
-
-def pep8_issues(base_paths):
- """
- Checks for stylistic issues that are an issue according to the parts of PEP8
- we conform to.
-
- :param str,list base_paths: directory to be iterated over
-
- :returns: dict of the form ``path => [(line_number, message)...]``
- """
-
- if isinstance(base_paths, (tuple, list)):
- results = {}
-
- for path in base_paths:
- results.update(pep8_issues(path))
-
- return results
-
- # The pep8 command give output of the form...
- #
- # FILE:LINE:CHARACTER ISSUE
- #
- # ... for instance...
- #
- # ./test/mocking.py:868:31: E225 missing whitespace around operator
- #
- # Ignoring the following compliance issues.
- #
- # * E251 no spaces around keyword / parameter equals
- #
- # This one I dislike a great deal. It makes keyword arguments different
- # from assignments which looks... aweful. I'm not sure what PEP8's author
- # was on when he wrote this one but it's stupid.
- #
- # Someone else can change this if they really care.
- #
- # * E501 line is over 79 characters
- #
- # We're no longer on TTY terminals. Overly constraining line length makes
- # things far less readable, encouraging bad practices like abbreviated
- # variable names.
- #
- # If the code fits on my tiny netbook screen then it's narrow enough.
- #
- # * E111 and E121 four space indentations
- #
- # Ahhh, indentation. The holy war that'll never die. Sticking with two
- # space indentations since it leads to shorter lines.
- #
- # * E127 continuation line over-indented for visual indent
- #
- # Pep8 only works with this one if we have four space indents (its
- # detection is based on multiples of four).
-
- ignored_issues = "E111,E121,E501,E251,E127"
-
- issues = {}
- pep8_output = system.call("pep8 --ignore %s %s" % (ignored_issues, base_paths))
-
- for line in pep8_output:
- line_match = re.match("^(.*):(\d+):(\d+): (.*)$", line)
-
- if line_match:
- path, line, _, issue = line_match.groups()
-
- if not _is_test_data(path):
- issues.setdefault(path, []).append((int(line), issue))
-
- return issues
-
-
-def pyflakes_issues(base_paths):
- """
- Checks for issues via pyflakes. False positives can be whitelisted via our
- test configuration.
-
- :param str,list base_paths: directory to be iterated over
-
- :returns: dict of the form ``path => [(line_number, message)...]``
- """
-
- if isinstance(base_paths, (tuple, list)):
- results = {}
-
- for path in base_paths:
- results.update(pyflakes_issues(path))
-
- return results
-
- global PYFLAKES_IGNORE
-
- if PYFLAKES_IGNORE is None:
- pyflakes_ignore = {}
-
- for line in CONFIG["pyflakes.ignore"]:
- path, issue = line.split("=>")
- pyflakes_ignore.setdefault(path.strip(), []).append(issue.strip())
-
- PYFLAKES_IGNORE = pyflakes_ignore
-
- # Pyflakes issues are of the form...
- #
- # FILE:LINE: ISSUE
- #
- # ... for instance...
- #
- # stem/prereq.py:73: 'long_to_bytes' imported but unused
- # stem/control.py:957: undefined name 'entry'
-
- issues = {}
- pyflakes_output = system.call("pyflakes %s" % base_paths)
-
- for line in pyflakes_output:
- line_match = re.match("^(.*):(\d+): (.*)$", line)
-
- if line_match:
- path, line, issue = line_match.groups()
-
- if not _is_test_data(path) and not issue in PYFLAKES_IGNORE.get(path, []):
- issues.setdefault(path, []).append((int(line), issue))
-
- return issues
-
-
-def get_issues(base_paths):
- """
- Checks python source code in the given directory for whitespace issues.
-
- :param str,list base_paths: directory to be iterated over
-
- :returns: dict of the form ``path => [(line_number, message)...]``
- """
-
- if isinstance(base_paths, (tuple, list)):
- results = {}
-
- for path in base_paths:
- results.update(get_issues(path))
-
- return results
-
- # TODO: This does not check that block indentations are two spaces because
- # differentiating source from string blocks ("""foo""") is more of a pita
- # than I want to deal with right now.
-
- issues = {}
-
- for file_path in _get_files_with_suffix(base_paths):
- if _is_test_data(file_path):
- continue
-
- with open(file_path) as f:
- file_contents = f.read()
-
- lines, file_issues, prev_indent = file_contents.split("\n"), [], 0
- is_block_comment = False
-
- for index, line in enumerate(lines):
- whitespace, content = re.match("^(\s*)(.*)$", line).groups()
-
- if '"""' in content:
- is_block_comment = not is_block_comment
-
- if "\t" in whitespace:
- file_issues.append((index + 1, "indentation has a tab"))
- elif "\r" in content:
- file_issues.append((index + 1, "contains a windows newline"))
- elif content != content.rstrip():
- file_issues.append((index + 1, "line has trailing whitespace"))
-
- if file_issues:
- issues[file_path] = file_issues
-
- return issues
-
-
-def _is_test_data(path):
- return os.path.normpath(path).startswith(os.path.normpath(CONFIG["integ.test_directory"]))
-
-
-def _get_files_with_suffix(base_path, suffix = ".py"):
- """
- Iterates over files in a given directory, providing filenames with a certain
- suffix.
-
- :param str base_path: directory to be iterated over
- :param str suffix: filename suffix to look for
-
- :returns: iterator that yields the absolute path for files with the given suffix
- """
-
- if os.path.isfile(base_path):
- if base_path.endswith(suffix):
- yield base_path
- else:
- for root, _, files in os.walk(base_path):
- for filename in files:
- if filename.endswith(suffix):
- yield os.path.join(root, filename)
diff --git a/test/util.py b/test/util.py
index bd5dd37..9c0e23e 100644
--- a/test/util.py
+++ b/test/util.py
@@ -1,3 +1,6 @@
+# Copyright 2012-2013, Damian Johnson
+# See LICENSE for licensing information
+
"""
Helper functions for our test framework.
@@ -7,19 +10,27 @@ Helper functions for our test framework.
get_integ_tests - provides our integration tests
clean_orphaned_pyc - removes any *.pyc without a corresponding *.py
+ get_stylistic_issues - checks for PEP8 and other stylistic issues
+ get_pyflakes_issues - static checks for problems via pyflakes
"""
+import re
import os
import stem.util.conf
-
-import test.static_checks
+import stem.util.system
CONFIG = stem.util.conf.config_dict("test", {
+ "pep8.ignore": [],
+ "pyflakes.ignore": [],
+ "integ.test_directory": "./test/data",
"test.unit_tests": "",
"test.integ_tests": "",
})
+# mapping of files to the issues that should be ignored
+PYFLAKES_IGNORE = None
+
def get_unit_tests(prefix = None):
"""
@@ -89,8 +100,8 @@ def clean_orphaned_pyc(paths):
orphaned_pyc = []
- for base_dir in paths:
- for pyc_path in test.static_checks._get_files_with_suffix(base_dir, ".pyc"):
+ for path in paths:
+ for pyc_path in _get_files_with_suffix(path, ".pyc"):
# If we're running python 3 then the *.pyc files are no longer bundled
# with the *.py. Rather, they're in a __pycache__ directory.
#
@@ -106,3 +117,144 @@ def clean_orphaned_pyc(paths):
os.remove(pyc_path)
return orphaned_pyc
+
+
+def get_stylistic_issues(paths):
+ """
+ Checks for stylistic issues that are an issue according to the parts of PEP8
+ we conform to. This also checks a few other stylistic issues:
+
+ * two space indentations
+ * tabs are the root of all evil and should be shot on sight
+ * standard newlines (\\n), not windows (\\r\\n) nor classic mac (\\r)
+
+ :param list paths: paths to search for stylistic issues
+
+ :returns: dict of the form ``path => [(line_number, message)...]``
+ """
+
+ # The pep8 command give output of the form...
+ #
+ # FILE:LINE:CHARACTER ISSUE
+ #
+ # ... for instance...
+ #
+ # ./test/mocking.py:868:31: E225 missing whitespace around operator
+
+ ignored_issues = ','.join(CONFIG["pep8.ignore"])
+ issues = {}
+
+ for path in paths:
+ pep8_output = stem.util.system.call("pep8 --ignore %s %s" % (ignored_issues, path))
+
+ for line in pep8_output:
+ line_match = re.match("^(.*):(\d+):(\d+): (.*)$", line)
+
+ if line_match:
+ path, line, _, issue = line_match.groups()
+
+ if not _is_test_data(path):
+ issues.setdefault(path, []).append((int(line), issue))
+
+ for file_path in _get_files_with_suffix(path):
+ if _is_test_data(file_path):
+ continue
+
+ with open(file_path) as f:
+ file_contents = f.read()
+
+ lines, file_issues, prev_indent = file_contents.split("\n"), [], 0
+ is_block_comment = False
+
+ for index, line in enumerate(lines):
+ whitespace, content = re.match("^(\s*)(.*)$", line).groups()
+
+ # TODO: This does not check that block indentations are two spaces
+ # because differentiating source from string blocks ("""foo""") is more
+ # of a pita than I want to deal with right now.
+
+ if '"""' in content:
+ is_block_comment = not is_block_comment
+
+ if "\t" in whitespace:
+ file_issues.append((index + 1, "indentation has a tab"))
+ elif "\r" in content:
+ file_issues.append((index + 1, "contains a windows newline"))
+ elif content != content.rstrip():
+ file_issues.append((index + 1, "line has trailing whitespace"))
+
+ if file_issues:
+ issues[file_path] = file_issues
+
+ return issues
+
+
+def get_pyflakes_issues(paths):
+ """
+ Performs static checks via pyflakes.
+
+ :param list paths: paths to search for problems
+
+ :returns: dict of the form ``path => [(line_number, message)...]``
+ """
+
+ global PYFLAKES_IGNORE
+
+ if PYFLAKES_IGNORE is None:
+ pyflakes_ignore = {}
+
+ for line in CONFIG["pyflakes.ignore"]:
+ path, issue = line.split("=>")
+ pyflakes_ignore.setdefault(path.strip(), []).append(issue.strip())
+
+ PYFLAKES_IGNORE = pyflakes_ignore
+
+ # Pyflakes issues are of the form...
+ #
+ # FILE:LINE: ISSUE
+ #
+ # ... for instance...
+ #
+ # stem/prereq.py:73: 'long_to_bytes' imported but unused
+ # stem/control.py:957: undefined name 'entry'
+
+ issues = {}
+
+ for path in paths:
+ pyflakes_output = stem.util.system.call("pyflakes %s" % path)
+
+ for line in pyflakes_output:
+ line_match = re.match("^(.*):(\d+): (.*)$", line)
+
+ if line_match:
+ path, line, issue = line_match.groups()
+
+ if not _is_test_data(path) and not issue in PYFLAKES_IGNORE.get(path, []):
+ issues.setdefault(path, []).append((int(line), issue))
+
+ return issues
+
+
+def _is_test_data(path):
+ return os.path.normpath(path).startswith(os.path.normpath(CONFIG["integ.test_directory"]))
+
+
+def _get_files_with_suffix(base_path, suffix = ".py"):
+ """
+ Iterates over files in a given directory, providing filenames with a certain
+ suffix.
+
+ :param str base_path: directory to be iterated over
+ :param str suffix: filename suffix to look for
+
+ :returns: iterator that yields the absolute path for files with the given suffix
+ """
+
+ if os.path.isfile(base_path):
+ if base_path.endswith(suffix):
+ yield base_path
+ else:
+ for root, _, files in os.walk(base_path):
+ for filename in files:
+ if filename.endswith(suffix):
+ yield os.path.join(root, filename)
1
0
14 Apr '13
commit e7513978c0a39fb815eecaff2f8d9a8c958236df
Author: Damian Johnson <atagar(a)torproject.org>
Date: Sat Apr 13 12:41:22 2013 -0700
Less verbose usage of the test.output module
We use the print_line() function throughout the runner and run_tests.py.
Shortening it to 'println()' (pity we can't call it print()) and making its
usage a little nicer by flattening the input attributes.
---
run_tests.py | 59 +++++++++++++++++++++++++------------------------
test/integ/process.py | 1 -
test/output.py | 57 +++++++++++++++++++++++++++++++++--------------
test/runner.py | 42 +++++++++++++++-------------------
4 files changed, 89 insertions(+), 70 deletions(-)
diff --git a/run_tests.py b/run_tests.py
index 5e80441..4116e40 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -25,6 +25,7 @@ import test.output
import test.runner
import test.util
+from test.output import println, STATUS, ERROR, NO_NL
from test.runner import Target
OPT = "auist:l:h"
@@ -67,7 +68,7 @@ def _python3_setup(python3_destination, clean):
shutil.rmtree(python3_destination, ignore_errors = True)
if os.path.exists(python3_destination):
- test.output.print_error("Reusing '%s'. Run again with '--clean' if you want to recreate the python3 export." % python3_destination)
+ println("Reusing '%s'. Run again with '--clean' if you want to recreate the python3 export." % python3_destination, ERROR)
print
return True
@@ -81,21 +82,21 @@ def _python3_setup(python3_destination, clean):
else:
return []
- test.output.print_noline(" copying stem to '%s'... " % python3_destination, *test.runner.STATUS_ATTR)
+ println(" copying stem to '%s'... " % python3_destination, STATUS, NO_NL)
shutil.copytree('stem', os.path.join(python3_destination, 'stem'))
shutil.copytree('test', os.path.join(python3_destination, 'test'), ignore = _ignore)
shutil.copy('run_tests.py', os.path.join(python3_destination, 'run_tests.py'))
- test.output.print_line("done", *test.runner.STATUS_ATTR)
+ println("done", STATUS)
except OSError, exc:
- test.output.print_error("failed\n%s" % exc)
+ println("failed\n%s" % exc, ERROR)
return False
try:
- test.output.print_noline(" running 2to3... ", *test.runner.STATUS_ATTR)
+ println(" running 2to3... ", STATUS, NO_NL)
system.call("2to3 --write --nobackups --no-diffs %s" % python3_destination)
- test.output.print_line("done", *test.runner.STATUS_ATTR)
+ println("done", STATUS)
except OSError, exc:
- test.output.print_error("failed\n%s" % exc)
+ println("failed\n%s" % exc, ERROR)
return False
return True
@@ -112,23 +113,23 @@ def _print_static_issues(run_unit, run_integ, run_style):
if system.is_available("pyflakes"):
static_check_issues.update(test.util.get_pyflakes_issues(SOURCE_BASE_PATHS))
else:
- test.output.print_error("Static error checking requires pyflakes. Please install it from ...\n http://pypi.python.org/pypi/pyflakes\n")
+ println("Static error checking requires pyflakes. Please install it from ...\n http://pypi.python.org/pypi/pyflakes\n", ERROR)
if run_style:
if system.is_available("pep8"):
static_check_issues = test.util.get_stylistic_issues(SOURCE_BASE_PATHS)
else:
- test.output.print_error("Style checks require pep8. Please install it from...\n http://pypi.python.org/pypi/pep8\n")
+ println("Style checks require pep8. Please install it from...\n http://pypi.python.org/pypi/pep8\n", ERROR)
if static_check_issues:
- test.output.print_line("STATIC CHECKS", term.Color.BLUE, term.Attr.BOLD)
+ println("STATIC CHECKS", term.Color.BLUE, term.Attr.BOLD)
for file_path in static_check_issues:
- test.output.print_line("* %s" % file_path, term.Color.BLUE, term.Attr.BOLD)
+ println("* %s" % file_path, term.Color.BLUE, term.Attr.BOLD)
for line_number, msg in static_check_issues[file_path]:
line_count = "%-4s" % line_number
- test.output.print_line(" line %s - %s" % (line_count, msg))
+ println(" line %s - %s" % (line_count, msg))
print
@@ -259,7 +260,7 @@ if __name__ == '__main__':
if run_python3:
for required_cmd in ("2to3", "python3"):
if not system.is_available(required_cmd):
- test.output.print_error("Unable to test python 3 because %s isn't in your path" % required_cmd)
+ println("Unable to test python 3 because %s isn't in your path" % required_cmd, ERROR)
sys.exit(1)
if run_python3 and sys.version_info[0] != 3:
@@ -273,7 +274,7 @@ if __name__ == '__main__':
sys.exit(1) # failed to do python3 setup
if not run_unit and not run_integ and not run_style:
- test.output.print_line("Nothing to run (for usage provide --help)\n")
+ println("Nothing to run (for usage provide --help)\n")
sys.exit()
# if we have verbose logging then provide the testing config
@@ -297,18 +298,18 @@ if __name__ == '__main__':
test.output.print_divider("INITIALISING", True)
- test.output.print_line("Performing startup activities...", *test.runner.STATUS_ATTR)
- test.output.print_noline(" checking for orphaned .pyc files... ", *test.runner.STATUS_ATTR)
+ println("Performing startup activities...", STATUS)
+ println(" checking for orphaned .pyc files... ", STATUS, NO_NL)
orphaned_pyc = test.util.clean_orphaned_pyc(SOURCE_BASE_PATHS)
if not orphaned_pyc:
# no orphaned files, nothing to do
- test.output.print_line("done", *test.runner.STATUS_ATTR)
+ println("done", STATUS)
else:
print
for pyc_file in orphaned_pyc:
- test.output.print_error(" removed %s" % pyc_file)
+ println(" removed %s" % pyc_file, ERROR)
print
@@ -364,12 +365,12 @@ if __name__ == '__main__':
if opt in test.runner.Torrc.keys():
torrc_opts.append(test.runner.Torrc[opt])
else:
- test.output.print_line("'%s' isn't a test.runner.Torrc enumeration" % opt)
+ println("'%s' isn't a test.runner.Torrc enumeration" % opt)
sys.exit(1)
integ_runner.start(target, attribute_targets, tor_path, extra_torrc_opts = torrc_opts)
- test.output.print_line("Running tests...", term.Color.BLUE, term.Attr.BOLD)
+ println("Running tests...", term.Color.BLUE, term.Attr.BOLD)
print
for test_class in test.util.get_integ_tests(test_prefix):
@@ -391,15 +392,15 @@ if __name__ == '__main__':
active_threads = threading.enumerate()
if len(active_threads) > 1:
- test.output.print_error("Threads lingering after test run:")
+ println("Threads lingering after test run:", ERROR)
for lingering_thread in active_threads:
- test.output.print_error(" %s" % lingering_thread)
+ println(" %s" % lingering_thread, ERROR)
testing_failed = True
break
except KeyboardInterrupt:
- test.output.print_error(" aborted starting tor: keyboard interrupt\n")
+ println(" aborted starting tor: keyboard interrupt\n", ERROR)
break
except OSError:
testing_failed = True
@@ -411,7 +412,7 @@ if __name__ == '__main__':
for target in skip_targets:
req_version = stem.version.Requirement[CONFIG["target.prereq"][target]]
- test.output.print_line("Unable to run target %s, this requires tor version %s" % (target, req_version), term.Color.RED, term.Attr.BOLD)
+ println("Unable to run target %s, this requires tor version %s" % (target, req_version), ERROR)
print
@@ -430,16 +431,16 @@ if __name__ == '__main__':
has_error = testing_failed or error_tracker.has_error_occured()
if has_error:
- test.output.print_error("TESTING FAILED %s" % runtime_label)
+ println("TESTING FAILED %s" % runtime_label, ERROR)
for line in error_tracker:
- test.output.print_error(" %s" % line)
+ println(" %s" % line, ERROR)
elif skipped_test_count > 0:
- test.output.print_line("%i TESTS WERE SKIPPED" % skipped_test_count, term.Color.BLUE, term.Attr.BOLD)
- test.output.print_line("ALL OTHER TESTS PASSED %s" % runtime_label, term.Color.GREEN, term.Attr.BOLD)
+ println("%i TESTS WERE SKIPPED" % skipped_test_count, term.Color.BLUE, term.Attr.BOLD)
+ println("ALL OTHER TESTS PASSED %s" % runtime_label, term.Color.GREEN, term.Attr.BOLD)
print
else:
- test.output.print_line("TESTING PASSED %s" % runtime_label, term.Color.GREEN, term.Attr.BOLD)
+ println("TESTING PASSED %s" % runtime_label, term.Color.GREEN, term.Attr.BOLD)
print
sys.exit(1 if has_error else 0)
diff --git a/test/integ/process.py b/test/integ/process.py
index 414d233..3429adf 100644
--- a/test/integ/process.py
+++ b/test/integ/process.py
@@ -4,7 +4,6 @@ Tests the stem.process functions with various use cases.
import os
import shutil
-import signal
import subprocess
import tempfile
import time
diff --git a/test/output.py b/test/output.py
index 22f3605..b39e650 100644
--- a/test/output.py
+++ b/test/output.py
@@ -18,7 +18,14 @@ COLOR_SUPPORT = sys.stdout.isatty() and not system.is_windows()
DIVIDER = "=" * 70
HEADER_ATTR = (term.Color.CYAN, term.Attr.BOLD)
CATEGORY_ATTR = (term.Color.GREEN, term.Attr.BOLD)
-ERROR_ATTR = (term.Color.RED, term.Attr.BOLD)
+
+NO_NL = "no newline"
+
+# formatting for various categories of messages
+
+STATUS = (term.Color.BLUE, term.Attr.BOLD)
+SUBSTATUS = (term.Color.BLUE, )
+ERROR = (term.Color.RED, term.Attr.BOLD)
LineType = stem.util.enum.Enum("OK", "FAIL", "ERROR", "SKIPPED", "CONTENT")
@@ -38,41 +45,40 @@ LINE_ATTR = {
}
-def print_line(msg, *attr):
- if COLOR_SUPPORT:
- msg = term.format(msg, *attr)
-
- print msg
+def println(msg, *attr):
+ attr = _flatten(attr)
+ no_newline = False
+ if NO_NL in attr:
+ no_newline = True
+ attr.remove(NO_NL)
-def print_noline(msg, *attr):
if COLOR_SUPPORT:
msg = term.format(msg, *attr)
- sys.stdout.write(msg)
- sys.stdout.flush()
-
-
-def print_error(msg):
- print_line(msg, *ERROR_ATTR)
+ if no_newline:
+ sys.stdout.write(msg)
+ sys.stdout.flush()
+ else:
+ print msg
def print_divider(msg, is_header = False):
attr = HEADER_ATTR if is_header else CATEGORY_ATTR
- print_line("%s\n%s\n%s\n" % (DIVIDER, msg.center(70), DIVIDER), *attr)
+ println("%s\n%s\n%s\n" % (DIVIDER, msg.center(70), DIVIDER), *attr)
def print_logging(logging_buffer):
if not logging_buffer.is_empty():
for entry in logging_buffer:
- print_line(entry.replace("\n", "\n "), term.Color.MAGENTA)
+ println(entry.replace("\n", "\n "), term.Color.MAGENTA)
print
def print_config(test_config):
print_divider("TESTING CONFIG", True)
- print_line("Test configuration... ", term.Color.BLUE, term.Attr.BOLD)
+ println("Test configuration... ", term.Color.BLUE, term.Attr.BOLD)
for config_key in test_config.keys():
key_entry = " %s => " % config_key
@@ -81,7 +87,7 @@ def print_config(test_config):
value_div = ",\n" + (" " * len(key_entry))
value_entry = value_div.join(test_config.get_value(config_key, multiple = True))
- print_line(key_entry + value_entry, term.Color.BLUE)
+ println(key_entry + value_entry, term.Color.BLUE)
print
@@ -225,3 +231,20 @@ class ErrorTracker(object):
def __iter__(self):
for error_line in self._errors:
yield error_line
+
+
+def _flatten(seq):
+ # Flattens nested collections into a single list. For instance...
+ #
+ # >>> _flatten([1, [2, 3], 4])
+ # [1, 2, 3, 4]
+
+ result = []
+
+ for item in seq:
+ if (isinstance(item, (tuple, list))):
+ result.extend(_flatten(item))
+ else:
+ result.append(item)
+
+ return result
diff --git a/test/runner.py b/test/runner.py
index bcff7a0..2de1852 100644
--- a/test/runner.py
+++ b/test/runner.py
@@ -42,7 +42,6 @@ about the tor test instance they're running against.
import logging
import os
import shutil
-import signal
import stat
import tempfile
import threading
@@ -57,7 +56,7 @@ import stem.util.enum
import stem.version
import test.output
-from stem.util import term
+from test.output import println, STATUS, SUBSTATUS, NO_NL
CONFIG = stem.util.conf.config_dict("test", {
"integ.test_directory": "./test/data",
@@ -79,9 +78,6 @@ Target = stem.util.enum.UppercaseEnum(
"RUN_ALL",
)
-STATUS_ATTR = (term.Color.BLUE, term.Attr.BOLD)
-SUBSTATUS_ATTR = (term.Color.BLUE, )
-
SOCKS_HOST = "127.0.0.1"
SOCKS_PORT = 1112
@@ -295,7 +291,7 @@ class Runner(object):
if self._tor_process:
self.stop()
- test.output.print_line("Setting up a test instance...", *STATUS_ATTR)
+ println("Setting up a test instance...", STATUS)
# if 'test_directory' is unset then we make a new data directory in /tmp
# and clean it up when we're done
@@ -357,7 +353,7 @@ class Runner(object):
"""
with self._runner_lock:
- test.output.print_noline("Shutting down tor... ", *STATUS_ATTR)
+ println("Shutting down tor... ", STATUS, NO_NL)
if self._tor_process:
# if the tor process has stopped on its own then the following raises
@@ -386,7 +382,7 @@ class Runner(object):
self._custom_opts = None
self._tor_process = None
- test.output.print_line("done", *STATUS_ATTR)
+ println("done", STATUS)
def is_running(self):
"""
@@ -631,13 +627,13 @@ class Runner(object):
# makes a temporary data directory if needed
try:
- test.output.print_noline(" making test directory (%s)... " % self._test_dir, *STATUS_ATTR)
+ println(" making test directory (%s)... " % self._test_dir, STATUS, NO_NL)
if os.path.exists(self._test_dir):
- test.output.print_line("skipped", *STATUS_ATTR)
+ println("skipped", STATUS)
else:
os.makedirs(self._test_dir)
- test.output.print_line("done", *STATUS_ATTR)
+ println("done", STATUS)
except OSError, exc:
test.output.print_error("failed (%s)" % exc)
raise exc
@@ -649,16 +645,16 @@ class Runner(object):
if Torrc.SOCKET in self._custom_opts:
try:
socket_dir = os.path.dirname(CONTROL_SOCKET_PATH)
- test.output.print_noline(" making control socket directory (%s)... " % socket_dir, *STATUS_ATTR)
+ println(" making control socket directory (%s)... " % socket_dir, STATUS, NO_NL)
if os.path.exists(socket_dir) and stat.S_IMODE(os.stat(socket_dir).st_mode) == 0700:
- test.output.print_line("skipped", *STATUS_ATTR)
+ println("skipped", STATUS)
else:
if not os.path.exists(socket_dir):
os.makedirs(socket_dir)
os.chmod(socket_dir, 0700)
- test.output.print_line("done", *STATUS_ATTR)
+ println("done", STATUS)
except OSError, exc:
test.output.print_error("failed (%s)" % exc)
raise exc
@@ -668,7 +664,7 @@ class Runner(object):
if logging_path:
logging_path = stem.util.system.expand_path(logging_path, STEM_BASE)
- test.output.print_noline(" configuring logger (%s)... " % logging_path, *STATUS_ATTR)
+ println(" configuring logger (%s)... " % logging_path, STATUS, NO_NL)
# delete the old log
if os.path.exists(logging_path):
@@ -681,23 +677,23 @@ class Runner(object):
datefmt = '%D %H:%M:%S',
)
- test.output.print_line("done", *STATUS_ATTR)
+ println("done", STATUS)
else:
- test.output.print_line(" configuring logger... skipped", *STATUS_ATTR)
+ println(" configuring logger... skipped", STATUS)
# writes our testing torrc
torrc_dst = os.path.join(self._test_dir, "torrc")
try:
- test.output.print_noline(" writing torrc (%s)... " % torrc_dst, *STATUS_ATTR)
+ println(" writing torrc (%s)... " % torrc_dst, STATUS, NO_NL)
torrc_file = open(torrc_dst, "w")
torrc_file.write(self._torrc_contents)
torrc_file.close()
- test.output.print_line("done", *STATUS_ATTR)
+ println("done", STATUS)
for line in self._torrc_contents.strip().splitlines():
- test.output.print_line(" %s" % line.strip(), *SUBSTATUS_ATTR)
+ println(" %s" % line.strip(), SUBSTATUS)
print
except Exception, exc:
@@ -714,7 +710,7 @@ class Runner(object):
:raises: OSError if we either fail to create the tor process or reached a timeout without success
"""
- test.output.print_line("Starting tor...\n", *STATUS_ATTR)
+ println("Starting tor...\n", STATUS)
start_time = time.time()
try:
@@ -723,13 +719,13 @@ class Runner(object):
complete_percent = 100 if Target.ONLINE in self.attribute_targets else 5
# prints output from tor's stdout while it starts up
- print_init_line = lambda line: test.output.print_line(" %s" % line, *SUBSTATUS_ATTR)
+ print_init_line = lambda line: println(" %s" % line, SUBSTATUS)
torrc_dst = os.path.join(self._test_dir, "torrc")
self._tor_process = stem.process.launch_tor(tor_cmd, None, torrc_dst, complete_percent, print_init_line)
runtime = time.time() - start_time
- test.output.print_line(" done (%i seconds)\n" % runtime, *STATUS_ATTR)
+ println(" done (%i seconds)\n" % runtime, STATUS)
except OSError, exc:
test.output.print_error(" failed to start tor: %s\n" % exc)
raise exc
1
0
14 Apr '13
commit 8a09c4951afb3b806a75fed00e70f280287a76f2
Author: Damian Johnson <atagar(a)torproject.org>
Date: Sat Apr 13 12:54:51 2013 -0700
Dropping direct term usage in run_tests.py
Our use of the output module is trending more toward printing message
categories rather than specific terminal attributes. This will let us customize
how things are rendered through the output module rather than chasing down all
of the individual println() calls.
---
run_tests.py | 17 ++++++++---------
test/output.py | 2 ++
2 files changed, 10 insertions(+), 9 deletions(-)
diff --git a/run_tests.py b/run_tests.py
index f47511c..a2f093e 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -19,18 +19,17 @@ import stem.prereq
import stem.util.conf
import stem.util.enum
-from stem.util import log, system, term
+from stem.util import log, system
import test.output
import test.runner
import test.util
-from test.output import println, STATUS, ERROR, NO_NL
+from test.output import println, STATUS, SUCCESS, ERROR, NO_NL
from test.runner import Target
OPT = "auist:l:h"
OPT_EXPANDED = ["all", "unit", "integ", "style", "python3", "clean", "targets=", "test=", "log=", "tor=", "help"]
-DIVIDER = "=" * 70
CONFIG = stem.util.conf.config_dict("test", {
"msg.help": "",
@@ -121,10 +120,10 @@ def _print_static_issues(run_unit, run_integ, run_style):
println("Style checks require pep8. Please install it from...\n http://pypi.python.org/pypi/pep8\n", ERROR)
if static_check_issues:
- println("STATIC CHECKS", term.Color.BLUE, term.Attr.BOLD)
+ println("STATIC CHECKS", STATUS)
for file_path in static_check_issues:
- println("* %s" % file_path, term.Color.BLUE, term.Attr.BOLD)
+ println("* %s" % file_path, STATUS)
for line_number, msg in static_check_issues[file_path]:
line_count = "%-4s" % line_number
@@ -366,7 +365,7 @@ if __name__ == '__main__':
integ_runner.start(target, attribute_targets, tor_path, extra_torrc_opts = torrc_opts)
- println("Running tests...\n", term.Color.BLUE, term.Attr.BOLD)
+ println("Running tests...\n", STATUS)
for test_class in test.util.get_integ_tests(test_prefix):
test.output.print_divider(test_class.__module__)
@@ -431,9 +430,9 @@ if __name__ == '__main__':
for line in error_tracker:
println(" %s" % line, ERROR)
elif skipped_test_count > 0:
- println("%i TESTS WERE SKIPPED" % skipped_test_count, term.Color.BLUE, term.Attr.BOLD)
- println("ALL OTHER TESTS PASSED %s\n" % runtime_label, term.Color.GREEN, term.Attr.BOLD)
+ println("%i TESTS WERE SKIPPED" % skipped_test_count, STATUS)
+ println("ALL OTHER TESTS PASSED %s\n" % runtime_label, SUCCESS)
else:
- println("TESTING PASSED %s\n" % runtime_label, term.Color.GREEN, term.Attr.BOLD)
+ println("TESTING PASSED %s\n" % runtime_label, SUCCESS)
sys.exit(1 if has_error else 0)
diff --git a/test/output.py b/test/output.py
index f174c7b..d965e76 100644
--- a/test/output.py
+++ b/test/output.py
@@ -25,6 +25,8 @@ NO_NL = "no newline"
STATUS = (term.Color.BLUE, term.Attr.BOLD)
SUBSTATUS = (term.Color.BLUE, )
+
+SUCCESS = (term.Color.GREEN, term.Attr.BOLD)
ERROR = (term.Color.RED, term.Attr.BOLD)
LineType = stem.util.enum.Enum("OK", "FAIL", "ERROR", "SKIPPED", "CONTENT")
1
0
commit 24e4f0fca0fefa865aee1be527e5bb491f32aab8
Author: Damian Johnson <atagar(a)torproject.org>
Date: Sat Apr 13 12:47:35 2013 -0700
Dropping direct use of print
Replacing our print calls with println(). This is both to standardize how we
print our output and to make later customization easier (for instance, if
we want a --quiet option).
---
run_tests.py | 49 +++++++++++++++++++++----------------------------
test/output.py | 2 +-
test/runner.py | 2 +-
3 files changed, 23 insertions(+), 30 deletions(-)
diff --git a/run_tests.py b/run_tests.py
index 4116e40..f47511c 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -68,8 +68,7 @@ def _python3_setup(python3_destination, clean):
shutil.rmtree(python3_destination, ignore_errors = True)
if os.path.exists(python3_destination):
- println("Reusing '%s'. Run again with '--clean' if you want to recreate the python3 export." % python3_destination, ERROR)
- print
+ println("Reusing '%s'. Run again with '--clean' if you want to recreate the python3 export.\n" % python3_destination, ERROR)
return True
os.makedirs(python3_destination)
@@ -131,16 +130,14 @@ def _print_static_issues(run_unit, run_integ, run_style):
line_count = "%-4s" % line_number
println(" line %s - %s" % (line_count, msg))
- print
+ println()
if __name__ == '__main__':
try:
stem.prereq.check_requirements()
except ImportError, exc:
- print exc
- print
-
+ println("%s\n" % exc)
sys.exit(1)
start_time = time.time()
@@ -160,7 +157,7 @@ if __name__ == '__main__':
try:
opts = getopt.getopt(sys.argv[1:], OPT, OPT_EXPANDED)[0]
except getopt.GetoptError, exc:
- print "%s (for usage provide --help)" % exc
+ println("%s (for usage provide --help)" % exc)
sys.exit(1)
run_unit = False
@@ -209,12 +206,12 @@ if __name__ == '__main__':
# validates the targets and split them into run and attribute targets
if not integ_targets:
- print "No targets provided"
+ println("No targets provided")
sys.exit(1)
for target in integ_targets:
if not target in Target:
- print "Invalid integration target: %s" % target
+ println("Invalid integration target: %s" % target)
sys.exit(1)
elif target in all_run_targets:
run_targets.append(target)
@@ -236,24 +233,23 @@ if __name__ == '__main__':
# Prints usage information and quits. This includes a listing of the
# valid integration targets.
- print CONFIG["msg.help"]
+ println(CONFIG["msg.help"])
# gets the longest target length so we can show the entries in columns
target_name_length = max(map(len, Target))
description_format = " %%-%is - %%s" % target_name_length
for target in Target:
- print description_format % (target, CONFIG["target.description"].get(target, ""))
-
- print
+ println(description_format % (target, CONFIG["target.description"].get(target, "")))
+ println()
sys.exit()
# basic validation on user input
if logging_runlevel and not logging_runlevel in log.LOG_VALUES:
- print "'%s' isn't a logging runlevel, use one of the following instead:" % logging_runlevel
- print " TRACE, DEBUG, INFO, NOTICE, WARN, ERROR"
+ println("'%s' isn't a logging runlevel, use one of the following instead:" % logging_runlevel)
+ println(" TRACE, DEBUG, INFO, NOTICE, WARN, ERROR")
sys.exit(1)
# check that we have 2to3 and python3 available in our PATH
@@ -307,11 +303,11 @@ if __name__ == '__main__':
# no orphaned files, nothing to do
println("done", STATUS)
else:
- print
+ println()
for pyc_file in orphaned_pyc:
println(" removed %s" % pyc_file, ERROR)
- print
+ println()
if run_unit:
test.output.print_divider("UNIT TESTS", True)
@@ -326,11 +322,11 @@ if __name__ == '__main__':
skipped_test_count += len(run_result.skipped)
sys.stdout.write(test.output.apply_filters(test_results.getvalue(), *output_filters))
- print
+ println()
test.output.print_logging(logging_buffer)
- print
+ println()
if run_integ:
test.output.print_divider("INTEGRATION TESTS", True)
@@ -370,8 +366,7 @@ if __name__ == '__main__':
integ_runner.start(target, attribute_targets, tor_path, extra_torrc_opts = torrc_opts)
- println("Running tests...", term.Color.BLUE, term.Attr.BOLD)
- print
+ println("Running tests...\n", term.Color.BLUE, term.Attr.BOLD)
for test_class in test.util.get_integ_tests(test_prefix):
test.output.print_divider(test_class.__module__)
@@ -382,7 +377,7 @@ if __name__ == '__main__':
skipped_test_count += len(run_result.skipped)
sys.stdout.write(test.output.apply_filters(test_results.getvalue(), *output_filters))
- print
+ println()
test.output.print_logging(logging_buffer)
@@ -408,13 +403,13 @@ if __name__ == '__main__':
integ_runner.stop()
if skip_targets:
- print
+ println()
for target in skip_targets:
req_version = stem.version.Requirement[CONFIG["target.prereq"][target]]
println("Unable to run target %s, this requires tor version %s" % (target, req_version), ERROR)
- print
+ println()
# TODO: note unused config options afterward?
@@ -437,10 +432,8 @@ if __name__ == '__main__':
println(" %s" % line, ERROR)
elif skipped_test_count > 0:
println("%i TESTS WERE SKIPPED" % skipped_test_count, term.Color.BLUE, term.Attr.BOLD)
- println("ALL OTHER TESTS PASSED %s" % runtime_label, term.Color.GREEN, term.Attr.BOLD)
- print
+ println("ALL OTHER TESTS PASSED %s\n" % runtime_label, term.Color.GREEN, term.Attr.BOLD)
else:
- println("TESTING PASSED %s" % runtime_label, term.Color.GREEN, term.Attr.BOLD)
- print
+ println("TESTING PASSED %s\n" % runtime_label, term.Color.GREEN, term.Attr.BOLD)
sys.exit(1 if has_error else 0)
diff --git a/test/output.py b/test/output.py
index b39e650..f174c7b 100644
--- a/test/output.py
+++ b/test/output.py
@@ -45,7 +45,7 @@ LINE_ATTR = {
}
-def println(msg, *attr):
+def println(msg = "", *attr):
attr = _flatten(attr)
no_newline = False
diff --git a/test/runner.py b/test/runner.py
index 2de1852..a15cec9 100644
--- a/test/runner.py
+++ b/test/runner.py
@@ -695,7 +695,7 @@ class Runner(object):
for line in self._torrc_contents.strip().splitlines():
println(" %s" % line.strip(), SUBSTATUS)
- print
+ println()
except Exception, exc:
test.output.print_error("failed (%s)\n" % exc)
raise OSError(exc)
1
0
commit ae5193f93ec7f3c9db74e1b0452939a7cb1196c4
Merge: 314876a 62413a2
Author: Damian Johnson <atagar(a)torproject.org>
Date: Sat Apr 13 21:27:29 2013 -0700
Testing framework overhaul
Over time we've accumulated quite a bit of cruft. Rewriting run_tests.py and
related modules.
run_tests.py | 682 ++++++++++++++------------------------
stem/__init__.py | 2 +-
stem/descriptor/reader.py | 6 +-
stem/prereq.py | 27 +--
stem/process.py | 15 +-
test/__init__.py | 2 -
test/integ/control/controller.py | 7 +-
test/integ/process.py | 20 +-
test/network.py | 75 +++++
test/output.py | 101 ++++---
test/runner.py | 90 +++---
test/settings.cfg | 97 ++----
test/static_checks.py | 216 ------------
test/util.py | 512 ++++++++++++++++++++++++++---
14 files changed, 946 insertions(+), 906 deletions(-)
1
0
commit 62413a29dbc73377ef3dd2231da1e9f35e4f30a9
Author: Damian Johnson <atagar(a)torproject.org>
Date: Sat Apr 13 21:22:40 2013 -0700
Rewriting run_tests.py
Now that the building blocks are in place, giving run_tests.py a long overdue
rewrite. This pushes a great deal of the work to the test utils in the form of
Tasks, units of work we can do in groups.
---
run_tests.py | 522 ++++++++++++++++++++++++------------------------------
stem/__init__.py | 2 +-
test/output.py | 29 ++--
test/runner.py | 20 +--
test/util.py | 296 +++++++++++++++++++++++++++----
5 files changed, 508 insertions(+), 361 deletions(-)
diff --git a/run_tests.py b/run_tests.py
index a2f093e..610f0e2 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -6,9 +6,9 @@
Runs unit and integration tests. For usage information run this with '--help'.
"""
+import collections
import getopt
import os
-import shutil
import StringIO
import sys
import threading
@@ -18,268 +18,121 @@ import unittest
import stem.prereq
import stem.util.conf
import stem.util.enum
-
-from stem.util import log, system
+import stem.util.log
+import stem.util.system
import test.output
import test.runner
import test.util
-from test.output import println, STATUS, SUCCESS, ERROR, NO_NL
-from test.runner import Target
+from test.output import STATUS, SUCCESS, ERROR, println
+from test.util import STEM_BASE, Target, Task
+
+# Our default arguments. The _get_args() function provides a named tuple of
+# this merged with our argv.
+#
+# Integration targets fall into two categories:
+#
+# * Run Targets (like RUN_COOKIE and RUN_PTRACE) which customize our torrc.
+# We do an integration test run for each run target we get.
+#
+# * Attribute Target (like CHROOT and ONLINE) which indicates
+# non-configuration changes to ur test runs. These are applied to all
+# integration runs that we perform.
+
+ARGS = {
+ 'run_unit': False,
+ 'run_integ': False,
+ 'run_style': False,
+ 'run_python3': False,
+ 'run_python3_clean': False,
+ 'test_prefix': None,
+ 'logging_runlevel': None,
+ 'tor_path': 'tor',
+ 'run_targets': [Target.RUN_OPEN],
+ 'attribute_targets': [],
+ 'print_help': False,
+}
OPT = "auist:l:h"
OPT_EXPANDED = ["all", "unit", "integ", "style", "python3", "clean", "targets=", "test=", "log=", "tor=", "help"]
CONFIG = stem.util.conf.config_dict("test", {
- "msg.help": "",
- "target.description": {},
"target.prereq": {},
"target.torrc": {},
"integ.test_directory": "./test/data",
})
-DEFAULT_RUN_TARGET = Target.RUN_OPEN
-
-base = os.path.sep.join(__file__.split(os.path.sep)[:-1]).lstrip("./")
-SOURCE_BASE_PATHS = [os.path.join(base, path) for path in ('stem', 'test', 'run_tests.py')]
-
-
-def _python3_setup(python3_destination, clean):
- """
- Exports the python3 counterpart of our codebase using 2to3.
-
- :param str python3_destination: location to export our codebase to
- :param bool clean: deletes our priorly exported codebase if **True**,
- otherwise this is a no-op
- """
-
- # Python 2.7.3 added some nice capabilities to 2to3, like '--output-dir'...
- #
- # http://docs.python.org/2/library/2to3.html
- #
- # ... but I'm using 2.7.1, and it's pretty easy to make it work without
- # requiring a bleeding edge interpretor.
-
- test.output.print_divider("EXPORTING TO PYTHON 3", True)
-
- if clean:
- shutil.rmtree(python3_destination, ignore_errors = True)
-
- if os.path.exists(python3_destination):
- println("Reusing '%s'. Run again with '--clean' if you want to recreate the python3 export.\n" % python3_destination, ERROR)
- return True
-
- os.makedirs(python3_destination)
-
- try:
- # skips the python3 destination (to avoid an infinite loop)
- def _ignore(src, names):
- if src == os.path.normpath(python3_destination):
- return names
- else:
- return []
-
- println(" copying stem to '%s'... " % python3_destination, STATUS, NO_NL)
- shutil.copytree('stem', os.path.join(python3_destination, 'stem'))
- shutil.copytree('test', os.path.join(python3_destination, 'test'), ignore = _ignore)
- shutil.copy('run_tests.py', os.path.join(python3_destination, 'run_tests.py'))
- println("done", STATUS)
- except OSError, exc:
- println("failed\n%s" % exc, ERROR)
- return False
-
- try:
- println(" running 2to3... ", STATUS, NO_NL)
- system.call("2to3 --write --nobackups --no-diffs %s" % python3_destination)
- println("done", STATUS)
- except OSError, exc:
- println("failed\n%s" % exc, ERROR)
- return False
-
- return True
-
-
-def _print_static_issues(run_unit, run_integ, run_style):
- static_check_issues = {}
-
- # If we're doing some sort of testing (unit or integ) and pyflakes is
- # available then use it. Its static checks are pretty quick so there's not
- # much overhead in including it with all tests.
-
- if run_unit or run_integ:
- if system.is_available("pyflakes"):
- static_check_issues.update(test.util.get_pyflakes_issues(SOURCE_BASE_PATHS))
- else:
- println("Static error checking requires pyflakes. Please install it from ...\n http://pypi.python.org/pypi/pyflakes\n", ERROR)
-
- if run_style:
- if system.is_available("pep8"):
- static_check_issues = test.util.get_stylistic_issues(SOURCE_BASE_PATHS)
- else:
- println("Style checks require pep8. Please install it from...\n http://pypi.python.org/pypi/pep8\n", ERROR)
-
- if static_check_issues:
- println("STATIC CHECKS", STATUS)
-
- for file_path in static_check_issues:
- println("* %s" % file_path, STATUS)
+SRC_PATHS = [os.path.join(STEM_BASE, path) for path in (
+ 'stem',
+ 'test',
+ 'run_tests.py',
+)]
- for line_number, msg in static_check_issues[file_path]:
- line_count = "%-4s" % line_number
- println(" line %s - %s" % (line_count, msg))
+LOG_TYPE_ERROR = """\
+'%s' isn't a logging runlevel, use one of the following instead:
+ TRACE, DEBUG, INFO, NOTICE, WARN, ERROR
+"""
- println()
+def main():
+ start_time = time.time()
-if __name__ == '__main__':
try:
stem.prereq.check_requirements()
except ImportError, exc:
println("%s\n" % exc)
sys.exit(1)
- start_time = time.time()
-
- # override flag to indicate at the end that testing failed somewhere
- testing_failed = False
-
- # count how many tests have been skipped.
- skipped_test_count = 0
-
- # loads and validates our various configurations
test_config = stem.util.conf.get_config("test")
-
- settings_path = os.path.join(test.runner.STEM_BASE, "test", "settings.cfg")
- test_config.load(settings_path)
+ test_config.load(os.path.join(STEM_BASE, "test", "settings.cfg"))
try:
- opts = getopt.getopt(sys.argv[1:], OPT, OPT_EXPANDED)[0]
+ args = _get_args(sys.argv[1:])
except getopt.GetoptError, exc:
println("%s (for usage provide --help)" % exc)
sys.exit(1)
+ except ValueError, exc:
+ println(str(exc))
+ sys.exit(1)
- run_unit = False
- run_integ = False
- run_style = False
- run_python3 = False
- run_python3_clean = False
-
- test_prefix = None
- logging_runlevel = None
- tor_path = "tor"
-
- # Integration testing targets fall into two categories:
- #
- # * Run Targets (like RUN_COOKIE and RUN_PTRACE) which customize our torrc.
- # We do an integration test run for each run target we get.
- #
- # * Attribute Target (like CHROOT and ONLINE) which indicates
- # non-configuration changes to ur test runs. These are applied to all
- # integration runs that we perform.
-
- run_targets = [DEFAULT_RUN_TARGET]
- attribute_targets = []
-
- for opt, arg in opts:
- if opt in ("-a", "--all"):
- run_unit = True
- run_integ = True
- run_style = True
- elif opt in ("-u", "--unit"):
- run_unit = True
- elif opt in ("-i", "--integ"):
- run_integ = True
- elif opt in ("-s", "--style"):
- run_style = True
- elif opt == "--python3":
- run_python3 = True
- elif opt == "--clean":
- run_python3_clean = True
- elif opt in ("-t", "--targets"):
- integ_targets = arg.split(",")
-
- run_targets = []
- all_run_targets = [t for t in Target if CONFIG["target.torrc"].get(t) is not None]
-
- # validates the targets and split them into run and attribute targets
-
- if not integ_targets:
- println("No targets provided")
- sys.exit(1)
-
- for target in integ_targets:
- if not target in Target:
- println("Invalid integration target: %s" % target)
- sys.exit(1)
- elif target in all_run_targets:
- run_targets.append(target)
- else:
- attribute_targets.append(target)
-
- # check if we were told to use all run targets
-
- if Target.RUN_ALL in attribute_targets:
- attribute_targets.remove(Target.RUN_ALL)
- run_targets = all_run_targets
- elif opt in ("-l", "--test"):
- test_prefix = arg
- elif opt in ("-l", "--log"):
- logging_runlevel = arg.upper()
- elif opt in ("--tor"):
- tor_path = arg
- elif opt in ("-h", "--help"):
- # Prints usage information and quits. This includes a listing of the
- # valid integration targets.
-
- println(CONFIG["msg.help"])
-
- # gets the longest target length so we can show the entries in columns
- target_name_length = max(map(len, Target))
- description_format = " %%-%is - %%s" % target_name_length
-
- for target in Target:
- println(description_format % (target, CONFIG["target.description"].get(target, "")))
+ if args.print_help:
+ println(test.util.get_help_message())
+ sys.exit()
+ elif not args.run_unit and not args.run_integ and not args.run_style:
+ println("Nothing to run (for usage provide --help)\n")
+ sys.exit()
- println()
- sys.exit()
+ test.util.run_tasks(
+ "INITIALISING",
+ Task("checking stem version", test.util.check_stem_version),
+ Task("checking python version", test.util.check_python_version),
+ Task("checking pyflakes version", test.util.check_pyflakes_version),
+ Task("checking pep8 version", test.util.check_pep8_version),
+ Task("checking for orphaned .pyc files", test.util.clean_orphaned_pyc, (SRC_PATHS,)),
+ )
- # basic validation on user input
+ if args.run_python3 and sys.version_info[0] != 3:
+ test.util.run_tasks(
+ "EXPORTING TO PYTHON 3",
+ Task("checking requirements", test.util.python3_prereq),
+ Task("cleaning prior export", test.util.python3_clean, (not args.run_python3_clean,)),
+ Task("exporting python 3 copy", test.util.python3_copy_stem),
+ Task("running tests", test.util.python3_run_tests),
+ )
- if logging_runlevel and not logging_runlevel in log.LOG_VALUES:
- println("'%s' isn't a logging runlevel, use one of the following instead:" % logging_runlevel)
- println(" TRACE, DEBUG, INFO, NOTICE, WARN, ERROR")
+ println("BUG: python3_run_tests() should have terminated our process", ERROR)
sys.exit(1)
- # check that we have 2to3 and python3 available in our PATH
- if run_python3:
- for required_cmd in ("2to3", "python3"):
- if not system.is_available(required_cmd):
- println("Unable to test python 3 because %s isn't in your path" % required_cmd, ERROR)
- sys.exit(1)
-
- if run_python3 and sys.version_info[0] != 3:
- python3_destination = os.path.join(CONFIG["integ.test_directory"], "python3")
-
- if _python3_setup(python3_destination, run_python3_clean):
- python3_runner = os.path.join(python3_destination, "run_tests.py")
- exit_status = os.system("python3 %s %s" % (python3_runner, " ".join(sys.argv[1:])))
- sys.exit(exit_status)
- else:
- sys.exit(1) # failed to do python3 setup
-
- if not run_unit and not run_integ and not run_style:
- println("Nothing to run (for usage provide --help)\n")
- sys.exit()
+ # buffer that we log messages into so they can be printed after a test has finished
- # if we have verbose logging then provide the testing config
- our_level = stem.util.log.logging_level(logging_runlevel)
- info_level = stem.util.log.logging_level(stem.util.log.INFO)
+ logging_buffer = stem.util.log.LogBuffer(args.logging_runlevel)
+ stem.util.log.get_logger().addHandler(logging_buffer)
- if our_level <= info_level:
- test.output.print_config(test_config)
+ # filters for how testing output is displayed
error_tracker = test.output.ErrorTracker()
+
output_filters = (
error_tracker.get_filter(),
test.output.strip_module,
@@ -287,63 +140,39 @@ if __name__ == '__main__':
test.output.colorize,
)
- stem_logger = log.get_logger()
- logging_buffer = log.LogBuffer(logging_runlevel)
- stem_logger.addHandler(logging_buffer)
-
- test.output.print_divider("INITIALISING", True)
+ # Number of tests that we have skipped. This is only available with python
+ # 2.7 or later because before that test results didn't have a 'skipped'
+ # attribute.
- println("Performing startup activities...", STATUS)
- println(" checking for orphaned .pyc files... ", STATUS, NO_NL)
+ skipped_tests = 0
- orphaned_pyc = test.util.clean_orphaned_pyc(SOURCE_BASE_PATHS)
-
- if not orphaned_pyc:
- # no orphaned files, nothing to do
- println("done", STATUS)
- else:
- println()
- for pyc_file in orphaned_pyc:
- println(" removed %s" % pyc_file, ERROR)
-
- println()
-
- if run_unit:
+ if args.run_unit:
test.output.print_divider("UNIT TESTS", True)
error_tracker.set_category("UNIT TEST")
- for test_class in test.util.get_unit_tests(test_prefix):
- test.output.print_divider(test_class.__module__)
- suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
- test_results = StringIO.StringIO()
- run_result = unittest.TextTestRunner(test_results, verbosity=2).run(suite)
- if stem.prereq.is_python_27():
- skipped_test_count += len(run_result.skipped)
-
- sys.stdout.write(test.output.apply_filters(test_results.getvalue(), *output_filters))
- println()
-
- test.output.print_logging(logging_buffer)
+ for test_class in test.util.get_unit_tests(args.test_prefix):
+ run_result = _run_test(test_class, output_filters, logging_buffer)
+ skipped_tests += len(getattr(run_result, 'skipped', []))
println()
- if run_integ:
+ if args.run_integ:
test.output.print_divider("INTEGRATION TESTS", True)
integ_runner = test.runner.get_runner()
# Determine targets we don't meet the prereqs for. Warnings are given about
# these at the end of the test run so they're more noticeable.
- our_version = stem.version.get_system_tor_version(tor_path)
- skip_targets = []
+ our_version = stem.version.get_system_tor_version(args.tor_path)
+ skipped_targets = []
- for target in run_targets:
+ for target in args.run_targets:
# check if we meet this target's tor version prerequisites
target_prereq = CONFIG["target.prereq"].get(target)
if target_prereq and our_version < stem.version.Requirement[target_prereq]:
- skip_targets.append(target)
+ skipped_targets.append(target)
continue
error_tracker.set_category(target)
@@ -363,22 +192,13 @@ if __name__ == '__main__':
println("'%s' isn't a test.runner.Torrc enumeration" % opt)
sys.exit(1)
- integ_runner.start(target, attribute_targets, tor_path, extra_torrc_opts = torrc_opts)
+ integ_runner.start(target, args.attribute_targets, args.tor_path, extra_torrc_opts = torrc_opts)
println("Running tests...\n", STATUS)
- for test_class in test.util.get_integ_tests(test_prefix):
- test.output.print_divider(test_class.__module__)
- suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
- test_results = StringIO.StringIO()
- run_result = unittest.TextTestRunner(test_results, verbosity=2).run(suite)
- if stem.prereq.is_python_27():
- skipped_test_count += len(run_result.skipped)
-
- sys.stdout.write(test.output.apply_filters(test_results.getvalue(), *output_filters))
- println()
-
- test.output.print_logging(logging_buffer)
+ for test_class in test.util.get_integ_tests(args.test_prefix):
+ run_result = _run_test(test_class, output_filters, logging_buffer)
+ skipped_tests += len(getattr(run_result, 'skipped', []))
# We should have joined on all threads. If not then that indicates a
# leak that could both likely be a bug and disrupt further targets.
@@ -391,48 +211,166 @@ if __name__ == '__main__':
for lingering_thread in active_threads:
println(" %s" % lingering_thread, ERROR)
- testing_failed = True
+ error_tracker.note_error()
break
except KeyboardInterrupt:
println(" aborted starting tor: keyboard interrupt\n", ERROR)
break
except OSError:
- testing_failed = True
+ error_tracker.note_error()
finally:
integ_runner.stop()
- if skip_targets:
+ if skipped_targets:
println()
- for target in skip_targets:
+ for target in skipped_targets:
req_version = stem.version.Requirement[CONFIG["target.prereq"][target]]
println("Unable to run target %s, this requires tor version %s" % (target, req_version), ERROR)
println()
- # TODO: note unused config options afterward?
-
if not stem.prereq.is_python_3():
- _print_static_issues(run_unit, run_integ, run_style)
+ _print_static_issues(args)
- runtime = time.time() - start_time
+ runtime_label = "(%i seconds)" % (time.time() - start_time)
- if runtime < 1:
- runtime_label = "(%0.1f seconds)" % runtime
- else:
- runtime_label = "(%i seconds)" % runtime
-
- has_error = testing_failed or error_tracker.has_error_occured()
-
- if has_error:
+ if error_tracker.has_errors_occured():
println("TESTING FAILED %s" % runtime_label, ERROR)
for line in error_tracker:
println(" %s" % line, ERROR)
- elif skipped_test_count > 0:
- println("%i TESTS WERE SKIPPED" % skipped_test_count, STATUS)
- println("ALL OTHER TESTS PASSED %s\n" % runtime_label, SUCCESS)
else:
+ if skipped_tests > 0:
+ println("%i TESTS WERE SKIPPED" % skipped_tests, STATUS)
+
println("TESTING PASSED %s\n" % runtime_label, SUCCESS)
- sys.exit(1 if has_error else 0)
+ sys.exit(1 if error_tracker.has_errors_occured() else 0)
+
+
def _get_args(argv):
  """
  Parses our arguments, providing a named tuple with their values.

  :param list argv: input arguments to be parsed

  :returns: a **named tuple** with our parsed arguments

  :raises: **ValueError** if we got an invalid argument
  :raises: **getopt.GetoptError** if the arguments don't conform with what we
    accept
  """

  args = dict(ARGS)

  for opt, arg in getopt.getopt(argv, OPT, OPT_EXPANDED)[0]:
    if opt in ("-a", "--all"):
      args['run_unit'] = True
      args['run_integ'] = True
      args['run_style'] = True
    elif opt in ("-u", "--unit"):
      args['run_unit'] = True
    elif opt in ("-i", "--integ"):
      args['run_integ'] = True
    elif opt in ("-s", "--style"):
      args['run_style'] = True
    elif opt == "--python3":
      args['run_python3'] = True
    elif opt == "--clean":
      args['run_python3_clean'] = True
    elif opt in ("-t", "--targets"):
      run_targets, attribute_targets = [], []

      integ_targets = arg.split(",")
      all_run_targets = [t for t in Target if CONFIG["target.torrc"].get(t) is not None]

      # validates the targets and split them into run and attribute targets

      if not integ_targets:
        raise ValueError("No targets provided")

      for target in integ_targets:
        if not target in Target:
          raise ValueError("Invalid integration target: %s" % target)
        elif target in all_run_targets:
          run_targets.append(target)
        else:
          attribute_targets.append(target)

      # check if we were told to use all run targets

      if Target.RUN_ALL in attribute_targets:
        attribute_targets.remove(Target.RUN_ALL)
        run_targets = all_run_targets

      args['run_targets'] = run_targets
      args['attribute_targets'] = attribute_targets
    elif opt == "--test":
      # This previously matched ("-l", "--test"), which clashed with the
      # ("-l", "--log") branch below: '-l RUNLEVEL' was consumed here as a
      # test prefix, so logging could never be enabled via its short option.
      args['test_prefix'] = arg
    elif opt in ("-l", "--log"):
      arg = arg.upper()

      if not arg in stem.util.log.LOG_VALUES:
        raise ValueError(LOG_TYPE_ERROR % arg)

      args['logging_runlevel'] = arg
    elif opt == "--tor":
      # 'opt in ("--tor")' was a substring check against the string "--tor"
      # (parentheses alone don't make a tuple), so compare for equality.
      args['tor_path'] = arg
    elif opt in ("-h", "--help"):
      args['print_help'] = True

  # translates our args dict into a named tuple

  Args = collections.namedtuple('Args', args.keys())
  return Args(**args)
+
+
def _print_static_issues(args):
  """
  Runs our static checks (pyflakes and pep8, as the given arguments call for)
  and prints any issues they find.

  :param namedtuple args: parsed arguments for this test run
  """

  static_check_issues = {}

  # If we're doing some sort of testing (unit or integ) and pyflakes is
  # available then use it. Its static checks are pretty quick so there's not
  # much overhead in including it with all tests.

  if args.run_unit or args.run_integ:
    if stem.util.system.is_available("pyflakes"):
      static_check_issues.update(test.util.get_pyflakes_issues(SRC_PATHS))
    else:
      println("Static error checking requires pyflakes. Please install it from ...\n http://pypi.python.org/pypi/pyflakes\n", ERROR)

  if args.run_style:
    if stem.util.system.is_available("pep8"):
      static_check_issues.update(test.util.get_stylistic_issues(SRC_PATHS))
    else:
      println("Style checks require pep8. Please install it from...\n http://pypi.python.org/pypi/pep8\n", ERROR)

  if static_check_issues:
    println("STATIC CHECKS", STATUS)

    # iterate items() rather than doing a second lookup per file

    for file_path, file_issues in static_check_issues.items():
      println("* %s" % file_path, STATUS)

      for line_number, msg in file_issues:
        line_count = "%-4s" % line_number
        println(" line %s - %s" % (line_count, msg))

      println()
+
+
def _run_test(test_class, output_filters, logging_buffer):
  """
  Runs a single test class, writing its filtered results to stdout along
  with any log events that were buffered during the run.

  :param class test_class: TestCase subclass to be exercised
  :param tuple output_filters: filters to apply to the test output
  :param LogBuffer logging_buffer: buffered stem log events to print

  :returns: TestResult for the test run
  """

  test.output.print_divider(test_class.__module__)

  # run the suite, capturing its raw output so we can filter it

  buffered_output = StringIO.StringIO()
  loaded_suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
  result = unittest.TextTestRunner(buffered_output, verbosity=2).run(loaded_suite)

  filtered_output = test.output.apply_filters(buffered_output.getvalue(), *output_filters)
  sys.stdout.write(filtered_output)
  println()

  test.output.print_logging(logging_buffer)

  return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/stem/__init__.py b/stem/__init__.py
index 0dacc85..ed66a99 100644
--- a/stem/__init__.py
+++ b/stem/__init__.py
@@ -369,7 +369,7 @@ Library for working with the tor process.
=============== ===========
"""
-__version__ = '1.0.1'
+__version__ = '1.0.1-dev'
__author__ = 'Damian Johnson'
__contact__ = 'atagar(a)torproject.org'
__url__ = 'https://stem.torproject.org/'
diff --git a/test/output.py b/test/output.py
index d965e76..be51b60 100644
--- a/test/output.py
+++ b/test/output.py
@@ -78,22 +78,6 @@ def print_logging(logging_buffer):
print
-def print_config(test_config):
- print_divider("TESTING CONFIG", True)
- println("Test configuration... ", term.Color.BLUE, term.Attr.BOLD)
-
- for config_key in test_config.keys():
- key_entry = " %s => " % config_key
-
- # if there's multiple values then list them on separate lines
- value_div = ",\n" + (" " * len(key_entry))
- value_entry = value_div.join(test_config.get_value(config_key, multiple = True))
-
- println(key_entry + value_entry, term.Color.BLUE)
-
- print
-
-
def apply_filters(testing_output, *filters):
"""
Gets the tests results, possibly processed through a series of filters. The
@@ -200,6 +184,15 @@ class ErrorTracker(object):
def __init__(self):
self._errors = []
self._category = None
+ self._error_noted = False
+
+ def note_error(self):
+ """
+ If called then has_errors_occured() will report that an error has occurred,
+ even if we haven't encountered an error message in the tests.
+ """
+
+ self._error_noted = True
def set_category(self, category):
"""
@@ -215,8 +208,8 @@ class ErrorTracker(object):
self._category = category
- def has_error_occured(self):
- return bool(self._errors)
+ def has_errors_occured(self):
+ return self._error_noted or bool(self._errors)
def get_filter(self):
def _error_tracker(line_type, line_content):
diff --git a/test/runner.py b/test/runner.py
index a15cec9..4d8e533 100644
--- a/test/runner.py
+++ b/test/runner.py
@@ -57,27 +57,13 @@ import stem.version
import test.output
from test.output import println, STATUS, SUBSTATUS, NO_NL
+from test.util import Target, STEM_BASE
CONFIG = stem.util.conf.config_dict("test", {
"integ.test_directory": "./test/data",
"integ.log": "./test/data/log",
})
-Target = stem.util.enum.UppercaseEnum(
- "ONLINE",
- "RELATIVE",
- "CHROOT",
- "RUN_NONE",
- "RUN_OPEN",
- "RUN_PASSWORD",
- "RUN_COOKIE",
- "RUN_MULTIPLE",
- "RUN_SOCKET",
- "RUN_SCOOKIE",
- "RUN_PTRACE",
- "RUN_ALL",
-)
-
SOCKS_HOST = "127.0.0.1"
SOCKS_PORT = 1112
@@ -87,10 +73,6 @@ SocksListenAddress %s:%i
DownloadExtraInfo 1
""" % (SOCKS_HOST, SOCKS_PORT)
-# We make some paths relative to stem's base directory (the one above us)
-# rather than the process' cwd. This doesn't end with a slash.
-STEM_BASE = os.path.sep.join(__file__.split(os.path.sep)[:-2])
-
# singleton Runner instance
INTEG_RUNNER = None
diff --git a/test/util.py b/test/util.py
index 9c0e23e..52a9989 100644
--- a/test/util.py
+++ b/test/util.py
@@ -9,18 +9,47 @@ Helper functions for our test framework.
get_unit_tests - provides our unit tests
get_integ_tests - provides our integration tests
- clean_orphaned_pyc - removes any *.pyc without a corresponding *.py
+ get_help_message - provides usage information for running our tests
+ get_python3_destination - location where a python3 copy of stem is exported to
get_stylistic_issues - checks for PEP8 and other stylistic issues
get_pyflakes_issues - static checks for problems via pyflakes
+
+Sets of :class:`~test.util.Task` instances can be run with
+:func:`~test.util.run_tasks`. Functions that are intended for easy use with
+Tasks are...
+
+::
+
+ Initialization
+ |- check_stem_version - checks our version of stem
+ |- check_python_version - checks our version of python
+ |- check_pyflakes_version - checks our version of pyflakes
+ |- check_pep8_version - checks our version of pep8
+ +- clean_orphaned_pyc - removes any *.pyc without a corresponding *.py
+
+ Testing Python 3
+ |- python3_prereq - checks that we have python3 and 2to3
+ |- python3_clean - deletes our prior python3 export
+ |- python3_copy_stem - copies our codebase and converts with 2to3
+ +- python3_run_tests - runs python 3 tests
"""
import re
import os
+import shutil
+import sys
+import stem
import stem.util.conf
import stem.util.system
+import test.output
+
+from test.output import STATUS, ERROR, NO_NL, println
+
CONFIG = stem.util.conf.config_dict("test", {
+ "msg.help": "",
+ "target.description": {},
"pep8.ignore": [],
"pyflakes.ignore": [],
"integ.test_directory": "./test/data",
@@ -28,6 +57,25 @@ CONFIG = stem.util.conf.config_dict("test", {
"test.integ_tests": "",
})
+Target = stem.util.enum.UppercaseEnum(
+ "ONLINE",
+ "RELATIVE",
+ "CHROOT",
+ "RUN_NONE",
+ "RUN_OPEN",
+ "RUN_PASSWORD",
+ "RUN_COOKIE",
+ "RUN_MULTIPLE",
+ "RUN_SOCKET",
+ "RUN_SCOOKIE",
+ "RUN_PTRACE",
+ "RUN_ALL",
+)
+
+# We make some paths relative to stem's base directory (the one above us)
+# rather than the process' cwd. This doesn't end with a slash.
+STEM_BASE = os.path.sep.join(__file__.split(os.path.sep)[:-2])
+
# mapping of files to the issues that should be ignored
PYFLAKES_IGNORE = None
@@ -79,44 +127,37 @@ def _get_tests(modules, prefix):
yield module
-def clean_orphaned_pyc(paths):
+def get_help_message():
"""
- Deletes any file with a *.pyc extention without a corresponding *.py. This
- helps to address a common gotcha when deleting python files...
+ Provides usage information, as provided by the '--help' argument. This
+ includes a listing of the valid integration targets.
- * You delete module 'foo.py' and run the tests to ensure that you haven't
- broken anything. They pass, however there *are* still some 'import foo'
- statements that still work because the bytecode (foo.pyc) is still around.
+ :returns: **str** with our usage information
+ """
- * You push your change.
+ help_msg = CONFIG["msg.help"]
- * Another developer clones our repository and is confused because we have a
- bunch of ImportErrors.
+ # gets the longest target length so we can show the entries in columns
+ target_name_length = max(map(len, Target))
+ description_format = "\n %%-%is - %%s" % target_name_length
- :param list paths: paths to search for orphaned pyc files
+ for target in Target:
+ help_msg += description_format % (target, CONFIG["target.description"].get(target, ""))
- :returns: list of files that we deleted
- """
+ help_msg += "\n"
- orphaned_pyc = []
+ return help_msg
- for path in paths:
- for pyc_path in _get_files_with_suffix(path, ".pyc"):
- # If we're running python 3 then the *.pyc files are no longer bundled
- # with the *.py. Rather, they're in a __pycache__ directory.
- #
- # At the moment there's no point in checking for orphaned bytecode with
- # python 3 because it's an exported copy of the python 2 codebase, so
- # skipping.
- if "__pycache__" in pyc_path:
- continue
+def get_python3_destination():
+ """
+ Provides the location where a python 3 copy of stem is exported to for
+ testing.
- if not os.path.exists(pyc_path[:-1]):
- orphaned_pyc.append(pyc_path)
- os.remove(pyc_path)
+ :returns: **str** with the relative path to our python 3 location
+ """
- return orphaned_pyc
+ return os.path.join(CONFIG["integ.test_directory"], "python3")
def get_stylistic_issues(paths):
@@ -130,7 +171,7 @@ def get_stylistic_issues(paths):
:param list paths: paths to search for stylistic issues
- :returns: dict of the form ``path => [(line_number, message)...]``
+ :returns: **dict** of the form ``path => [(line_number, message)...]``
"""
# The pep8 command give output of the form...
@@ -229,14 +270,138 @@ def get_pyflakes_issues(paths):
if line_match:
path, line, issue = line_match.groups()
- if not _is_test_data(path) and not issue in PYFLAKES_IGNORE.get(path, []):
+ if _is_test_data(path):
+ continue
+
+ # paths in PYFLAKES_IGNORE are relative, so we need to check to see if
+ # our path ends with any of them
+
+ ignore_issue = False
+
+ for ignore_path in PYFLAKES_IGNORE:
+ if path.endswith(ignore_path) and issue in PYFLAKES_IGNORE[ignore_path]:
+ ignore_issue = True
+ break
+
+ if not ignore_issue:
issues.setdefault(path, []).append((int(line), issue))
return issues
def check_stem_version():
  """
  Provides the version of stem we're testing.

  :returns: **str** with the stem version
  """

  return stem.__version__
+
+
def check_python_version():
  """
  Provides the version of python we're running with.

  :returns: **str** such as '2.7.3'
  """

  major, minor, micro = sys.version_info[:3]
  return "%s.%s.%s" % (major, minor, micro)
+
+
def check_pyflakes_version():
  """
  Provides the version of pyflakes that's available, if any.

  :returns: **str** with the pyflakes version, or 'missing' if unavailable
  """

  try:
    import pyflakes
  except ImportError:
    return "missing"

  return pyflakes.__version__
+
+
def check_pep8_version():
  """
  Provides the version of pep8 that's available, if any.

  :returns: **str** with the pep8 version, or 'missing' if unavailable
  """

  try:
    import pep8
  except ImportError:
    return "missing"

  return pep8.__version__
+
+
def clean_orphaned_pyc(paths):
  """
  Deletes any file with a *.pyc extension without a corresponding *.py. This
  helps to address a common gotcha when deleting python files...

  * You delete module 'foo.py' and run the tests to ensure that you haven't
    broken anything. They pass, however there *are* still some 'import foo'
    statements that still work because the bytecode (foo.pyc) is still around.

  * You push your change.

  * Another developer clones our repository and is confused because we have a
    bunch of ImportErrors.

  :param list paths: paths to search for orphaned pyc files

  :returns: **list** of status messages for the files that we removed
  """

  orphaned_pyc = []

  for path in paths:
    for pyc_path in _get_files_with_suffix(path, ".pyc"):
      # If we're running python 3 then the *.pyc files are no longer bundled
      # with the *.py. Rather, they're in a __pycache__ directory.
      #
      # At the moment there's no point in checking for orphaned bytecode with
      # python 3 because it's an exported copy of the python 2 codebase, so
      # skipping.

      if "__pycache__" in pyc_path:
        continue

      # the corresponding *.py path is the *.pyc path minus its trailing 'c'

      if not os.path.exists(pyc_path[:-1]):
        orphaned_pyc.append(pyc_path)
        os.remove(pyc_path)

  # use a distinct variable name so we don't shadow the 'path' loop variable

  return ["removed %s" % pyc_path for pyc_path in orphaned_pyc]
+
+
def python3_prereq():
  """
  Checks that the commands we need to export and run a python 3 copy of stem
  are present.

  :raises: **ValueError** if 2to3 or python3 isn't in our PATH
  """

  missing_commands = [cmd for cmd in ("2to3", "python3") if not stem.util.system.is_available(cmd)]

  if missing_commands:
    raise ValueError("Unable to test python 3 because %s isn't in your path" % missing_commands[0])
+
+
def python3_clean(skip = False):
  """
  Removes our prior python 3 export of stem, if there is one.

  :param bool skip: reports the existing export rather than removing it

  :returns: **str** or **list** describing what we did
  """

  location = get_python3_destination()

  # nothing to clean up if an export was never made

  if not os.path.exists(location):
    return "skipped"

  if skip:
    return ["Reusing '%s'. Run again with '--clean' if you want a fresh copy." % location]

  shutil.rmtree(location, ignore_errors = True)
  return "done"
+
+
def python3_copy_stem():
  """
  Exports a copy of the stem codebase to our python 3 test directory and
  converts it in-place with 2to3, unless an export already exists.

  :returns: **str** describing what we did
  """

  destination = get_python3_destination()

  # reuse a prior export if we have one (python3_clean removes it when the
  # user asks for a fresh copy)

  if os.path.exists(destination):
    return "skipped"

  # skips the python3 destination (to avoid an infinite loop)
  #
  # The destination lives under the 'test' directory, so without this ignore
  # callback copytree('test', ...) would copy the export into itself.

  def _ignore(src, names):
    if src == os.path.normpath(destination):
      return names
    else:
      return []

  os.makedirs(destination)
  shutil.copytree('stem', os.path.join(destination, 'stem'))
  shutil.copytree('test', os.path.join(destination, 'test'), ignore = _ignore)
  shutil.copy('run_tests.py', os.path.join(destination, 'run_tests.py'))

  # NOTE(review): this assumes the process' cwd is stem's base directory
  # (relative 'stem', 'test', and 'run_tests.py' paths) - confirm callers
  # guarantee that

  stem.util.system.call("2to3 --write --nobackups --no-diffs %s" % get_python3_destination())

  return "done"
+
+
def python3_run_tests():
  """
  Runs the python 3 copy of our test runner with the same arguments we were
  invoked with, then exits this process with its exit status. This never
  returns.
  """

  println()
  println()

  python3_runner = os.path.join(get_python3_destination(), "run_tests.py")

  # NOTE(review): os.system() returns the raw wait status rather than the
  # child's exit code - consider os.WEXITSTATUS before passing to sys.exit()

  exit_status = os.system("python3 %s %s" % (python3_runner, " ".join(sys.argv[1:])))
  sys.exit(exit_status)
+
+
def _is_test_data(path):
- return os.path.normpath(path).startswith(os.path.normpath(CONFIG["integ.test_directory"]))
+ return os.path.normpath(CONFIG["integ.test_directory"]) in path
def _get_files_with_suffix(base_path, suffix = ".py"):
@@ -258,3 +423,72 @@ def _get_files_with_suffix(base_path, suffix = ".py"):
for filename in files:
if filename.endswith(suffix):
yield os.path.join(root, filename)
+
+
def run_tasks(category, *tasks):
  """
  Runs a series of :class:`test.util.Task` instances. This simply prints 'done'
  or 'failed' for each unless we fail one that is marked as being required. If
  that happens then we print its error message and call sys.exit().

  :param str category: label for the series of tasks
  :param list tasks: **Task** instances to be run
  """

  test.output.print_divider(category, True)

  for task in tasks:
    task.run()

    # a failure in a required task aborts the whole test run

    if task.error and task.is_required:
      println("\n%s\n" % task.error, ERROR)
      sys.exit(1)

  println()
+
+
class Task(object):
  """
  Task we can process while running our tests. The runner can return either a
  message or list of strings for its results.
  """

  def __init__(self, label, runner, args = None, is_required = True):
    super(Task, self).__init__()

    self.label = label  # description printed while the task runs
    self.runner = runner  # callable that performs the task
    self.args = args  # optional arguments passed to the runner
    self.is_required = is_required  # if True then a failure aborts the test run
    self.error = None  # exception the runner raised, if any

  def run(self):
    # announce the task, padding so the status results line up in a column
    println(" %s..." % self.label, STATUS, NO_NL)

    # NOTE(review): padding goes negative for labels longer than fifty
    # characters, making the pad empty - confirm labels stay short

    padding = 50 - len(self.label)
    println(" " * padding, NO_NL)

    try:
      if self.args:
        result = self.runner(*self.args)
      else:
        result = self.runner()

      # a string result replaces the default 'done' status message
      output_msg = "done"

      if isinstance(result, str):
        output_msg = result

      println(output_msg, STATUS)

      # list or tuple results are shown on indented lines after the status
      if isinstance(result, (list, tuple)):
        for line in result:
          println(" %s" % line, STATUS)
    except Exception, exc:
      output_msg = str(exc)

      # Required tasks just show 'failed' here because run_tasks() prints the
      # full error afterward. Exceptions without a message do the same.
      if not output_msg or self.is_required:
        output_msg = "failed"

      println(output_msg, ERROR)
      self.error = exc
1
0
14 Apr '13
commit 5ccfcfcc1827e38901754dd3fab20c3f9dcba42b
Author: Damian Johnson <atagar(a)torproject.org>
Date: Thu Apr 11 08:50:20 2013 -0700
Dropping support for a --config argument
Let's face it, no one ever uses this. I doubt anyone besides me even knows
*what* the hell it even does. Dropping this will let us simplify the test
runner quite a bit.
---
run_tests.py | 17 +++--------------
test/settings.cfg | 1 -
2 files changed, 3 insertions(+), 15 deletions(-)
diff --git a/run_tests.py b/run_tests.py
index 42ff22e..92c40ab 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -25,8 +25,8 @@ import test.output
import test.runner
import test.static_checks
-OPT = "auist:l:c:h"
-OPT_EXPANDED = ["all", "unit", "integ", "style", "python3", "clean", "targets=", "test=", "log=", "tor=", "config=", "help"]
+OPT = "auist:l:h"
+OPT_EXPANDED = ["all", "unit", "integ", "style", "python3", "clean", "targets=", "test=", "log=", "tor=", "help"]
DIVIDER = "=" * 70
CONFIG = stem.util.conf.config_dict("test", {
@@ -74,7 +74,7 @@ def load_user_configuration(test_config):
fails.
"""
- arg_overrides, config_path = {}, None
+ arg_overrides = {}
try:
opts = getopt.getopt(sys.argv[1:], OPT, OPT_EXPANDED)[0]
@@ -97,8 +97,6 @@ def load_user_configuration(test_config):
arg_overrides["argument.python3"] = "true"
elif opt == "--clean":
arg_overrides["argument.python3_clean"] = "true"
- elif opt in ("-c", "--config"):
- config_path = os.path.abspath(arg)
elif opt in ("-t", "--targets"):
integ_targets = arg.split(",")
@@ -139,15 +137,6 @@ def load_user_configuration(test_config):
sys.exit()
- # load a testrc if '--config' was given, then apply arguments
-
- if config_path:
- try:
- test_config.load(config_path)
- except IOError, exc:
- print "Unable to load testing configuration at '%s': %s" % (config_path, exc)
- sys.exit(1)
-
for key, value in arg_overrides.items():
test_config.set(key, value)
diff --git a/test/settings.cfg b/test/settings.cfg
index cebee31..372ce33 100644
--- a/test/settings.cfg
+++ b/test/settings.cfg
@@ -83,7 +83,6 @@ msg.help
| -l, --log RUNLEVEL includes logging output with test results, runlevels:
| TRACE, DEBUG, INFO, NOTICE, WARN, ERROR
| --tor PATH custom tor binary to run testing against
-| -c, --config PATH path to a custom test configuration
| -h, --help presents this help
|
| Examples:
1
0
commit 930e3608cfef3678357f9e4ad4168f24da19ddf6
Author: Damian Johnson <atagar(a)torproject.org>
Date: Thu Apr 11 08:44:23 2013 -0700
Dropping argument.no_color
Disabling output colorization stopped being a runner argument a long time ago.
Like git, stem's test runner makes a pretty good guess about if colored output
is supported or not. If it's wrong then we can fix the check.
---
run_tests.py | 5 -----
test/output.py | 40 ++++++++++++++++++----------------------
test/settings.cfg | 1 -
3 files changed, 18 insertions(+), 28 deletions(-)
diff --git a/run_tests.py b/run_tests.py
index 3de1fbf..42ff22e 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -38,7 +38,6 @@ CONFIG = stem.util.conf.config_dict("test", {
"argument.test": "",
"argument.log": None,
"argument.tor": "tor",
- "argument.no_color": False,
"msg.help": "",
"target.config": {},
"target.description": {},
@@ -83,10 +82,6 @@ def load_user_configuration(test_config):
print "%s (for usage provide --help)" % exc
sys.exit(1)
- # suppress color output if our output is being piped
- if (not sys.stdout.isatty()) or system.is_windows():
- arg_overrides["argument.no_color"] = "true"
-
for opt, arg in opts:
if opt in ("-a", "--all"):
arg_overrides["argument.unit"] = "true"
diff --git a/test/output.py b/test/output.py
index 17c457e..66e19c6 100644
--- a/test/output.py
+++ b/test/output.py
@@ -9,14 +9,11 @@ together for improved readability.
import re
import sys
-import stem.util.conf
import stem.util.enum
-from stem.util import term
+from stem.util import system, term
-CONFIG = stem.util.conf.config_dict("test", {
- "argument.no_color": False,
-})
+COLOR_SUPPORT = sys.stdout.isatty() and not system.is_windows()
DIVIDER = "=" * 70
HEADER_ATTR = (term.Color.CYAN, term.Attr.BOLD)
@@ -41,19 +38,18 @@ LINE_ATTR = {
def print_line(msg, *attr):
- if CONFIG["argument.no_color"]:
- print msg
- else:
- print term.format(msg, *attr)
+ if COLOR_SUPPORT:
+ msg = term.format(msg, *attr)
+
+ print msg
def print_noline(msg, *attr):
- if CONFIG["argument.no_color"]:
- sys.stdout.write(msg)
- sys.stdout.flush()
- else:
- sys.stdout.write(term.format(msg, *attr))
- sys.stdout.flush()
+ if COLOR_SUPPORT:
+ msg = term.format(msg, *attr)
+
+ sys.stdout.write(msg)
+ sys.stdout.flush()
def print_divider(msg, is_header = False):
@@ -128,10 +124,10 @@ def colorize(line_type, line_content):
Applies escape sequences so each line is colored according to its type.
"""
- if CONFIG["argument.no_color"]:
- return line_content
- else:
- return term.format(line_content, *LINE_ATTR[line_type])
+ if COLOR_SUPPORT:
+ line_content = term.format(line_content, *LINE_ATTR[line_type])
+
+ return line_content
def strip_module(line_type, line_content):
@@ -177,10 +173,10 @@ def align_results(line_type, line_content):
assert False, "Unexpected line type: %s" % line_type
return line_content
- if CONFIG["argument.no_color"]:
- return "%-61s[%s]" % (line_content, term.format(new_ending))
- else:
+ if COLOR_SUPPORT:
return "%-61s[%s]" % (line_content, term.format(new_ending, term.Attr.BOLD))
+ else:
+ return "%-61s[%s]" % (line_content, term.format(new_ending))
class ErrorTracker(object):
diff --git a/test/settings.cfg b/test/settings.cfg
index d75b7d7..cebee31 100644
--- a/test/settings.cfg
+++ b/test/settings.cfg
@@ -42,7 +42,6 @@ argument.style false
argument.test
argument.log
argument.tor tor
-argument.no_color false
integ.test_directory ./test/data
integ.log ./test/data/log
1
0